Diffstat (limited to 'include')
-rw-r--r--include/acpi/acnames.h1
-rw-r--r--include/acpi/acoutput.h13
-rw-r--r--include/acpi/acpi_bus.h43
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h28
-rw-r--r--include/acpi/actbl.h18
-rw-r--r--include/acpi/actbl1.h187
-rw-r--r--include/acpi/actbl2.h226
-rw-r--r--include/acpi/actbl3.h98
-rw-r--r--include/acpi/actypes.h39
-rw-r--r--include/acpi/acuuid.h89
-rw-r--r--include/acpi/platform/acenv.h42
-rw-r--r--include/acpi/platform/acenvex.h9
-rw-r--r--include/acpi/platform/acgcc.h4
-rw-r--r--include/acpi/video.h21
-rw-r--r--include/asm-generic/asm-offsets.h1
-rw-r--r--include/asm-generic/barrier.h28
-rw-r--r--include/asm-generic/mm-arch-hooks.h16
-rw-r--r--include/asm-generic/pgtable.h34
-rw-r--r--include/asm-generic/scatterlist.h34
-rw-r--r--include/clocksource/timer-sp804.h28
-rw-r--r--include/drm/bridge/ptn3460.h45
-rw-r--r--include/drm/drmP.h19
-rw-r--r--include/drm/drm_atomic.h95
-rw-r--r--include/drm/drm_atomic_helper.h5
-rw-r--r--include/drm/drm_crtc.h65
-rw-r--r--include/drm/drm_crtc_helper.h3
-rw-r--r--include/drm/drm_dp_helper.h6
-rw-r--r--include/drm/drm_dp_mst_helper.h4
-rw-r--r--include/drm/drm_mem_util.h5
-rw-r--r--include/drm/drm_modes.h4
-rw-r--r--include/drm/i915_component.h1
-rw-r--r--include/drm/i915_pciids.h4
-rw-r--r--include/dt-bindings/clock/bcm-cygnus.h68
-rw-r--r--include/dt-bindings/clock/hi6220-clock.h173
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h450
-rw-r--r--include/dt-bindings/clock/jz4740-cgu.h37
-rw-r--r--include/dt-bindings/clock/jz4780-cgu.h88
-rw-r--r--include/dt-bindings/clock/lpc18xx-ccu.h74
-rw-r--r--include/dt-bindings/clock/lpc18xx-cgu.h41
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h1
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h3
-rw-r--r--include/dt-bindings/clock/marvell,pxa1928.h57
-rw-r--r--include/dt-bindings/clock/marvell,pxa910.h4
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h25
-rw-r--r--include/dt-bindings/clock/mt8135-clk.h194
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h235
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq806x.h2
-rw-r--r--include/dt-bindings/clock/r8a73a4-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h3
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h3
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h3
-rw-r--r--include/dt-bindings/clock/samsung,s2mps11.h23
-rw-r--r--include/dt-bindings/clock/vf610-clock.h3
-rw-r--r--include/dt-bindings/clock/zx296702-clock.h170
-rw-r--r--include/dt-bindings/mfd/arizona.h18
-rw-r--r--include/dt-bindings/mfd/st-lpc.h15
-rw-r--r--include/dt-bindings/net/ti-dp83867.h45
-rw-r--r--include/dt-bindings/phy/phy-pistachio-usb.h16
-rw-r--r--include/dt-bindings/pinctrl/am43xx.h1
-rw-r--r--include/dt-bindings/pinctrl/bcm2835.h27
-rw-r--r--include/dt-bindings/pinctrl/mt6397-pinfunc.h256
-rw-r--r--include/dt-bindings/reset-controller/mt8135-resets.h64
-rw-r--r--include/dt-bindings/reset-controller/mt8173-resets.h63
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq806x.h43
-rw-r--r--include/dt-bindings/sound/apq8016-lpass.h9
-rw-r--r--include/dt-bindings/sound/audio-jack-events.h9
-rw-r--r--include/dt-bindings/sound/tas2552.h18
-rw-r--r--include/linux/acpi.h82
-rw-r--r--include/linux/amba/sp810.h2
-rw-r--r--include/linux/ata.h13
-rw-r--r--include/linux/backing-dev-defs.h256
-rw-r--r--include/linux/backing-dev.h561
-rw-r--r--include/linux/backlight.h8
-rw-r--r--include/linux/bcm47xx_nvram.h17
-rw-r--r--include/linux/bcma/bcma.h9
-rw-r--r--include/linux/bcma/bcma_driver_pci.h11
-rw-r--r--include/linux/bio.h20
-rw-r--r--include/linux/blk-cgroup.h650
-rw-r--r--include/linux/blk-mq.h4
-rw-r--r--include/linux/blk_types.h23
-rw-r--r--include/linux/blkdev.h63
-rw-r--r--include/linux/bootmem.h8
-rw-r--r--include/linux/bpf.h36
-rw-r--r--include/linux/brcmphy.h7
-rw-r--r--include/linux/buffer_head.h7
-rw-r--r--include/linux/cacheinfo.h2
-rw-r--r--include/linux/can/skb.h2
-rw-r--r--include/linux/ceph/libceph.h21
-rw-r--r--include/linux/ceph/messenger.h3
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/cgroup-defs.h501
-rw-r--r--include/linux/cgroup.h994
-rw-r--r--include/linux/clk-provider.h16
-rw-r--r--include/linux/clk.h27
-rw-r--r--include/linux/clkdev.h11
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/compiler-gcc.h207
-rw-r--r--include/linux/compiler-gcc3.h23
-rw-r--r--include/linux/compiler-gcc4.h91
-rw-r--r--include/linux/compiler-gcc5.h67
-rw-r--r--include/linux/compiler-intel.h2
-rw-r--r--include/linux/compiler.h17
-rw-r--r--include/linux/configfs.h4
-rw-r--r--include/linux/console.h1
-rw-r--r--include/linux/console_struct.h1
-rw-r--r--include/linux/cper.h22
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpu_cooling.h39
-rw-r--r--include/linux/cpufreq.h6
-rw-r--r--include/linux/cpuidle.h20
-rw-r--r--include/linux/crc-itu-t.h2
-rw-r--r--include/linux/crc-t10dif.h1
-rw-r--r--include/linux/crush/crush.h40
-rw-r--r--include/linux/crush/hash.h6
-rw-r--r--include/linux/crush/mapper.h2
-rw-r--r--include/linux/dcache.h10
-rw-r--r--include/linux/device.h68
-rw-r--r--include/linux/dma-buf.h10
-rw-r--r--include/linux/dma/pxa-dma.h27
-rw-r--r--include/linux/dmaengine.h76
-rw-r--r--include/linux/dmapool.h2
-rw-r--r--include/linux/dmi.h4
-rw-r--r--include/linux/efi.h6
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/etherdevice.h42
-rw-r--r--include/linux/extcon.h134
-rw-r--r--include/linux/extcon/extcon-adc-jack.h5
-rw-r--r--include/linux/f2fs_fs.h8
-rw-r--r--include/linux/fdtable.h3
-rw-r--r--include/linux/filter.h30
-rw-r--r--include/linux/frontswap.h14
-rw-r--r--include/linux/fs.h88
-rw-r--r--include/linux/fscache-cache.h55
-rw-r--r--include/linux/fsl_devices.h1
-rw-r--r--include/linux/fsnotify_backend.h2
-rw-r--r--include/linux/ftrace.h3
-rw-r--r--include/linux/genalloc.h6
-rw-r--r--include/linux/gfp.h13
-rw-r--r--include/linux/goldfish.h19
-rw-r--r--include/linux/gpio/consumer.h15
-rw-r--r--include/linux/gpio/driver.h2
-rw-r--r--include/linux/hid-sensor-hub.h1
-rw-r--r--include/linux/hugetlb.h17
-rw-r--r--include/linux/hwspinlock.h7
-rw-r--r--include/linux/hyperv.h48
-rw-r--r--include/linux/i2c/twl.h1
-rw-r--r--include/linux/ide.h27
-rw-r--r--include/linux/ieee802154.h16
-rw-r--r--include/linux/if_link.h9
-rw-r--r--include/linux/if_macvlan.h2
-rw-r--r--include/linux/if_pppox.h2
-rw-r--r--include/linux/if_vlan.h28
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/iio/buffer.h3
-rw-r--r--include/linux/iio/iio.h3
-rw-r--r--include/linux/iio/types.h2
-rw-r--r--include/linux/inet_diag.h1
-rw-r--r--include/linux/inetdevice.h3
-rw-r--r--include/linux/init.h89
-rw-r--r--include/linux/init_task.h8
-rw-r--r--include/linux/input/touchscreen.h5
-rw-r--r--include/linux/intel-iommu.h5
-rw-r--r--include/linux/iommu.h46
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/irqchip.h14
-rw-r--r--include/linux/irqchip/ingenic.h23
-rw-r--r--include/linux/irqchip/irq-sa11x0.h17
-rw-r--r--include/linux/irqdesc.h56
-rw-r--r--include/linux/irqnr.h6
-rw-r--r--include/linux/jbd2.h4
-rw-r--r--include/linux/kernel.h36
-rw-r--r--include/linux/kernfs.h8
-rw-r--r--include/linux/kmemleak.h6
-rw-r--r--include/linux/kobject.h5
-rw-r--r--include/linux/kvm_host.h114
-rw-r--r--include/linux/kvm_types.h1
-rw-r--r--include/linux/leds.h25
-rw-r--r--include/linux/libata.h5
-rw-r--r--include/linux/libfdt_env.h4
-rw-r--r--include/linux/libnvdimm.h151
-rw-r--r--include/linux/livepatch.h8
-rw-r--r--include/linux/lockdep.h10
-rw-r--r--include/linux/lsm_hooks.h1888
-rw-r--r--include/linux/mailbox_client.h2
-rw-r--r--include/linux/mailbox_controller.h2
-rw-r--r--include/linux/mdio-gpio.h3
-rw-r--r--include/linux/mei_cl_bus.h38
-rw-r--r--include/linux/memblock.h67
-rw-r--r--include/linux/memcontrol.h29
-rw-r--r--include/linux/mfd/arizona/core.h9
-rw-r--r--include/linux/mfd/arizona/pdata.h8
-rw-r--r--include/linux/mfd/arizona/registers.h27
-rw-r--r--include/linux/mfd/axp20x.h98
-rw-r--r--include/linux/mfd/cros_ec.h86
-rw-r--r--include/linux/mfd/cros_ec_commands.h277
-rw-r--r--include/linux/mfd/da9055/core.h2
-rw-r--r--include/linux/mfd/da9063/pdata.h1
-rw-r--r--include/linux/mfd/max77686.h5
-rw-r--r--include/linux/mfd/stmpe.h44
-rw-r--r--include/linux/mfd/syscon/atmel-mc.h144
-rw-r--r--include/linux/mlx4/cmd.h6
-rw-r--r--include/linux/mlx4/device.h30
-rw-r--r--include/linux/mlx5/cq.h3
-rw-r--r--include/linux/mlx5/device.h215
-rw-r--r--include/linux/mlx5/driver.h173
-rw-r--r--include/linux/mlx5/flow_table.h54
-rw-r--r--include/linux/mlx5/mlx5_ifc.h6584
-rw-r--r--include/linux/mlx5/qp.h25
-rw-r--r--include/linux/mlx5/vport.h55
-rw-r--r--include/linux/mm-arch-hooks.h25
-rw-r--r--include/linux/mm.h53
-rw-r--r--include/linux/mm_types.h18
-rw-r--r--include/linux/mmiotrace.h2
-rw-r--r--include/linux/mmu_notifier.h12
-rw-r--r--include/linux/mmzone.h23
-rw-r--r--include/linux/mod_devicetable.h21
-rw-r--r--include/linux/module.h145
-rw-r--r--include/linux/moduleparam.h111
-rw-r--r--include/linux/mtd/cfi.h188
-rw-r--r--include/linux/mtd/nand.h16
-rw-r--r--include/linux/nd.h151
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdev_features.h5
-rw-r--r--include/linux/netdevice.h31
-rw-r--r--include/linux/netfilter.h45
-rw-r--r--include/linux/netfilter/ipset/ip_set.h61
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h38
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h27
-rw-r--r--include/linux/netfilter/x_tables.h60
-rw-r--r--include/linux/netfilter_bridge.h7
-rw-r--r--include/linux/netfilter_bridge/ebtables.h2
-rw-r--r--include/linux/netfilter_defs.h9
-rw-r--r--include/linux/netfilter_ingress.h41
-rw-r--r--include/linux/netfilter_ipv6.h3
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/nfs4.h1
-rw-r--r--include/linux/nfs_fs.h8
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/nfs_page.h1
-rw-r--r--include/linux/nfs_xdr.h51
-rw-r--r--include/linux/nmi.h3
-rw-r--r--include/linux/ntb.h970
-rw-r--r--include/linux/ntb_transport.h85
-rw-r--r--include/linux/nvme.h31
-rw-r--r--include/linux/of.h28
-rw-r--r--include/linux/of_device.h9
-rw-r--r--include/linux/of_dma.h21
-rw-r--r--include/linux/of_fdt.h4
-rw-r--r--include/linux/of_graph.h8
-rw-r--r--include/linux/oom.h12
-rw-r--r--include/linux/page-flags.h10
-rw-r--r--include/linux/page_owner.h13
-rw-r--r--include/linux/pagemap.h9
-rw-r--r--include/linux/parport.h43
-rw-r--r--include/linux/pata_arasan_cf_data.h2
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/perf_event.h7
-rw-r--r--include/linux/phy.h14
-rw-r--r--include/linux/phy/phy-sun4i-usb.h26
-rw-r--r--include/linux/phy/phy.h9
-rw-r--r--include/linux/pinctrl/consumer.h2
-rw-r--r--include/linux/pinctrl/pinctrl.h2
-rw-r--r--include/linux/pinctrl/pinmux.h6
-rw-r--r--include/linux/platform_data/dma-rcar-audmapp.h34
-rw-r--r--include/linux/platform_data/gpio-ath79.h19
-rw-r--r--include/linux/platform_data/keyboard-spear.h2
-rw-r--r--include/linux/platform_data/macb.h14
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h1
-rw-r--r--include/linux/platform_data/nfcmrvl.h40
-rw-r--r--include/linux/platform_data/st-nci.h (renamed from include/linux/platform_data/st21nfcb.h)14
-rw-r--r--include/linux/platform_data/st_nci.h29
-rw-r--r--include/linux/platform_data/usb-rcar-gen2-phy.h22
-rw-r--r--include/linux/platform_data/video-msm_fb.h146
-rw-r--r--include/linux/platform_data/wkup_m3.h30
-rw-r--r--include/linux/platform_device.h23
-rw-r--r--include/linux/pm.h14
-rw-r--r--include/linux/pm_clock.h10
-rw-r--r--include/linux/pm_wakeirq.h51
-rw-r--r--include/linux/pm_wakeup.h9
-rw-r--r--include/linux/pmem.h152
-rw-r--r--include/linux/power/max17042_battery.h4
-rw-r--r--include/linux/power_supply.h11
-rw-r--r--include/linux/preempt.h2
-rw-r--r--include/linux/printk.h8
-rw-r--r--include/linux/property.h2
-rw-r--r--include/linux/pxa2xx_ssp.h3
-rw-r--r--include/linux/qcom_scm.h13
-rw-r--r--include/linux/rbtree.h16
-rw-r--r--include/linux/rbtree_augmented.h21
-rw-r--r--include/linux/rbtree_latch.h212
-rw-r--r--include/linux/rcupdate.h15
-rw-r--r--include/linux/regmap.h14
-rw-r--r--include/linux/regulator/driver.h11
-rw-r--r--include/linux/regulator/machine.h9
-rw-r--r--include/linux/regulator/max8973-regulator.h4
-rw-r--r--include/linux/remoteproc.h9
-rw-r--r--include/linux/reset/bcm63xx_pmb.h88
-rw-r--r--include/linux/rio.h2
-rw-r--r--include/linux/rtc.h20
-rw-r--r--include/linux/rtc/sirfsoc_rtciobrg.h4
-rw-r--r--include/linux/rtnetlink.h16
-rw-r--r--include/linux/scatterlist.h46
-rw-r--r--include/linux/sched.h105
-rw-r--r--include/linux/scif.h993
-rw-r--r--include/linux/security.h1623
-rw-r--r--include/linux/seq_file.h1
-rw-r--r--include/linux/seqlock.h81
-rw-r--r--include/linux/serial_8250.h3
-rw-r--r--include/linux/serial_core.h2
-rw-r--r--include/linux/serial_sci.h86
-rw-r--r--include/linux/skbuff.h77
-rw-r--r--include/linux/slab.h4
-rw-r--r--include/linux/smpboot.h5
-rw-r--r--include/linux/soc/sunxi/sunxi_sram.h19
-rw-r--r--include/linux/sock_diag.h42
-rw-r--r--include/linux/spi/cc2520.h1
-rw-r--r--include/linux/ssb/ssb.h8
-rw-r--r--include/linux/stddef.h8
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/string.h1
-rw-r--r--include/linux/sunrpc/bc_xprt.h1
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/sched.h19
-rw-r--r--include/linux/sunrpc/svc_rdma.h11
-rw-r--r--include/linux/sunrpc/xprt.h39
-rw-r--r--include/linux/sunrpc/xprtrdma.h3
-rw-r--r--include/linux/swap.h1
-rw-r--r--include/linux/syscalls.h18
-rw-r--r--include/linux/sysctl.h3
-rw-r--r--include/linux/sysfs.h15
-rw-r--r--include/linux/syslog.h6
-rw-r--r--include/linux/tcp.h15
-rw-r--r--include/linux/thermal.h97
-rw-r--r--include/linux/tick.h19
-rw-r--r--include/linux/timekeeping.h1
-rw-r--r--include/linux/trace_events.h (renamed from include/linux/ftrace_event.h)177
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/u64_stats_sync.h7
-rw-r--r--include/linux/ulpi/driver.h60
-rw-r--r--include/linux/ulpi/interface.h23
-rw-r--r--include/linux/ulpi/regs.h130
-rw-r--r--include/linux/usb/cdc_ncm.h7
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/msm_hsusb.h22
-rw-r--r--include/linux/usb/msm_hsusb_hw.h9
-rw-r--r--include/linux/usb/net2280.h3
-rw-r--r--include/linux/usb/phy.h8
-rw-r--r--include/linux/usb/renesas_usbhs.h3
-rw-r--r--include/linux/usb/ulpi.h134
-rw-r--r--include/linux/usb/usb338x.h4
-rw-r--r--include/linux/virtio_byteorder.h24
-rw-r--r--include/linux/virtio_config.h18
-rw-r--r--include/linux/vme.h2
-rw-r--r--include/linux/vringh.h18
-rw-r--r--include/linux/wait.h13
-rw-r--r--include/linux/watchdog.h3
-rw-r--r--include/linux/workqueue.h31
-rw-r--r--include/linux/writeback.h221
-rw-r--r--include/linux/zpool.h5
-rw-r--r--include/media/adp1653.h8
-rw-r--r--include/media/adv7511.h7
-rw-r--r--include/media/adv7604.h1
-rw-r--r--include/media/adv7842.h142
-rw-r--r--include/media/rc-core.h9
-rw-r--r--include/media/rc-map.h4
-rw-r--r--include/media/v4l2-dv-timings.h6
-rw-r--r--include/media/v4l2-flash-led-class.h148
-rw-r--r--include/media/v4l2-mediabus.h2
-rw-r--r--include/media/v4l2-mem2mem.h4
-rw-r--r--include/media/v4l2-of.h20
-rw-r--r--include/media/v4l2-subdev.h20
-rw-r--r--include/media/videobuf2-core.h13
-rw-r--r--include/misc/cxl-base.h48
-rw-r--r--include/misc/cxl.h207
-rw-r--r--include/net/act_api.h8
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/af_unix.h1
-rw-r--r--include/net/af_vsock.h2
-rw-r--r--include/net/ax25.h16
-rw-r--r--include/net/bluetooth/bluetooth.h11
-rw-r--r--include/net/bluetooth/hci.h10
-rw-r--r--include/net/bluetooth/hci_core.h47
-rw-r--r--include/net/bond_options.h3
-rw-r--r--include/net/bonding.h3
-rw-r--r--include/net/cfg80211.h25
-rw-r--r--include/net/cfg802154.h70
-rw-r--r--include/net/checksum.h4
-rw-r--r--include/net/codel.h12
-rw-r--r--include/net/dst.h18
-rw-r--r--include/net/fib_rules.h3
-rw-r--r--include/net/flow_dissector.h220
-rw-r--r--include/net/flow_keys.h45
-rw-r--r--include/net/geneve.h5
-rw-r--r--include/net/ieee802154_netdev.h34
-rw-r--r--include/net/inet_common.h2
-rw-r--r--include/net/inet_frag.h19
-rw-r--r--include/net/inet_hashtables.h49
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/ip.h44
-rw-r--r--include/net/ip6_fib.h45
-rw-r--r--include/net/ip6_route.h21
-rw-r--r--include/net/ip_fib.h23
-rw-r--r--include/net/ipv6.h43
-rw-r--r--include/net/llc_conn.h2
-rw-r--r--include/net/mac80211.h265
-rw-r--r--include/net/mac802154.h160
-rw-r--r--include/net/net_namespace.h5
-rw-r--r--include/net/netfilter/br_netfilter.h60
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_queue.h2
-rw-r--r--include/net/netfilter/nf_tables.h15
-rw-r--r--include/net/netns/conntrack.h1
-rw-r--r--include/net/netns/ipv4.h3
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/netfilter.h4
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/netns/sctp.h1
-rw-r--r--include/net/netns/x_tables.h2
-rw-r--r--include/net/nfc/hci.h7
-rw-r--r--include/net/nfc/nci.h1
-rw-r--r--include/net/nfc/nci_core.h71
-rw-r--r--include/net/nfc/nfc.h22
-rw-r--r--include/net/nl802154.h85
-rw-r--r--include/net/request_sock.h4
-rw-r--r--include/net/sch_generic.h23
-rw-r--r--include/net/sctp/structs.h4
-rw-r--r--include/net/sock.h36
-rw-r--r--include/net/switchdev.h257
-rw-r--r--include/net/tcp.h95
-rw-r--r--include/ras/ras_event.h85
-rw-r--r--include/rdma/ib_addr.h6
-rw-r--r--include/rdma/ib_cache.h8
-rw-r--r--include/rdma/ib_mad.h41
-rw-r--r--include/rdma/ib_verbs.h408
-rw-r--r--include/rdma/iw_cm.h1
-rw-r--r--include/rdma/opa_smi.h106
-rw-r--r--include/rdma/rdma_cm.h2
-rw-r--r--include/scsi/scsi.h291
-rw-r--r--include/scsi/scsi_common.h64
-rw-r--r--include/scsi/scsi_device.h2
-rw-r--r--include/scsi/scsi_eh.h31
-rw-r--r--include/scsi/scsi_proto.h281
-rw-r--r--include/scsi/scsi_transport_srp.h1
-rw-r--r--include/scsi/srp.h7
-rw-r--r--include/soc/at91/at91rm9200_sdramc.h63
-rw-r--r--include/soc/imx/revision.h37
-rw-r--r--include/soc/imx/timer.h26
-rw-r--r--include/soc/sa1100/pwer.h15
-rw-r--r--include/soc/tegra/emc.h19
-rw-r--r--include/soc/tegra/fuse.h1
-rw-r--r--include/soc/tegra/mc.h20
-rw-r--r--include/soc/tegra/pmc.h2
-rw-r--r--include/sound/control.h2
-rw-r--r--include/sound/core.h4
-rw-r--r--include/sound/dmaengine_pcm.h5
-rw-r--r--include/sound/emux_synth.h2
-rw-r--r--include/sound/hda_i915.h36
-rw-r--r--include/sound/hda_register.h244
-rw-r--r--include/sound/hdaudio.h309
-rw-r--r--include/sound/hdaudio_ext.h132
-rw-r--r--include/sound/info.h37
-rw-r--r--include/sound/jack.h13
-rw-r--r--include/sound/pcm.h5
-rw-r--r--include/sound/pcm_drm_eld.h6
-rw-r--r--include/sound/pcm_iec958.h9
-rw-r--r--include/sound/rt5645.h6
-rw-r--r--include/sound/soc-dapm.h49
-rw-r--r--include/sound/soc-topology.h168
-rw-r--r--include/sound/soc.h118
-rw-r--r--include/sound/tlv.h15
-rw-r--r--include/target/iscsi/iscsi_target_core.h14
-rw-r--r--include/target/target_core_backend.h72
-rw-r--r--include/target/target_core_backend_configfs.h118
-rw-r--r--include/target/target_core_base.h201
-rw-r--r--include/target/target_core_configfs.h48
-rw-r--r--include/target/target_core_fabric.h71
-rw-r--r--include/trace/define_trace.h3
-rw-r--r--include/trace/events/btrfs.h55
-rw-r--r--include/trace/events/ext4.h35
-rw-r--r--include/trace/events/f2fs.h33
-rw-r--r--include/trace/events/power.h27
-rw-r--r--include/trace/events/target.h2
-rw-r--r--include/trace/events/thermal.h58
-rw-r--r--include/trace/events/thermal_power_allocator.h87
-rw-r--r--include/trace/events/v4l2.h3
-rw-r--r--include/trace/events/writeback.h15
-rw-r--r--include/trace/perf.h350
-rw-r--r--include/trace/syscall.h6
-rw-r--r--include/trace/trace_events.h (renamed from include/trace/ftrace.h)413
-rw-r--r--include/uapi/drm/amdgpu_drm.h645
-rw-r--r--include/uapi/drm/drm.h2
-rw-r--r--include/uapi/drm/drm_fourcc.h15
-rw-r--r--include/uapi/drm/drm_mode.h20
-rw-r--r--include/uapi/drm/i915_drm.h17
-rw-r--r--include/uapi/drm/msm_drm.h76
-rw-r--r--include/uapi/drm/radeon_drm.h3
-rw-r--r--include/uapi/linux/Kbuild4
-rw-r--r--include/uapi/linux/bpf.h43
-rw-r--r--include/uapi/linux/can.h6
-rw-r--r--include/uapi/linux/can/gw.h5
-rw-r--r--include/uapi/linux/dcbnl.h10
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/dvb/dmx.h10
-rw-r--r--include/uapi/linux/dvb/frontend.h223
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/ethtool.h37
-rw-r--r--include/uapi/linux/fuse.h3
-rw-r--r--include/uapi/linux/gsmmux.h (renamed from include/linux/gsmmux.h)3
-rw-r--r--include/uapi/linux/hsi/cs-protocol.h16
-rw-r--r--include/uapi/linux/hyperv.h8
-rw-r--r--include/uapi/linux/i2c.h1
-rw-r--r--include/uapi/linux/if_link.h29
-rw-r--r--include/uapi/linux/if_packet.h7
-rw-r--r--include/uapi/linux/if_tun.h6
-rw-r--r--include/uapi/linux/iio/types.h2
-rw-r--r--include/uapi/linux/in.h19
-rw-r--r--include/uapi/linux/inet_diag.h4
-rw-r--r--include/uapi/linux/ip.h1
-rw-r--r--include/uapi/linux/ipv6_route.h1
-rw-r--r--include/uapi/linux/kfd_ioctl.h135
-rw-r--r--include/uapi/linux/kvm.h9
-rw-r--r--include/uapi/linux/libc-compat.h22
-rw-r--r--include/uapi/linux/mic_common.h12
-rw-r--r--include/uapi/linux/nbd.h2
-rw-r--r--include/uapi/linux/ndctl.h197
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netfilter.h9
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h4
-rw-r--r--include/uapi/linux/netfilter/xt_socket.h8
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h2
-rw-r--r--include/uapi/linux/netlink.h16
-rw-r--r--include/uapi/linux/nfc.h10
-rw-r--r--include/uapi/linux/nfs4.h7
-rw-r--r--include/uapi/linux/nl80211.h28
-rw-r--r--include/uapi/linux/nvme.h5
-rw-r--r--include/uapi/linux/openvswitch.h4
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/pkt_cls.h57
-rw-r--r--include/uapi/linux/pkt_sched.h7
-rw-r--r--include/uapi/linux/rds.h10
-rw-r--r--include/uapi/linux/rtnetlink.h3
-rw-r--r--include/uapi/linux/scif_ioctl.h130
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/serial_reg.h3
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/sock_diag.h10
-rw-r--r--include/uapi/linux/tcp.h6
-rw-r--r--include/uapi/linux/tty.h1
-rw-r--r--include/uapi/linux/tty_flags.h2
-rw-r--r--include/uapi/linux/v4l2-mediabus.h4
-rw-r--r--include/uapi/linux/vfio.h102
-rw-r--r--include/uapi/linux/vhost.h14
-rw-r--r--include/uapi/linux/videodev2.h83
-rw-r--r--include/uapi/linux/virtio_gpu.h206
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--include/uapi/linux/virtio_net.h16
-rw-r--r--include/uapi/linux/virtio_pci.h6
-rw-r--r--include/uapi/linux/virtio_ring.h5
-rw-r--r--include/uapi/misc/cxl.h22
-rw-r--r--include/uapi/rdma/ib_user_verbs.h19
-rw-r--r--include/uapi/sound/asoc.h401
-rw-r--r--include/uapi/sound/tlv.h31
-rw-r--r--include/video/exynos5433_decon.h165
-rw-r--r--include/video/neomagic.h5
-rw-r--r--include/video/tdfx.h2
568 files changed, 29033 insertions, 7392 deletions
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 273de709495c..b52c0dc4b492 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -51,6 +51,7 @@
 #define METHOD_NAME__BBN        "_BBN"
 #define METHOD_NAME__CBA        "_CBA"
 #define METHOD_NAME__CID        "_CID"
+#define METHOD_NAME__CLS        "_CLS"
 #define METHOD_NAME__CRS        "_CRS"
 #define METHOD_NAME__DDN        "_DDN"
 #define METHOD_NAME__HID        "_HID"
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index a8f344363e77..f56de8c5d844 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -294,8 +294,12 @@
 
 /* DEBUG_PRINT functions */
 
-#define ACPI_DEBUG_PRINT(plist)         ACPI_ACTUAL_DEBUG plist
-#define ACPI_DEBUG_PRINT_RAW(plist)     ACPI_ACTUAL_DEBUG_RAW plist
+#ifndef COMPILER_VA_MACRO
+
+#define ACPI_DEBUG_PRINT(plist)         acpi_debug_print plist
+#define ACPI_DEBUG_PRINT_RAW(plist)     acpi_debug_print_raw plist
+
+#else
 
 /* Helper macros for DEBUG_PRINT */
 
@@ -315,6 +319,11 @@
 	ACPI_DO_DEBUG_PRINT (acpi_debug_print_raw, level, line, \
 			     filename, modulename, component, __VA_ARGS__)
 
+#define ACPI_DEBUG_PRINT(plist)         ACPI_ACTUAL_DEBUG plist
+#define ACPI_DEBUG_PRINT_RAW(plist)     ACPI_ACTUAL_DEBUG_RAW plist
+
+#endif
+
 /*
  * Function entry tracing
  *
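With COMPILER_VA_MACRO undefined, ACPI_DEBUG_PRINT(plist) now expands to a plain, non-variadic call to acpi_debug_print, which is why ACPICA call sites wrap the whole argument list in an extra pair of parentheses. A minimal standalone sketch of that double-parenthesis technique; debug_print() below is a printf-backed stand-in, not the real ACPICA function:

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for acpi_debug_print(): an ordinary variadic function. */
static void debug_print(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/*
 * The macro takes one argument, "plist", which callers write as a fully
 * parenthesized list. Expanding to "debug_print plist" pastes that list
 * back as the argument list of the call, so no __VA_ARGS__ support is
 * required from the compiler.
 */
#define DEBUG_PRINT(plist)	debug_print plist

int main(void)
{
	int level = 3;

	/* Note the double parentheses at the call site. */
	DEBUG_PRINT(("debug level is %d\n", level));
	return 0;
}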
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 8de4fa90e8c4..83061cac719b 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -208,7 +208,10 @@ struct acpi_device_flags {
 	u32 visited:1;
 	u32 hotplug_notify:1;
 	u32 is_dock_station:1;
-	u32 reserved:23;
+	u32 of_compatible_ok:1;
+	u32 coherent_dma:1;
+	u32 cca_seen:1;
+	u32 reserved:20;
 };
 
 /* File System */
@@ -271,7 +274,6 @@ struct acpi_device_power_flags {
 struct acpi_device_power_state {
 	struct {
 		u8 valid:1;
-		u8 os_accessible:1;
 		u8 explicit_set:1;	/* _PSx present? */
 		u8 reserved:6;
 	} flags;
@@ -380,12 +382,45 @@ struct acpi_device {
 	void (*remove)(struct acpi_device *);
 };
 
+static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+{
+	bool ret = false;
+
+	if (!adev)
+		return ret;
+
+	/**
+	 * Currently, we only support _CCA=1 (i.e. coherent_dma=1)
+	 * This should be equivalent to specifyig dma-coherent for
+	 * a device in OF.
+	 *
+	 * For the case when _CCA=0 (i.e. coherent_dma=0 && cca_seen=1),
+	 * There are two cases:
+	 * case 1. Do not support and disable DMA.
+	 * case 2. Support but rely on arch-specific cache maintenance for
+	 *         non-coherence DMA operations.
+	 * Currently, we implement case 1 above.
+	 *
+	 * For the case when _CCA is missing (i.e. cca_seen=0) and
+	 * platform specifies ACPI_CCA_REQUIRED, we do not support DMA,
+	 * and fallback to arch-specific default handling.
+	 *
+	 * See acpi_init_coherency() for more info.
+	 */
+	if (adev->flags.coherent_dma) {
+		ret = true;
+		if (coherent)
+			*coherent = adev->flags.coherent_dma;
+	}
+	return ret;
+}
+
 static inline bool is_acpi_node(struct fwnode_handle *fwnode)
 {
 	return fwnode && fwnode->type == FWNODE_ACPI;
 }
 
-static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode)
 {
 	return is_acpi_node(fwnode) ?
 		container_of(fwnode, struct acpi_device, fwnode) : NULL;
@@ -601,7 +636,7 @@ static inline bool acpi_device_can_wakeup(struct acpi_device *adev)
 
 static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
 {
-	return adev->power.states[ACPI_STATE_D3_COLD].flags.os_accessible;
+	return adev->power.states[ACPI_STATE_D3_COLD].flags.valid;
 }
 
 #else	/* CONFIG_ACPI */
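The new acpi_check_dma() helper above collapses the _CCA cases described in its comment into a single test of flags.coherent_dma: DMA is reported as supported only when firmware provided _CCA=1, and the optional *coherent out-parameter mirrors that bit. A standalone sketch of the same decision, using a hypothetical mock of the two new flag bits instead of the real struct acpi_device:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the two new acpi_device_flags bits. */
struct mock_adev {
	unsigned int cca_seen:1;	/* _CCA object was present */
	unsigned int coherent_dma:1;	/* _CCA evaluated to 1     */
};

/* Mirrors the logic of acpi_check_dma() in the hunk above. */
static bool check_dma(const struct mock_adev *adev, bool *coherent)
{
	bool ret = false;

	if (!adev)
		return ret;

	if (adev->coherent_dma) {
		ret = true;
		if (coherent)
			*coherent = true;
	}
	return ret;
}

int main(void)
{
	const struct mock_adev cca1 = { .cca_seen = 1, .coherent_dma = 1 };
	const struct mock_adev cca0 = { .cca_seen = 1, .coherent_dma = 0 };
	const struct mock_adev none = { 0 };
	bool coherent = false;

	/* _CCA=1: DMA supported and coherent. */
	printf("_CCA=1  -> dma=%d\n", check_dma(&cca1, &coherent));
	/* _CCA=0: treated as "do not support DMA" (case 1 in the comment). */
	printf("_CCA=0  -> dma=%d\n", check_dma(&cca0, NULL));
	/* No _CCA: also unsupported here; arch default handling applies. */
	printf("no _CCA -> dma=%d\n", check_dma(&none, NULL));
	return 0;
}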
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 0bc78df66d4b..d02df0a49d98 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -95,7 +95,7 @@ acpi_physical_address acpi_os_get_root_pointer(void);
 #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
 acpi_status
 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
-			    acpi_string * new_val);
+			    char **new_val);
 #endif
 
 #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 08ef57bc8d63..e8ec18a4a634 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20150410
+#define ACPI_CA_VERSION                 0x20150619
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -195,9 +195,18 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
  * address. Although ACPICA adheres to the ACPI specification which
  * requires the use of the corresponding 64-bit address if it is non-zero,
  * some machines have been found to have a corrupted non-zero 64-bit
- * address. Default is TRUE, favor the 32-bit addresses.
+ * address. Default is FALSE, do not favor the 32-bit addresses.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+
+/*
+ * Optionally use 32-bit FACS table addresses.
+ * It is reported that some platforms fail to resume from system suspending
+ * if 64-bit FACS table address is selected:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74021
+ * Default is TRUE, favor the 32-bit addresses.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_facs_addresses, TRUE);
 
 /*
  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
@@ -220,6 +229,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_install, FALSE);
 
 /*
+ * Optionally enable runtime namespace override.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_runtime_namespace_override, TRUE);
+
+/*
  * We keep track of the latest version of Windows that has been requested by
  * the BIOS. ACPI 5.0.
  */
@@ -814,8 +828,12 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state))
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
-				acpi_set_firmware_waking_vector(u32
-								physical_address))
+				acpi_set_firmware_waking_vectors
+				(acpi_physical_address physical_address,
+				 acpi_physical_address physical_address64))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+				acpi_set_firmware_waking_vector(u32
+								physical_address))
 #if ACPI_MACHINE_WIDTH == 64
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				acpi_set_firmware_waking_vector64(u64
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index d4081fef1095..2d5faf508cad 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -65,6 +65,7 @@
 #define ACPI_SIG_DSDT           "DSDT"	/* Differentiated System Description Table */
 #define ACPI_SIG_FADT           "FACP"	/* Fixed ACPI Description Table */
 #define ACPI_SIG_FACS           "FACS"	/* Firmware ACPI Control Structure */
+#define ACPI_SIG_OSDT           "OSDT"	/* Override System Description Table */
 #define ACPI_SIG_PSDT           "PSDT"	/* Persistent System Description Table */
 #define ACPI_SIG_RSDP           "RSD PTR "	/* Root System Description Pointer */
 #define ACPI_SIG_RSDT           "RSDT"	/* Root System Description Table */
@@ -284,6 +285,7 @@ struct acpi_table_fadt {
 	struct acpi_generic_address xgpe1_block;	/* 64-bit Extended General Purpose Event 1 Reg Blk address */
 	struct acpi_generic_address sleep_control;	/* 64-bit Sleep Control register (ACPI 5.0) */
 	struct acpi_generic_address sleep_status;	/* 64-bit Sleep Status register (ACPI 5.0) */
+	u64 hypervisor_id;	/* Hypervisor Vendor ID (ACPI 6.0) */
 };
 
 /* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */
@@ -341,7 +343,7 @@ enum acpi_preferred_pm_profiles {
 	PM_TABLET = 8
 };
 
-/* Values for sleep_status and sleep_control registers (V5 FADT) */
+/* Values for sleep_status and sleep_control registers (V5+ FADT) */
 
 #define ACPI_X_WAKE_STATUS          0x80
 #define ACPI_X_SLEEP_TYPE_MASK      0x1C
@@ -398,15 +400,17 @@ struct acpi_table_desc {
  * FADT is the bottom line as to what the version really is.
  *
  * For reference, the values below are as follows:
  *     FADT V1 size: 0x074
  *     FADT V2 size: 0x084
  *     FADT V3 size: 0x0F4
  *     FADT V4 size: 0x0F4
  *     FADT V5 size: 0x10C
+ *     FADT V6 size: 0x114
  */
 #define ACPI_FADT_V1_SIZE       (u32) (ACPI_FADT_OFFSET (flags) + 4)
 #define ACPI_FADT_V2_SIZE       (u32) (ACPI_FADT_OFFSET (minor_revision) + 1)
 #define ACPI_FADT_V3_SIZE       (u32) (ACPI_FADT_OFFSET (sleep_control))
-#define ACPI_FADT_V5_SIZE       (u32) (sizeof (struct acpi_table_fadt))
+#define ACPI_FADT_V5_SIZE       (u32) (ACPI_FADT_OFFSET (hypervisor_id))
+#define ACPI_FADT_V6_SIZE       (u32) (sizeof (struct acpi_table_fadt))
 
 #endif				/* __ACTBL_H__ */
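The size macros above exist so table code can tell which FADT revision the firmware really shipped from the table's Length field rather than its (often unreliable) Revision byte; with this change V5 ends at the new hypervisor_id field and V6 is the full structure. A sketch of that length-based classification, using the byte sizes quoted in the comment block as stand-ins for the real ACPI_FADT_OFFSET() arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Sizes quoted in the comment block of the hunk above. */
#define FADT_V1_SIZE	0x074u
#define FADT_V2_SIZE	0x084u
#define FADT_V3_SIZE	0x0F4u	/* V4 has the same length */
#define FADT_V5_SIZE	0x10Cu
#define FADT_V6_SIZE	0x114u

/* Classify an FADT by its Length header field, largest size first. */
static unsigned int fadt_version_from_length(uint32_t length)
{
	if (length >= FADT_V6_SIZE)
		return 6;
	if (length >= FADT_V5_SIZE)
		return 5;
	if (length >= FADT_V3_SIZE)
		return 3;	/* could be V3 or V4; their lengths are identical */
	if (length >= FADT_V2_SIZE)
		return 2;
	return 1;
}

int main(void)
{
	printf("0x10C -> FADT v%u\n", fadt_version_from_length(0x10C));
	printf("0x114 -> FADT v%u\n", fadt_version_from_length(0x114));
	return 0;
}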
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index b80b0e6dabc5..fcd570999f35 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -71,6 +71,7 @@
 #define ACPI_SIG_SBST           "SBST"	/* Smart Battery Specification Table */
 #define ACPI_SIG_SLIT           "SLIT"	/* System Locality Distance Information Table */
 #define ACPI_SIG_SRAT           "SRAT"	/* System Resource Affinity Table */
+#define ACPI_SIG_NFIT           "NFIT"	/* NVDIMM Firmware Interface Table */
 
 /*
  * All tables must be byte-packed to match the ACPI specification, since
@@ -673,7 +674,8 @@ enum acpi_madt_type {
 	ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
 	ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
 	ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
-	ACPI_MADT_TYPE_RESERVED = 15	/* 15 and greater are reserved */
+	ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
+	ACPI_MADT_TYPE_RESERVED = 16	/* 16 and greater are reserved */
 };
 
 /*
@@ -794,7 +796,7 @@ struct acpi_madt_local_x2apic_nmi {
 	u8 reserved[3];		/* reserved - must be zero */
 };
 
-/* 11: Generic Interrupt (ACPI 5.0) */
+/* 11: Generic Interrupt (ACPI 5.0 + ACPI 6.0 changes) */
 
 struct acpi_madt_generic_interrupt {
 	struct acpi_subtable_header header;
@@ -811,6 +813,8 @@ struct acpi_madt_generic_interrupt {
 	u32 vgic_interrupt;
 	u64 gicr_base_address;
 	u64 arm_mpidr;
+	u8 efficiency_class;
+	u8 reserved2[3];
 };
 
 /* Masks for Flags field above */
@@ -819,7 +823,7 @@ struct acpi_madt_generic_interrupt {
 #define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1)	/* 01: Performance Interrupt Mode */
 #define ACPI_MADT_VGIC_IRQ_MODE (1<<2)	/* 02: VGIC Maintenance Interrupt mode */
 
-/* 12: Generic Distributor (ACPI 5.0) */
+/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
 
 struct acpi_madt_generic_distributor {
 	struct acpi_subtable_header header;
@@ -827,7 +831,19 @@ struct acpi_madt_generic_distributor {
 	u32 gic_id;
 	u64 base_address;
 	u32 global_irq_base;
-	u32 reserved2;		/* reserved - must be zero */
+	u8 version;
+	u8 reserved2[3];	/* reserved - must be zero */
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_gic_version {
+	ACPI_MADT_GIC_VERSION_NONE = 0,
+	ACPI_MADT_GIC_VERSION_V1 = 1,
+	ACPI_MADT_GIC_VERSION_V2 = 2,
+	ACPI_MADT_GIC_VERSION_V3 = 3,
+	ACPI_MADT_GIC_VERSION_V4 = 4,
+	ACPI_MADT_GIC_VERSION_RESERVED = 5	/* 5 and greater are reserved */
 };
 
 /* 13: Generic MSI Frame (ACPI 5.1) */
@@ -855,6 +871,16 @@ struct acpi_madt_generic_redistributor {
 	u32 length;
 };
 
+/* 15: Generic Translator (ACPI 6.0) */
+
+struct acpi_madt_generic_translator {
+	struct acpi_subtable_header header;
+	u16 reserved;		/* reserved - must be zero */
+	u32 translation_id;
+	u64 base_address;
+	u32 reserved2;
+};
+
 /*
  * Common flags fields for MADT subtables
  */
@@ -908,6 +934,159 @@ struct acpi_msct_proximity {
 
 /*******************************************************************************
  *
+ * NFIT - NVDIMM Interface Table (ACPI 6.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_nfit {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u32 reserved;		/* Reserved, must be zero */
+};
+
+/* Subtable header for NFIT */
+
+struct acpi_nfit_header {
+	u16 type;
+	u16 length;
+};
+
+/* Values for subtable type in struct acpi_nfit_header */
+
+enum acpi_nfit_type {
+	ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0,
+	ACPI_NFIT_TYPE_MEMORY_MAP = 1,
+	ACPI_NFIT_TYPE_INTERLEAVE = 2,
+	ACPI_NFIT_TYPE_SMBIOS = 3,
+	ACPI_NFIT_TYPE_CONTROL_REGION = 4,
+	ACPI_NFIT_TYPE_DATA_REGION = 5,
+	ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
+	ACPI_NFIT_TYPE_RESERVED = 7	/* 7 and greater are reserved */
+};
+
+/*
+ * NFIT Subtables
+ */
+
+/* 0: System Physical Address Range Structure */
+
+struct acpi_nfit_system_address {
+	struct acpi_nfit_header header;
+	u16 range_index;
+	u16 flags;
+	u32 reserved;		/* Reseved, must be zero */
+	u32 proximity_domain;
+	u8 range_guid[16];
+	u64 address;
+	u64 length;
+	u64 memory_mapping;
+};
+
+/* Flags */
+
+#define ACPI_NFIT_ADD_ONLINE_ONLY       (1)	/* 00: Add/Online Operation Only */
+#define ACPI_NFIT_PROXIMITY_VALID       (1<<1)	/* 01: Proximity Domain Valid */
+
+/* Range Type GUIDs appear in the include/acuuid.h file */
+
+/* 1: Memory Device to System Address Range Map Structure */
+
+struct acpi_nfit_memory_map {
+	struct acpi_nfit_header header;
+	u32 device_handle;
+	u16 physical_id;
+	u16 region_id;
+	u16 range_index;
+	u16 region_index;
+	u64 region_size;
+	u64 region_offset;
+	u64 address;
+	u16 interleave_index;
+	u16 interleave_ways;
+	u16 flags;
+	u16 reserved;		/* Reserved, must be zero */
+};
+
+/* Flags */
+
+#define ACPI_NFIT_MEM_SAVE_FAILED       (1)	/* 00: Last SAVE to Memory Device failed */
+#define ACPI_NFIT_MEM_RESTORE_FAILED    (1<<1)	/* 01: Last RESTORE from Memory Device failed */
+#define ACPI_NFIT_MEM_FLUSH_FAILED      (1<<2)	/* 02: Platform flush failed */
+#define ACPI_NFIT_MEM_ARMED             (1<<3)	/* 03: Memory Device observed to be not armed */
+#define ACPI_NFIT_MEM_HEALTH_OBSERVED   (1<<4)	/* 04: Memory Device observed SMART/health events */
+#define ACPI_NFIT_MEM_HEALTH_ENABLED    (1<<5)	/* 05: SMART/health events enabled */
+
+/* 2: Interleave Structure */
+
+struct acpi_nfit_interleave {
+	struct acpi_nfit_header header;
+	u16 interleave_index;
+	u16 reserved;		/* Reserved, must be zero */
+	u32 line_count;
+	u32 line_size;
+	u32 line_offset[1];	/* Variable length */
+};
+
+/* 3: SMBIOS Management Information Structure */
+
+struct acpi_nfit_smbios {
+	struct acpi_nfit_header header;
+	u32 reserved;		/* Reserved, must be zero */
+	u8 data[1];		/* Variable length */
+};
+
+/* 4: NVDIMM Control Region Structure */
+
+struct acpi_nfit_control_region {
+	struct acpi_nfit_header header;
+	u16 region_index;
+	u16 vendor_id;
+	u16 device_id;
+	u16 revision_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_revision_id;
+	u8 reserved[6];		/* Reserved, must be zero */
+	u32 serial_number;
+	u16 code;
+	u16 windows;
+	u64 window_size;
+	u64 command_offset;
+	u64 command_size;
+	u64 status_offset;
+	u64 status_size;
+	u16 flags;
+	u8 reserved1[6];	/* Reserved, must be zero */
+};
+
+/* Flags */
+
+#define ACPI_NFIT_CONTROL_BUFFERED      (1)	/* Block Data Windows implementation is buffered */
+
+/* 5: NVDIMM Block Data Window Region Structure */
+
+struct acpi_nfit_data_region {
+	struct acpi_nfit_header header;
+	u16 region_index;
+	u16 windows;
+	u64 offset;
+	u64 size;
+	u64 capacity;
+	u64 start_address;
+};
+
+/* 6: Flush Hint Address Structure */
+
+struct acpi_nfit_flush_address {
+	struct acpi_nfit_header header;
+	u32 device_handle;
+	u16 hint_count;
+	u8 reserved[6];		/* Reserved, must be zero */
+	u64 hint_address[1];	/* Variable length */
+};
+
+/*******************************************************************************
+ *
  * SBST - Smart Battery Specification Table
  *        Version 1
  *
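Every NFIT subtable starts with the common acpi_nfit_header, so a consumer walks the table by stepping from header to header using the length field and dispatching on type. A minimal sketch of that walk over a raw byte buffer; the local nfit_header mirror and the fake two-subtable buffer are illustrative stand-ins, not the kernel's NFIT parser:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Local mirror of the subtable header introduced above. */
struct nfit_header {
	uint16_t type;
	uint16_t length;
};

/* Walk the subtables that follow the fixed NFIT header. */
static void walk_nfit(const uint8_t *buf, size_t size)
{
	size_t off = 0;

	while (off + sizeof(struct nfit_header) <= size) {
		struct nfit_header hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.length < sizeof(hdr) || off + hdr.length > size)
			break;	/* malformed table, stop */

		printf("subtable type %u, length %u\n", hdr.type, hdr.length);
		off += hdr.length;	/* next subtable starts right after */
	}
}

int main(void)
{
	/* Two fake subtables: type 0 (SPA range, len 8) and type 6 (flush, len 4). */
	const uint8_t buf[] = {
		0x00, 0x00, 0x08, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
		0x06, 0x00, 0x04, 0x00,
	};

	walk_nfit(buf, sizeof(buf));
	return 0;
}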
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index cafdeb50fbdf..a948fc586b9b 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -51,8 +51,8 @@
  * These tables are not consumed directly by the ACPICA subsystem, but are
  * included here to support device drivers and the AML disassembler.
  *
- * The tables in this file are defined by third-party specifications, and are
- * not defined directly by the ACPI specification itself.
+ * Generally, the tables in this file are defined by third-party specifications,
+ * and are not defined directly by the ACPI specification itself.
  *
  ******************************************************************************/
 
@@ -69,6 +69,7 @@
 #define ACPI_SIG_DMAR           "DMAR"	/* DMA Remapping table */
 #define ACPI_SIG_HPET           "HPET"	/* High Precision Event Timer table */
 #define ACPI_SIG_IBFT           "IBFT"	/* iSCSI Boot Firmware Table */
+#define ACPI_SIG_IORT           "IORT"	/* IO Remapping Table */
 #define ACPI_SIG_IVRS           "IVRS"	/* I/O Virtualization Reporting Structure */
 #define ACPI_SIG_LPIT           "LPIT"	/* Low Power Idle Table */
 #define ACPI_SIG_MCFG           "MCFG"	/* PCI Memory Mapped Configuration table */
@@ -79,6 +80,7 @@
 #define ACPI_SIG_SPCR           "SPCR"	/* Serial Port Console Redirection table */
 #define ACPI_SIG_SPMI           "SPMI"	/* Server Platform Management Interface table */
 #define ACPI_SIG_TCPA           "TCPA"	/* Trusted Computing Platform Alliance table */
+#define ACPI_SIG_TPM2           "TPM2"	/* Trusted Platform Module 2.0 H/W interface table */
 #define ACPI_SIG_UEFI           "UEFI"	/* Uefi Boot Optimization Table */
 #define ACPI_SIG_VRTC           "VRTC"	/* Virtual Real Time Clock Table */
 #define ACPI_SIG_WAET           "WAET"	/* Windows ACPI Emulated devices Table */
@@ -650,6 +652,131 @@ struct acpi_ibft_target {
 
 /*******************************************************************************
  *
+ * IORT - IO Remapping Table
+ *
+ * Conforms to "IO Remapping Table System Software on ARM Platforms",
+ * Document number: ARM DEN 0049A, 2015
+ *
+ ******************************************************************************/
+
+struct acpi_table_iort {
+	struct acpi_table_header header;
+	u32 node_count;
+	u32 node_offset;
+	u32 reserved;
+};
+
+/*
+ * IORT subtables
+ */
+struct acpi_iort_node {
+	u8 type;
+	u16 length;
+	u8 revision;
+	u32 reserved;
+	u32 mapping_count;
+	u32 mapping_offset;
+	char node_data[1];
+};
+
+/* Values for subtable Type above */
+
+enum acpi_iort_node_type {
+	ACPI_IORT_NODE_ITS_GROUP = 0x00,
+	ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
+	ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
+	ACPI_IORT_NODE_SMMU = 0x03
+};
+
+struct acpi_iort_id_mapping {
+	u32 input_base;		/* Lowest value in input range */
+	u32 id_count;		/* Number of IDs */
+	u32 output_base;	/* Lowest value in output range */
+	u32 output_reference;	/* A reference to the output node */
+	u32 flags;
+};
+
+/* Masks for Flags field above for IORT subtable */
+
+#define ACPI_IORT_ID_SINGLE_MAPPING (1)
+
+struct acpi_iort_memory_access {
+	u32 cache_coherency;
+	u8 hints;
+	u16 reserved;
+	u8 memory_flags;
+};
+
+/* Values for cache_coherency field above */
+
+#define ACPI_IORT_NODE_COHERENT         0x00000001	/* The device node is fully coherent */
+#define ACPI_IORT_NODE_NOT_COHERENT     0x00000000	/* The device node is not coherent */
+
+/* Masks for Hints field above */
+
+#define ACPI_IORT_HT_TRANSIENT          (1)
+#define ACPI_IORT_HT_WRITE              (1<<1)
+#define ACPI_IORT_HT_READ               (1<<2)
+#define ACPI_IORT_HT_OVERRIDE           (1<<3)
+
+/* Masks for memory_flags field above */
+
+#define ACPI_IORT_MF_COHERENCY          (1)
+#define ACPI_IORT_MF_ATTRIBUTES         (1<<1)
+
+/*
+ * IORT node specific subtables
+ */
+struct acpi_iort_its_group {
+	u32 its_count;
+	u32 identifiers[1];	/* GIC ITS identifier arrary */
+};
+
+struct acpi_iort_named_component {
+	u32 node_flags;
+	u64 memory_properties;	/* Memory access properties */
+	u8 memory_address_limit;	/* Memory address size limit */
+	char device_name[1];	/* Path of namespace object */
+};
+
+struct acpi_iort_root_complex {
+	u64 memory_properties;	/* Memory access properties */
+	u32 ats_attribute;
+	u32 pci_segment_number;
+};
+
+/* Values for ats_attribute field above */
+
+#define ACPI_IORT_ATS_SUPPORTED         0x00000001	/* The root complex supports ATS */
+#define ACPI_IORT_ATS_UNSUPPORTED       0x00000000	/* The root complex doesn't support ATS */
+
+struct acpi_iort_smmu {
+	u64 base_address;	/* SMMU base address */
+	u64 span;		/* Length of memory range */
+	u32 model;
+	u32 flags;
+	u32 global_interrupt_offset;
+	u32 context_interrupt_count;
+	u32 context_interrupt_offset;
+	u32 pmu_interrupt_count;
+	u32 pmu_interrupt_offset;
+	u64 interrupts[1];	/* Interrupt array */
+};
+
+/* Values for Model field above */
+
+#define ACPI_IORT_SMMU_V1               0x00000000	/* Generic SMMUv1 */
+#define ACPI_IORT_SMMU_V2               0x00000001	/* Generic SMMUv2 */
+#define ACPI_IORT_SMMU_CORELINK_MMU400  0x00000002	/* ARM Corelink MMU-400 */
+#define ACPI_IORT_SMMU_CORELINK_MMU500  0x00000003	/* ARM Corelink MMU-500 */
+
+/* Masks for Flags field above */
+
+#define ACPI_IORT_SMMU_DVM_SUPPORTED    (1)
+#define ACPI_IORT_SMMU_COHERENT_WALK    (1<<1)
+
+/*******************************************************************************
+ *
  * IVRS - I/O Virtualization Reporting Structure
  *        Version 1
  *
@@ -824,7 +951,7 @@ struct acpi_ivrs_memory {
  *
  * LPIT - Low Power Idle Table
  *
- * Conforms to "ACPI Low Power Idle Table (LPIT) and _LPD Proposal (DRAFT)"
+ * Conforms to "ACPI Low Power Idle Table (LPIT)" July 2014.
  *
  ******************************************************************************/
 
@@ -846,8 +973,7 @@ struct acpi_lpit_header {
 
 enum acpi_lpit_type {
 	ACPI_LPIT_TYPE_NATIVE_CSTATE = 0x00,
-	ACPI_LPIT_TYPE_SIMPLE_IO = 0x01,
-	ACPI_LPIT_TYPE_RESERVED = 0x02	/* 2 and above are reserved */
+	ACPI_LPIT_TYPE_RESERVED = 0x01	/* 1 and above are reserved */
 };
 
 /* Masks for Flags field above */
@@ -870,21 +996,6 @@ struct acpi_lpit_native {
 	u64 counter_frequency;
 };
 
-/* 0x01: Simple I/O based LPI structure */
-
-struct acpi_lpit_io {
-	struct acpi_lpit_header header;
-	struct acpi_generic_address entry_trigger;
-	u32 trigger_action;
-	u64 trigger_value;
-	u64 trigger_mask;
-	struct acpi_generic_address minimum_idle_state;
-	u32 residency;
-	u32 latency;
-	struct acpi_generic_address residency_counter;
-	u64 counter_frequency;
-};
-
 /*******************************************************************************
  *
  * MCFG - PCI Memory Mapped Configuration table and subtable
@@ -1069,20 +1180,85 @@ enum acpi_spmi_interface_types {
 /*******************************************************************************
  *
  * TCPA - Trusted Computing Platform Alliance table
- *        Version 1
+ *        Version 2
  *
- * Conforms to "TCG PC Specific Implementation Specification",
- * Version 1.1, August 18, 2003
+ * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
+ * December 19, 2014
+ *
+ * NOTE: There are two versions of the table with the same signature --
+ * the client version and the server version.
  *
  ******************************************************************************/
 
-struct acpi_table_tcpa {
+struct acpi_table_tcpa_client {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u16 platform_class;
+	u32 minimum_log_length;	/* Minimum length for the event log area */
+	u64 log_address;	/* Address of the event log area */
+};
+
+struct acpi_table_tcpa_server {
 	struct acpi_table_header header;	/* Common ACPI table header */
+	u16 platform_class;
 	u16 reserved;
-	u32 max_log_length;	/* Maximum length for the event log area */
+	u64 minimum_log_length;	/* Minimum length for the event log area */
 	u64 log_address;	/* Address of the event log area */
+	u16 spec_revision;
+	u8 device_flags;
+	u8 interrupt_flags;
+	u8 gpe_number;
+	u8 reserved2[3];
+	u32 global_interrupt;
+	struct acpi_generic_address address;
+	u32 reserved3;
+	struct acpi_generic_address config_address;
+	u8 group;
+	u8 bus;			/* PCI Bus/Segment/Function numbers */
+	u8 device;
+	u8 function;
+};
+
+/* Values for device_flags above */
+
+#define ACPI_TCPA_PCI_DEVICE            (1)
+#define ACPI_TCPA_BUS_PNP               (1<<1)
+#define ACPI_TCPA_ADDRESS_VALID         (1<<2)
+
+/* Values for interrupt_flags above */
+
+#define ACPI_TCPA_INTERRUPT_MODE        (1)
+#define ACPI_TCPA_INTERRUPT_POLARITY    (1<<1)
+#define ACPI_TCPA_SCI_VIA_GPE           (1<<2)
+#define ACPI_TCPA_GLOBAL_INTERRUPT      (1<<3)
+
+/*******************************************************************************
+ *
+ * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
+ *        Version 4
+ *
+ * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
+ * December 19, 2014
+ *
+ ******************************************************************************/
+
+struct acpi_table_tpm2 {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u16 platform_class;
+	u16 reserved;
+	u64 control_address;
+	u32 start_method;
+
+	/* Platform-specific data follows */
 };
 
+/* Values for start_method above */
+
+#define ACPI_TPM2_NOT_ALLOWED                       0
+#define ACPI_TPM2_START_METHOD                      2
+#define ACPI_TPM2_MEMORY_MAPPED                     6
+#define ACPI_TPM2_COMMAND_BUFFER                    7
+#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD  8
+
 /*******************************************************************************
  *
  * UEFI - UEFI Boot optimization Table
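The IORT layout added above is self-describing in the same way: the fixed header carries node_count and node_offset, each acpi_iort_node records its own length, and its ID mappings live at mapping_offset within the node. A sketch of the node walk over a raw buffer; the byte offsets (36-byte ACPI table header, 16-byte node header) and the hand-built demo table are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative byte offsets for the packed structures above: the standard
 * ACPI table header is 36 bytes, so acpi_table_iort is 48 bytes and each
 * acpi_iort_node has a 16-byte fixed part before node_data[].
 */
#define IORT_NODE_COUNT_OFF	36
#define IORT_NODE_OFFSET_OFF	40
#define IORT_NODE_HDR_SIZE	16

static uint32_t rd32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint16_t rd16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

/* Visit each IORT node: type at +0, length at +1, mapping_count at +8. */
static void walk_iort(const uint8_t *tbl, size_t size)
{
	uint32_t count = rd32(tbl + IORT_NODE_COUNT_OFF);
	size_t off = rd32(tbl + IORT_NODE_OFFSET_OFF);
	uint32_t i;

	for (i = 0; i < count && off + IORT_NODE_HDR_SIZE <= size; i++) {
		unsigned int type = tbl[off];
		unsigned int len = rd16(tbl + off + 1);
		unsigned int mappings = rd32(tbl + off + 8);

		printf("node %u: type %u, length %u, %u ID mappings\n",
		       (unsigned int)i, type, len, mappings);
		if (len < IORT_NODE_HDR_SIZE)
			break;		/* malformed node */
		off += len;		/* nodes are laid out back to back */
	}
}

int main(void)
{
	uint8_t tbl[64] = { 0 };

	/* Fixed IORT header: one node, starting right after the 48-byte header. */
	tbl[IORT_NODE_COUNT_OFF] = 1;
	tbl[IORT_NODE_OFFSET_OFF] = 48;

	/* One ITS group node (type 0) with a 16-byte header and no mappings. */
	tbl[48 + 1] = IORT_NODE_HDR_SIZE;	/* length, little-endian low byte */

	walk_iort(tbl, sizeof(tbl));
	return 0;
}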
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 440ca8104b43..1df891660f43 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -51,7 +51,8 @@
  * These tables are not consumed directly by the ACPICA subsystem, but are
  * included here to support device drivers and the AML disassembler.
  *
- * The tables in this file are fully defined within the ACPI specification.
+ * In general, the tables in this file are fully defined within the ACPI
+ * specification.
  *
  ******************************************************************************/
 
@@ -68,7 +69,9 @@
 #define ACPI_SIG_PCCT           "PCCT"	/* Platform Communications Channel Table */
 #define ACPI_SIG_PMTT           "PMTT"	/* Platform Memory Topology Table */
 #define ACPI_SIG_RASF           "RASF"	/* RAS Feature table */
-#define ACPI_SIG_TPM2           "TPM2"	/* Trusted Platform Module 2.0 H/W interface table */
+#define ACPI_SIG_STAO           "STAO"	/* Status Override table */
+#define ACPI_SIG_WPBT           "WPBT"	/* Windows Platform Binary Table */
+#define ACPI_SIG_XENV           "XENV"	/* Xen Environment table */
 
 #define ACPI_SIG_S3PT           "S3PT"	/* S3 Performance (sub)Table */
 #define ACPI_SIG_PCCS           "PCC"	/* PCC Shared Memory Region */
@@ -77,7 +80,6 @@
 
 #define ACPI_SIG_MATR           "MATR"	/* Memory Address Translation Table */
 #define ACPI_SIG_MSDM           "MSDM"	/* Microsoft Data Management Table */
-#define ACPI_SIG_WPBT           "WPBT"	/* Windows Platform Binary Table */
 
 /*
  * All tables must be byte-packed to match the ACPI specification, since
@@ -117,6 +119,8 @@ struct acpi_table_bgrt {
 /*******************************************************************************
  *
  * DRTM - Dynamic Root of Trust for Measurement table
+ * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0
+ * Table version 1
  *
  ******************************************************************************/
 
@@ -133,22 +137,40 @@ struct acpi_table_drtm {
 	u32 flags;
 };
 
-/* 1) Validated Tables List */
+/* Flag Definitions for above */
+
+#define ACPI_DRTM_ACCESS_ALLOWED            (1)
+#define ACPI_DRTM_ENABLE_GAP_CODE           (1<<1)
+#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS   (1<<2)
+#define ACPI_DRTM_AUTHORITY_ORDER           (1<<3)
+
+/* 1) Validated Tables List (64-bit addresses) */
 
-struct acpi_drtm_vtl_list {
-	u32 validated_table_list_count;
+struct acpi_drtm_vtable_list {
+	u32 validated_table_count;
+	u64 validated_tables[1];
 };
 
-/* 2) Resources List */
+/* 2) Resources List (of Resource Descriptors) */
+
+/* Resource Descriptor */
+
+struct acpi_drtm_resource {
+	u8 size[7];
+	u8 type;
+	u64 address;
+};
 
 struct acpi_drtm_resource_list {
-	u32 resource_list_count;
+	u32 resource_count;
+	struct acpi_drtm_resource resources[1];
 };
 
 /* 3) Platform-specific Identifiers List */
 
-struct acpi_drtm_id_list {
+struct acpi_drtm_dps_id {
151 u32 id_list_count; 172 u32 dps_id_length;
173 u8 dps_id[16];
152}; 174};
153 175
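Each DRTM sub-list is a count followed by a flexible array, so walking one is a simple bounded loop. An illustrative sketch over the validated tables list (printing only; function name hypothetical):

#include <linux/acpi.h>
#include <linux/printk.h>

/* Sketch: dump the physical addresses in a DRTM validated tables list. */
static void drtm_dump_vtables(struct acpi_drtm_vtable_list *vtl)
{
	u32 i;

	for (i = 0; i < vtl->validated_table_count; i++)
		pr_info("DRTM validated table %u at 0x%llx\n", i,
			(unsigned long long)vtl->validated_tables[i]);
}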
154/******************************************************************************* 176/*******************************************************************************
@@ -685,32 +707,52 @@ enum acpi_rasf_status {
685 707
686/******************************************************************************* 708/*******************************************************************************
687 * 709 *
688 * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table 710 * STAO - Status Override Table (_STA override) - ACPI 6.0
689 * Version 3 711 * Version 1
690 * 712 *
691 * Conforms to "TPM 2.0 Hardware Interface Table (TPM2)" 29 November 2011 713 * Conforms to "ACPI Specification for Status Override Table"
714 * 6 January 2015
692 * 715 *
693 ******************************************************************************/ 716 ******************************************************************************/
694 717
695struct acpi_table_tpm2 { 718struct acpi_table_stao {
696 struct acpi_table_header header; /* Common ACPI table header */ 719 struct acpi_table_header header; /* Common ACPI table header */
697 u32 flags; 720 u8 ignore_uart;
698 u64 control_address; 721};
699 u32 start_method; 722
723/*******************************************************************************
724 *
725 * WPBT - Windows Platform Environment Table (ACPI 6.0)
726 * Version 1
727 *
728 * Conforms to "Windows Platform Binary Table (WPBT)" 29 November 2011
729 *
730 ******************************************************************************/
731
732struct acpi_table_wpbt {
733 struct acpi_table_header header; /* Common ACPI table header */
734 u32 handoff_size;
735 u64 handoff_address;
736 u8 layout;
737 u8 type;
738 u16 arguments_length;
700}; 739};
701 740
702/* Control area structure (not part of table, pointed to by control_address) */ 741/*******************************************************************************
742 *
743 * XENV - Xen Environment Table (ACPI 6.0)
744 * Version 1
745 *
746 * Conforms to "ACPI Specification for Xen Environment Table" 4 January 2015
747 *
748 ******************************************************************************/
703 749
704struct acpi_tpm2_control { 750struct acpi_table_xenv {
705 u32 reserved; 751 struct acpi_table_header header; /* Common ACPI table header */
706 u32 error; 752 u64 grant_table_address;
707 u32 cancel; 753 u64 grant_table_size;
708 u32 start; 754 u32 event_interrupt;
709 u64 interrupt_control; 755 u8 event_flags;
710 u32 command_size;
711 u64 command_address;
712 u32 response_size;
713 u64 response_address;
714}; 756};
715 757
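Like the other new ACPI 6.0 tables here, XENV is consumed by fetching it once and reading its fixed fields. A hedged sketch (error handling trimmed, function name hypothetical):

#include <linux/acpi.h>
#include <linux/errno.h>

/* Sketch: read the Xen grant table location advertised via XENV. */
static int xenv_grant_table(u64 *addr, u64 *size)
{
	struct acpi_table_xenv *xenv;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_XENV, 1,
				(struct acpi_table_header **)&xenv);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	*addr = xenv->grant_table_address;
	*size = xenv->grant_table_size;
	return 0;
}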
716/* Reset to default packing */ 758/* Reset to default packing */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 1c3002e1db20..c2a41d223162 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -471,11 +471,6 @@ typedef u8 acpi_owner_id;
471 471
472#define ACPI_INTEGER_BIT_SIZE 64 472#define ACPI_INTEGER_BIT_SIZE 64
473#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ 473#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */
474
475#if ACPI_MACHINE_WIDTH == 64
476#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */
477#endif
478
479#define ACPI_MAX64_DECIMAL_DIGITS 20 474#define ACPI_MAX64_DECIMAL_DIGITS 20
480#define ACPI_MAX32_DECIMAL_DIGITS 10 475#define ACPI_MAX32_DECIMAL_DIGITS 10
481#define ACPI_MAX16_DECIMAL_DIGITS 5 476#define ACPI_MAX16_DECIMAL_DIGITS 5
@@ -530,6 +525,7 @@ typedef u64 acpi_integer;
530#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p)) 525#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p))
531#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) 526#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p))
532#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) 527#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
528#define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b)))
533#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))) 529#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
534 530
535/* Pointer/Integer type conversions */ 531/* Pointer/Integer type conversions */
@@ -546,14 +542,14 @@ typedef u64 acpi_integer;
546#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) 542#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
547#define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) 543#define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
548#else 544#else
549#define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) 545#define ACPI_COMPARE_NAME(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
550#define ACPI_MOVE_NAME(dest,src) (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE)) 546#define ACPI_MOVE_NAME(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
551#endif 547#endif
552 548
553/* Support for the special RSDP signature (8 characters) */ 549/* Support for the special RSDP signature (8 characters) */
554 550
555#define ACPI_VALIDATE_RSDP_SIG(a) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8)) 551#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
556#define ACPI_MAKE_RSDP_SIG(dest) (ACPI_MEMCPY (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8)) 552#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
557 553
558/******************************************************************************* 554/*******************************************************************************
559 * 555 *
@@ -572,6 +568,7 @@ typedef u64 acpi_integer;
572#define ACPI_NO_ACPI_ENABLE 0x10 568#define ACPI_NO_ACPI_ENABLE 0x10
573#define ACPI_NO_DEVICE_INIT 0x20 569#define ACPI_NO_DEVICE_INIT 0x20
574#define ACPI_NO_OBJECT_INIT 0x40 570#define ACPI_NO_OBJECT_INIT 0x40
571#define ACPI_NO_FACS_INIT 0x80
575 572
576/* 573/*
577 * Initialization state 574 * Initialization state
@@ -1144,6 +1141,10 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
1144 1141
1145#define ACPI_UUID_LENGTH 16 1142#define ACPI_UUID_LENGTH 16
1146 1143
1144/* Length of 3-byte PCI class code values when converted back to a string */
1145
1146#define ACPI_PCICLS_STRING_SIZE 7 /* Includes null terminator */
1147
1147/* Structures used for device/processor HID, UID, CID, and SUB */ 1148/* Structures used for device/processor HID, UID, CID, and SUB */
1148 1149
1149struct acpi_pnp_device_id { 1150struct acpi_pnp_device_id {
@@ -1166,7 +1167,7 @@ struct acpi_device_info {
1166 u32 name; /* ACPI object Name */ 1167 u32 name; /* ACPI object Name */
1167 acpi_object_type type; /* ACPI object Type */ 1168 acpi_object_type type; /* ACPI object Type */
1168 u8 param_count; /* If a method, required parameter count */ 1169 u8 param_count; /* If a method, required parameter count */
1169 u8 valid; /* Indicates which optional fields are valid */ 1170 u16 valid; /* Indicates which optional fields are valid */
1170 u8 flags; /* Miscellaneous info */ 1171 u8 flags; /* Miscellaneous info */
1171 u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ 1172 u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */
1172 u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ 1173 u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */
@@ -1175,6 +1176,7 @@ struct acpi_device_info {
1175 struct acpi_pnp_device_id hardware_id; /* _HID value */ 1176 struct acpi_pnp_device_id hardware_id; /* _HID value */
1176 struct acpi_pnp_device_id unique_id; /* _UID value */ 1177 struct acpi_pnp_device_id unique_id; /* _UID value */
1177 struct acpi_pnp_device_id subsystem_id; /* _SUB value */ 1178 struct acpi_pnp_device_id subsystem_id; /* _SUB value */
1179 struct acpi_pnp_device_id class_code; /* _CLS value */
1178 struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */ 1180 struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */
1179}; 1181};
1180 1182
@@ -1184,14 +1186,15 @@ struct acpi_device_info {
1184 1186
1185/* Flags for Valid field above (acpi_get_object_info) */ 1187/* Flags for Valid field above (acpi_get_object_info) */
1186 1188
1187#define ACPI_VALID_STA 0x01 1189#define ACPI_VALID_STA 0x0001
1188#define ACPI_VALID_ADR 0x02 1190#define ACPI_VALID_ADR 0x0002
1189#define ACPI_VALID_HID 0x04 1191#define ACPI_VALID_HID 0x0004
1190#define ACPI_VALID_UID 0x08 1192#define ACPI_VALID_UID 0x0008
1191#define ACPI_VALID_SUB 0x10 1193#define ACPI_VALID_SUB 0x0010
1192#define ACPI_VALID_CID 0x20 1194#define ACPI_VALID_CID 0x0020
1193#define ACPI_VALID_SXDS 0x40 1195#define ACPI_VALID_CLS 0x0040
1194#define ACPI_VALID_SXWS 0x80 1196#define ACPI_VALID_SXDS 0x0100
1197#define ACPI_VALID_SXWS 0x0200
1195 1198
1196/* Flags for _STA return value (current_status above) */ 1199/* Flags for _STA return value (current_status above) */
1197 1200
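Because the valid field is now 16 bits wide, callers of acpi_get_object_info() that test individual ACPI_VALID_* bits keep working unchanged, and the new _CLS bit is checked the same way. A sketch (function name hypothetical; the returned buffer is assumed to be kfree-able as usual in the kernel):

#include <linux/acpi.h>
#include <linux/slab.h>

/* Sketch: does this device report a PCI-style class code via _CLS? */
static bool acpi_dev_has_cls(acpi_handle handle)
{
	struct acpi_device_info *info;
	bool ret;

	if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
		return false;

	ret = !!(info->valid & ACPI_VALID_CLS);
	kfree(info);
	return ret;
}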
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
new file mode 100644
index 000000000000..80fe8cf74d7a
--- /dev/null
+++ b/include/acpi/acuuid.h
@@ -0,0 +1,89 @@
1/******************************************************************************
2 *
3 * Name: acuuid.h - ACPI-related UUID/GUID definitions
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#ifndef __ACUUID_H__
45#define __ACUUID_H__
46
47/*
48 * Note1: UUIDs and GUIDs are defined to be identical in ACPI.
49 *
50 * Note2: This file is standalone and should remain that way.
51 */
52
53/* Controllers */
54
55#define UUID_GPIO_CONTROLLER "4f248f40-d5e2-499f-834c-27758ea1cd3f"
56#define UUID_USB_CONTROLLER "ce2ee385-00e6-48cb-9f05-2edb927c4899"
57#define UUID_SATA_CONTROLLER "e4db149b-fcfe-425b-a6d8-92357d78fc7f"
58
59/* Devices */
60
61#define UUID_PCI_HOST_BRIDGE "33db4d5b-1ff7-401c-9657-7441c03dd766"
62#define UUID_I2C_DEVICE "3cdff6f7-4267-4555-ad05-b30a3d8938de"
63#define UUID_POWER_BUTTON "dfbcf3c5-e7a5-44e6-9c1f-29c76f6e059c"
64
65/* Interfaces */
66
67#define UUID_DEVICE_LABELING "e5c937d0-3553-4d7a-9117-ea4d19c3434d"
68#define UUID_PHYSICAL_PRESENCE "3dddfaa6-361b-4eb4-a424-8d10089d1653"
69
70/* NVDIMM - NFIT table */
71
72#define UUID_VOLATILE_MEMORY "7305944f-fdda-44e3-b16c-3f22d252e5d0"
73#define UUID_PERSISTENT_MEMORY "66f0d379-b4f3-4074-ac43-0d3318b78cdb"
74#define UUID_CONTROL_REGION "92f701f6-13b4-405d-910b-299367e8234c"
75#define UUID_DATA_REGION "91af0530-5d86-470e-a6b0-0a2db9408249"
76#define UUID_VOLATILE_VIRTUAL_DISK "77ab535a-45fc-624b-5560-f7b281d1f96e"
77#define UUID_VOLATILE_VIRTUAL_CD "3d5abd30-4175-87ce-6d64-d2ade523c4bb"
78#define UUID_PERSISTENT_VIRTUAL_DISK "5cea02c9-4d07-69d3-269f-4496fbe096f9"
79#define UUID_PERSISTENT_VIRTUAL_CD "08018188-42cd-bb48-100f-5387d53ded3d"
80
81/* Miscellaneous */
82
83#define UUID_PLATFORM_CAPABILITIES "0811b06e-4a27-44f9-8d60-3cbbc22e7b48"
84#define UUID_DYNAMIC_ENUMERATION "d8c1a3a6-be9b-4c9b-91bf-c3cb81fc5daf"
85#define UUID_BATTERY_THERMAL_LIMIT "4c2067e3-887d-475c-9720-4af1d3ed602e"
86#define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500"
87#define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301"
88
 89#endif /* __ACUUID_H__ */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index ecdf9405dd3a..3cedd43943f4 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -175,6 +175,9 @@
175#elif defined(_APPLE) || defined(__APPLE__) 175#elif defined(_APPLE) || defined(__APPLE__)
176#include "acmacosx.h" 176#include "acmacosx.h"
177 177
178#elif defined(__DragonFly__)
179#include "acdragonfly.h"
180
178#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 181#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
179#include "acfreebsd.h" 182#include "acfreebsd.h"
180 183
@@ -343,29 +346,6 @@
343 346
344/* We will be linking to the standard Clib functions */ 347/* We will be linking to the standard Clib functions */
345 348
346#define ACPI_STRSTR(s1,s2) strstr((s1), (s2))
347#define ACPI_STRCHR(s1,c) strchr((s1), (c))
348#define ACPI_STRLEN(s) (acpi_size) strlen((s))
349#define ACPI_STRCPY(d,s) (void) strcpy((d), (s))
350#define ACPI_STRNCPY(d,s,n) (void) strncpy((d), (s), (acpi_size)(n))
351#define ACPI_STRNCMP(d,s,n) strncmp((d), (s), (acpi_size)(n))
352#define ACPI_STRCMP(d,s) strcmp((d), (s))
353#define ACPI_STRCAT(d,s) (void) strcat((d), (s))
354#define ACPI_STRNCAT(d,s,n) strncat((d), (s), (acpi_size)(n))
355#define ACPI_STRTOUL(d,s,n) strtoul((d), (s), (acpi_size)(n))
356#define ACPI_MEMCMP(s1,s2,n) memcmp((const char *)(s1), (const char *)(s2), (acpi_size)(n))
357#define ACPI_MEMCPY(d,s,n) (void) memcpy((d), (s), (acpi_size)(n))
358#define ACPI_MEMSET(d,s,n) (void) memset((d), (s), (acpi_size)(n))
359
360#define ACPI_TOUPPER(i) toupper((int) (i))
361#define ACPI_TOLOWER(i) tolower((int) (i))
362#define ACPI_IS_XDIGIT(i) isxdigit((int) (i))
363#define ACPI_IS_DIGIT(i) isdigit((int) (i))
364#define ACPI_IS_SPACE(i) isspace((int) (i))
365#define ACPI_IS_UPPER(i) isupper((int) (i))
366#define ACPI_IS_PRINT(i) isprint((int) (i))
367#define ACPI_IS_ALPHA(i) isalpha((int) (i))
368
369#else 349#else
370 350
371/****************************************************************************** 351/******************************************************************************
@@ -403,22 +383,6 @@ typedef char *va_list;
403 383
404/* Use the local (ACPICA) definitions of the clib functions */ 384/* Use the local (ACPICA) definitions of the clib functions */
405 385
406#define ACPI_STRSTR(s1,s2) acpi_ut_strstr ((s1), (s2))
407#define ACPI_STRCHR(s1,c) acpi_ut_strchr ((s1), (c))
408#define ACPI_STRLEN(s) (acpi_size) acpi_ut_strlen ((s))
409#define ACPI_STRCPY(d,s) (void) acpi_ut_strcpy ((d), (s))
410#define ACPI_STRNCPY(d,s,n) (void) acpi_ut_strncpy ((d), (s), (acpi_size)(n))
411#define ACPI_STRNCMP(d,s,n) acpi_ut_strncmp ((d), (s), (acpi_size)(n))
412#define ACPI_STRCMP(d,s) acpi_ut_strcmp ((d), (s))
413#define ACPI_STRCAT(d,s) (void) acpi_ut_strcat ((d), (s))
414#define ACPI_STRNCAT(d,s,n) acpi_ut_strncat ((d), (s), (acpi_size)(n))
415#define ACPI_STRTOUL(d,s,n) acpi_ut_strtoul ((d), (s), (acpi_size)(n))
416#define ACPI_MEMCMP(s1,s2,n) acpi_ut_memcmp((const char *)(s1), (const char *)(s2), (acpi_size)(n))
417#define ACPI_MEMCPY(d,s,n) (void) acpi_ut_memcpy ((d), (s), (acpi_size)(n))
418#define ACPI_MEMSET(d,v,n) (void) acpi_ut_memset ((d), (v), (acpi_size)(n))
419#define ACPI_TOUPPER(c) acpi_ut_to_upper ((int) (c))
420#define ACPI_TOLOWER(c) acpi_ut_to_lower ((int) (c))
421
422#endif /* ACPI_USE_SYSTEM_CLIBRARY */ 386#endif /* ACPI_USE_SYSTEM_CLIBRARY */
423 387
424#ifndef ACPI_FILE 388#ifndef ACPI_FILE
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 71e5ec5b07a3..0a7dc8e583b1 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -56,6 +56,15 @@
56#if defined(_LINUX) || defined(__linux__) 56#if defined(_LINUX) || defined(__linux__)
57#include <acpi/platform/aclinuxex.h> 57#include <acpi/platform/aclinuxex.h>
58 58
59#elif defined(_AED_EFI)
60#include "acefiex.h"
61
62#elif defined(_GNU_EFI)
63#include "acefiex.h"
64
65#elif defined(__DragonFly__)
66#include "acdragonflyex.h"
67
59#endif 68#endif
60 69
61/*! [End] no source code translation !*/ 70/*! [End] no source code translation !*/
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index f54de0a63558..5457a06cb528 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -75,4 +75,8 @@
75#undef strchr 75#undef strchr
76#endif 76#endif
77 77
78/* GCC supports __VA_ARGS__ in macros */
79
80#define COMPILER_VA_MACRO 1
81
78#endif /* __ACGCC_H__ */ 82#endif /* __ACGCC_H__ */
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 843ef1adfbfa..e840b294c6f5 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -16,23 +16,36 @@ struct acpi_device;
16#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110 16#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110
17#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200 17#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200
18 18
19enum acpi_backlight_type {
20 acpi_backlight_undef = -1,
21 acpi_backlight_none = 0,
22 acpi_backlight_video,
23 acpi_backlight_vendor,
24 acpi_backlight_native,
25};
26
19#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) 27#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
20extern int acpi_video_register(void); 28extern int acpi_video_register(void);
21extern void acpi_video_unregister(void); 29extern void acpi_video_unregister(void);
22extern void acpi_video_unregister_backlight(void);
23extern int acpi_video_get_edid(struct acpi_device *device, int type, 30extern int acpi_video_get_edid(struct acpi_device *device, int type,
24 int device_id, void **edid); 31 int device_id, void **edid);
25extern bool acpi_video_verify_backlight_support(void); 32extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
33extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
26#else 34#else
27static inline int acpi_video_register(void) { return 0; } 35static inline int acpi_video_register(void) { return 0; }
28static inline void acpi_video_unregister(void) { return; } 36static inline void acpi_video_unregister(void) { return; }
29static inline void acpi_video_unregister_backlight(void) { return; }
30static inline int acpi_video_get_edid(struct acpi_device *device, int type, 37static inline int acpi_video_get_edid(struct acpi_device *device, int type,
31 int device_id, void **edid) 38 int device_id, void **edid)
32{ 39{
33 return -ENODEV; 40 return -ENODEV;
34} 41}
35static inline bool acpi_video_verify_backlight_support(void) { return false; } 42static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
43{
44 return acpi_backlight_vendor;
45}
46static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
47{
48}
36#endif 49#endif
37 50
38#endif 51#endif
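The intent is that native backlight drivers consult the firmware-derived preference before registering, while platform quirks can override it. A hedged sketch of a typical check (function name hypothetical):

#include <acpi/video.h>

/* Sketch: register a native backlight only if ACPI video prefers it. */
static bool want_native_backlight(void)
{
	/* A DMI quirk could have forced the decision earlier, e.g.:
	 * acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
	 */
	return acpi_video_get_backlight_type() == acpi_backlight_native;
}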
diff --git a/include/asm-generic/asm-offsets.h b/include/asm-generic/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/include/asm-generic/asm-offsets.h
@@ -0,0 +1 @@
#include <generated/asm-offsets.h>
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index e6a83d712ef6..55e3abc2d027 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -55,17 +55,43 @@
55#endif 55#endif
56 56
57#ifdef CONFIG_SMP 57#ifdef CONFIG_SMP
58
59#ifndef smp_mb
58#define smp_mb() mb() 60#define smp_mb() mb()
61#endif
62
63#ifndef smp_rmb
59#define smp_rmb() rmb() 64#define smp_rmb() rmb()
65#endif
66
67#ifndef smp_wmb
60#define smp_wmb() wmb() 68#define smp_wmb() wmb()
69#endif
70
71#ifndef smp_read_barrier_depends
61#define smp_read_barrier_depends() read_barrier_depends() 72#define smp_read_barrier_depends() read_barrier_depends()
62#else 73#endif
74
75#else /* !CONFIG_SMP */
76
77#ifndef smp_mb
63#define smp_mb() barrier() 78#define smp_mb() barrier()
79#endif
80
81#ifndef smp_rmb
64#define smp_rmb() barrier() 82#define smp_rmb() barrier()
83#endif
84
85#ifndef smp_wmb
65#define smp_wmb() barrier() 86#define smp_wmb() barrier()
87#endif
88
89#ifndef smp_read_barrier_depends
66#define smp_read_barrier_depends() do { } while (0) 90#define smp_read_barrier_depends() do { } while (0)
67#endif 91#endif
68 92
93#endif /* CONFIG_SMP */
94
69#ifndef smp_store_mb 95#ifndef smp_store_mb
70#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0) 96#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
71#endif 97#endif
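The point of the new #ifndef guards is that an architecture header can override only the barriers it needs to strengthen and fall through to the generic definitions for the rest. A minimal sketch of such an arch header (the name and the ARM-style instructions are assumptions for illustration):

/* arch/<arch>/include/asm/barrier.h -- hypothetical example */
#ifndef __ASM_EXAMPLE_BARRIER_H
#define __ASM_EXAMPLE_BARRIER_H

#define mb()		asm volatile("dmb sy" ::: "memory")	/* full-system barrier */
#define smp_mb()	asm volatile("dmb ish" ::: "memory")	/* cheaper SMP variant */

/* smp_rmb()/smp_wmb()/smp_read_barrier_depends() come from the generic header */
#include <asm-generic/barrier.h>

#endif /* __ASM_EXAMPLE_BARRIER_H */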
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
new file mode 100644
index 000000000000..5ff0e5193f85
--- /dev/null
+++ b/include/asm-generic/mm-arch-hooks.h
@@ -0,0 +1,16 @@
1/*
2 * Architecture specific mm hooks
3 */
4
5#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
6#define _ASM_GENERIC_MM_ARCH_HOOKS_H
7
8/*
9 * This file should be included through arch/../include/asm/Kbuild for
10 * the architecture which doesn't need specific mm hooks.
11 *
12 * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
13 * are used.
14 */
15
16#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
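For comparison, an architecture that does need a hook keeps its own asm/mm-arch-hooks.h instead of listing the generic header in its Kbuild file. A hedged sketch, assuming the arch_remap() hook shape used by include/linux/mm-arch-hooks.h at this point (names hypothetical):

/* arch/<arch>/include/asm/mm-arch-hooks.h -- hypothetical override */
#ifndef _ASM_EXAMPLE_MM_ARCH_HOOKS_H
#define _ASM_EXAMPLE_MM_ARCH_HOOKS_H

struct mm_struct;

static inline void arch_remap(struct mm_struct *mm,
			      unsigned long old_start, unsigned long old_end,
			      unsigned long new_start, unsigned long new_end)
{
	/* arch-specific bookkeeping (e.g. tracking a moved VDSO) goes here */
}
#define arch_remap arch_remap

#endif /* _ASM_EXAMPLE_MM_ARCH_HOOKS_H */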
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index bd910ceaccfa..29c57b2cb344 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -96,11 +96,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
96} 96}
97#endif 97#endif
98 98
99#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR 99#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
100#ifdef CONFIG_TRANSPARENT_HUGEPAGE 100#ifdef CONFIG_TRANSPARENT_HUGEPAGE
101static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, 101static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
102 unsigned long address, 102 unsigned long address,
103 pmd_t *pmdp) 103 pmd_t *pmdp)
104{ 104{
105 pmd_t pmd = *pmdp; 105 pmd_t pmd = *pmdp;
106 pmd_clear(pmdp); 106 pmd_clear(pmdp);
@@ -109,13 +109,13 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
109#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 109#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
110#endif 110#endif
111 111
112#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL 112#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
113#ifdef CONFIG_TRANSPARENT_HUGEPAGE 113#ifdef CONFIG_TRANSPARENT_HUGEPAGE
114static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm, 114static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
115 unsigned long address, pmd_t *pmdp, 115 unsigned long address, pmd_t *pmdp,
116 int full) 116 int full)
117{ 117{
118 return pmdp_get_and_clear(mm, address, pmdp); 118 return pmdp_huge_get_and_clear(mm, address, pmdp);
119} 119}
120#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 120#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
121#endif 121#endif
@@ -152,8 +152,8 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
152 pte_t *ptep); 152 pte_t *ptep);
153#endif 153#endif
154 154
155#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH 155#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
156extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, 156extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
157 unsigned long address, 157 unsigned long address,
158 pmd_t *pmdp); 158 pmd_t *pmdp);
159#endif 159#endif
@@ -189,6 +189,22 @@ extern void pmdp_splitting_flush(struct vm_area_struct *vma,
189 unsigned long address, pmd_t *pmdp); 189 unsigned long address, pmd_t *pmdp);
190#endif 190#endif
191 191
192#ifndef pmdp_collapse_flush
193#ifdef CONFIG_TRANSPARENT_HUGEPAGE
194extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
195 unsigned long address, pmd_t *pmdp);
196#else
197static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
198 unsigned long address,
199 pmd_t *pmdp)
200{
201 BUILD_BUG();
202 return *pmdp;
203}
204#define pmdp_collapse_flush pmdp_collapse_flush
205#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
206#endif
207
192#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT 208#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
193extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 209extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
194 pgtable_t pgtable); 210 pgtable_t pgtable);
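The __HAVE_ARCH_* convention above means an architecture that wants its own variant defines the guard and the function in its asm/pgtable.h so the generic fallback is skipped. A hedged sketch of such an override (body illustrative only):

/* arch/<arch>/include/asm/pgtable.h -- hypothetical override */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);
	/* arch-specific TLB or dirty/accessed bookkeeping would go here */
	return pmd;
}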
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
deleted file mode 100644
index 5de07355fad4..000000000000
--- a/include/asm-generic/scatterlist.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#ifndef __ASM_GENERIC_SCATTERLIST_H
2#define __ASM_GENERIC_SCATTERLIST_H
3
4#include <linux/types.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 unsigned int length;
13 dma_addr_t dma_address;
14#ifdef CONFIG_NEED_SG_DMA_LENGTH
15 unsigned int dma_length;
16#endif
17};
18
19/*
20 * These macros should be used after a dma_map_sg call has been done
21 * to get bus addresses of each of the SG entries and their lengths.
22 * You should only work with the number of sg entries pci_map_sg
23 * returns, or alternatively stop on the first sg_dma_len(sg) which
24 * is 0.
25 */
26#define sg_dma_address(sg) ((sg)->dma_address)
27
28#ifdef CONFIG_NEED_SG_DMA_LENGTH
29#define sg_dma_len(sg) ((sg)->dma_length)
30#else
31#define sg_dma_len(sg) ((sg)->length)
32#endif
33
34#endif /* __ASM_GENERIC_SCATTERLIST_H */
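The sg_dma_address()/sg_dma_len() accessors described in the removed comment live on in linux/scatterlist.h; the usage pattern it refers to looks roughly like this (sketch, device programming elided):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: walk a mapped scatterlist and read each segment's bus address. */
static void program_dma(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	for_each_sg(sgl, sg, count, i) {
		dma_addr_t addr = sg_dma_address(sg);	/* bus address */
		unsigned int len = sg_dma_len(sg);	/* segment length */

		/* hand addr/len to the hardware here */
		(void)addr;
		(void)len;
	}
	/* dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE) once the transfer is done */
}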
diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h
new file mode 100644
index 000000000000..1f8a1caa7cb4
--- /dev/null
+++ b/include/clocksource/timer-sp804.h
@@ -0,0 +1,28 @@
1#ifndef __CLKSOURCE_TIMER_SP804_H
2#define __CLKSOURCE_TIMER_SP804_H
3
4struct clk;
5
6void __sp804_clocksource_and_sched_clock_init(void __iomem *,
7 const char *, struct clk *, int);
8void __sp804_clockevents_init(void __iomem *, unsigned int,
9 struct clk *, const char *);
10void sp804_timer_disable(void __iomem *);
11
12static inline void sp804_clocksource_init(void __iomem *base, const char *name)
13{
14 __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0);
15}
16
17static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
18 const char *name)
19{
20 __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1);
21}
22
23static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name)
24{
25 __sp804_clockevents_init(base, irq, NULL, name);
26
27}
28#endif
diff --git a/include/drm/bridge/ptn3460.h b/include/drm/bridge/ptn3460.h
deleted file mode 100644
index b11f8e17e72f..000000000000
--- a/include/drm/bridge/ptn3460.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (C) 2013 Google, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _DRM_BRIDGE_PTN3460_H_
15#define _DRM_BRIDGE_PTN3460_H_
16
17struct drm_device;
18struct drm_bridge;
19struct drm_encoder;
20struct i2c_client;
21struct device_node;
22
23#if defined(CONFIG_DRM_PTN3460) || defined(CONFIG_DRM_PTN3460_MODULE)
24
25int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
26 struct i2c_client *client, struct device_node *node);
27
28void ptn3460_destroy(struct drm_bridge *bridge);
29
30#else
31
32static inline int ptn3460_init(struct drm_device *dev,
33 struct drm_encoder *encoder, struct i2c_client *client,
34 struct device_node *node)
35{
36 return 0;
37}
38
39static inline void ptn3460_destroy(struct drm_bridge *bridge)
40{
41}
42
43#endif
44
45#endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 62c40777c009..5aa519711e0b 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -326,6 +326,10 @@ struct drm_file {
326 struct list_head fbs; 326 struct list_head fbs;
327 struct mutex fbs_lock; 327 struct mutex fbs_lock;
328 328
329 /** User-created blob properties; this retains a reference on the
330 * property. */
331 struct list_head blobs;
332
329 wait_queue_head_t event_wait; 333 wait_queue_head_t event_wait;
330 struct list_head event_list; 334 struct list_head event_list;
331 int event_space; 335 int event_space;
@@ -355,8 +359,7 @@ struct drm_lock_data {
355 * @minor: Link back to minor char device we are master for. Immutable. 359 * @minor: Link back to minor char device we are master for. Immutable.
356 * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. 360 * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
357 * @unique_len: Length of unique field. Protected by drm_global_mutex. 361 * @unique_len: Length of unique field. Protected by drm_global_mutex.
358 * @magiclist: Hash of used authentication tokens. Protected by struct_mutex. 362 * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
359 * @magicfree: List of used authentication tokens. Protected by struct_mutex.
360 * @lock: DRI lock information. 363 * @lock: DRI lock information.
361 * @driver_priv: Pointer to driver-private information. 364 * @driver_priv: Pointer to driver-private information.
362 */ 365 */
@@ -365,8 +368,7 @@ struct drm_master {
365 struct drm_minor *minor; 368 struct drm_minor *minor;
366 char *unique; 369 char *unique;
367 int unique_len; 370 int unique_len;
368 struct drm_open_hash magiclist; 371 struct idr magic_map;
369 struct list_head magicfree;
370 struct drm_lock_data lock; 372 struct drm_lock_data lock;
371 void *driver_priv; 373 void *driver_priv;
372}; 374};
@@ -686,9 +688,13 @@ struct drm_pending_vblank_event {
686struct drm_vblank_crtc { 688struct drm_vblank_crtc {
687 struct drm_device *dev; /* pointer to the drm_device */ 689 struct drm_device *dev; /* pointer to the drm_device */
688 wait_queue_head_t queue; /**< VBLANK wait queue */ 690 wait_queue_head_t queue; /**< VBLANK wait queue */
689 struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
690 struct timer_list disable_timer; /* delayed disable timer */ 691 struct timer_list disable_timer; /* delayed disable timer */
691 atomic_t count; /**< number of VBLANK interrupts */ 692
693 /* vblank counter, protected by dev->vblank_time_lock for writes */
694 u32 count;
695 /* vblank timestamps, protected by dev->vblank_time_lock for writes */
696 struct timeval time[DRM_VBLANKTIME_RBSIZE];
697
692 atomic_t refcount; /* number of users of vblank interrupts per crtc */ 698
693 u32 last; /* protected by dev->vbl_lock, used */ 699 u32 last; /* protected by dev->vbl_lock, used */
694 /* for wraparound handling */ 700 /* for wraparound handling */
@@ -812,6 +818,7 @@ struct drm_device {
812#endif 818#endif
813 819
814 struct platform_device *platformdev; /**< Platform device structure */ 820
821 struct virtio_device *virtdev;
815 822
816 struct drm_sg_mem *sg; /**< Scatter gather memory */ 823 struct drm_sg_mem *sg; /**< Scatter gather memory */
817 unsigned int num_crtcs; /**< Number of CRTCs on this device */ 824 unsigned int num_crtcs; /**< Number of CRTCs on this device */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index c157103492b0..8a3a913320eb 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -35,6 +35,11 @@ drm_atomic_state_alloc(struct drm_device *dev);
35void drm_atomic_state_clear(struct drm_atomic_state *state); 35void drm_atomic_state_clear(struct drm_atomic_state *state);
36void drm_atomic_state_free(struct drm_atomic_state *state); 36void drm_atomic_state_free(struct drm_atomic_state *state);
37 37
38int __must_check
39drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
40void drm_atomic_state_default_clear(struct drm_atomic_state *state);
41void drm_atomic_state_default_release(struct drm_atomic_state *state);
42
38struct drm_crtc_state * __must_check 43struct drm_crtc_state * __must_check
39drm_atomic_get_crtc_state(struct drm_atomic_state *state, 44drm_atomic_get_crtc_state(struct drm_atomic_state *state,
40 struct drm_crtc *crtc); 45 struct drm_crtc *crtc);
@@ -54,6 +59,62 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
54 struct drm_connector_state *state, struct drm_property *property, 59 struct drm_connector_state *state, struct drm_property *property,
55 uint64_t val); 60 uint64_t val);
56 61
62/**
63 * drm_atomic_get_existing_crtc_state - get crtc state, if it exists
64 * @state: global atomic state object
65 * @crtc: crtc to grab
66 *
67 * This function returns the crtc state for the given crtc, or NULL
68 * if the crtc is not part of the global atomic state.
69 */
70static inline struct drm_crtc_state *
71drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
72 struct drm_crtc *crtc)
73{
74 return state->crtc_states[drm_crtc_index(crtc)];
75}
76
77/**
78 * drm_atomic_get_existing_plane_state - get plane state, if it exists
79 * @state: global atomic state object
80 * @plane: plane to grab
81 *
82 * This function returns the plane state for the given plane, or NULL
83 * if the plane is not part of the global atomic state.
84 */
85static inline struct drm_plane_state *
86drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
87 struct drm_plane *plane)
88{
89 return state->plane_states[drm_plane_index(plane)];
90}
91
92/**
93 * drm_atomic_get_existing_connector_state - get connector state, if it exists
94 * @state: global atomic state object
95 * @connector: connector to grab
96 *
97 * This function returns the connector state for the given connector,
98 * or NULL if the connector is not part of the global atomic state.
99 */
100static inline struct drm_connector_state *
101drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
102 struct drm_connector *connector)
103{
104 int index = drm_connector_index(connector);
105
106 if (index >= state->num_connector)
107 return NULL;
108
109 return state->connector_states[index];
110}
111
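These helpers are meant for check/commit code that only wants to look at objects already pulled into the update, without the locking that drm_atomic_get_crtc_state() implies. A hedged sketch of the pattern (function name hypothetical):

#include <drm/drm_atomic.h>

/* Sketch: report whether a CRTC will be active after this update. */
static bool crtc_will_be_active(struct drm_atomic_state *state,
				struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (!crtc_state)		/* CRTC not touched by this update */
		return crtc->state->active;

	return crtc_state->active;
}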
112int __must_check
113drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
114 struct drm_display_mode *mode);
115int __must_check
116drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
117 struct drm_property_blob *blob);
57int __must_check 118int __must_check
58drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, 119drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
59 struct drm_crtc *crtc); 120 struct drm_crtc *crtc);
@@ -65,6 +126,10 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
65int __must_check 126int __must_check
66drm_atomic_add_affected_connectors(struct drm_atomic_state *state, 127drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
67 struct drm_crtc *crtc); 128 struct drm_crtc *crtc);
129int __must_check
130drm_atomic_add_affected_planes(struct drm_atomic_state *state,
131 struct drm_crtc *crtc);
132
68int 133int
69drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, 134drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
70 struct drm_crtc *crtc); 135 struct drm_crtc *crtc);
@@ -77,26 +142,32 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
77 142
78#define for_each_connector_in_state(state, connector, connector_state, __i) \ 143#define for_each_connector_in_state(state, connector, connector_state, __i) \
79 for ((__i) = 0; \ 144 for ((__i) = 0; \
80 (connector) = (state)->connectors[__i], \ 145 (__i) < (state)->num_connector && \
81 (connector_state) = (state)->connector_states[__i], \ 146 ((connector) = (state)->connectors[__i], \
82 (__i) < (state)->num_connector; \ 147 (connector_state) = (state)->connector_states[__i], 1); \
83 (__i)++) \ 148 (__i)++) \
84 if (connector) 149 if (connector)
85 150
86#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \ 151#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
87 for ((__i) = 0; \ 152 for ((__i) = 0; \
88 (crtc) = (state)->crtcs[__i], \ 153 (__i) < (state)->dev->mode_config.num_crtc && \
89 (crtc_state) = (state)->crtc_states[__i], \ 154 ((crtc) = (state)->crtcs[__i], \
90 (__i) < (state)->dev->mode_config.num_crtc; \ 155 (crtc_state) = (state)->crtc_states[__i], 1); \
91 (__i)++) \ 156 (__i)++) \
92 if (crtc_state) 157 if (crtc_state)
93 158
94#define for_each_plane_in_state(state, plane, plane_state, __i) \ 159#define for_each_plane_in_state(state, plane, plane_state, __i) \
95 for ((__i) = 0; \ 160 for ((__i) = 0; \
96 (plane) = (state)->planes[__i], \ 161 (__i) < (state)->dev->mode_config.num_total_plane && \
97 (plane_state) = (state)->plane_states[__i], \ 162 ((plane) = (state)->planes[__i], \
98 (__i) < (state)->dev->mode_config.num_total_plane; \ 163 (plane_state) = (state)->plane_states[__i], 1); \
99 (__i)++) \ 164 (__i)++) \
100 if (plane_state) 165 if (plane_state)
166static inline bool
167drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
168{
169 return state->mode_changed || state->active_changed;
170}
171
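Together with the corrected iterators above, this is typically used in driver check/commit paths. A hedged sketch (function name hypothetical):

#include <drm/drmP.h>
#include <drm/drm_atomic.h>

/* Sketch: log every CRTC in an atomic update that requires a full modeset. */
static void report_modesets(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			DRM_DEBUG_KMS("[CRTC:%d] needs modeset\n",
				      crtc->base.id);
	}
}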
101 172
102#endif /* DRM_ATOMIC_H_ */ 173#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d665781eb542..cc1fee8a12d0 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -43,6 +43,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
43void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, 43void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
44 struct drm_atomic_state *old_state); 44 struct drm_atomic_state *old_state);
45 45
46void
47drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
48 struct drm_atomic_state *old_state);
49
46void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, 50void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
47 struct drm_atomic_state *state); 51 struct drm_atomic_state *state);
48void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, 52void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
@@ -54,6 +58,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
54 struct drm_atomic_state *state); 58 struct drm_atomic_state *state);
55void drm_atomic_helper_cleanup_planes(struct drm_device *dev, 59void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
56 struct drm_atomic_state *old_state); 60 struct drm_atomic_state *old_state);
61void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
57 62
58void drm_atomic_helper_swap_state(struct drm_device *dev, 63void drm_atomic_helper_swap_state(struct drm_device *dev,
59 struct drm_atomic_state *state); 64 struct drm_atomic_state *state);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index ca71c03143d1..57ca8cc383a6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -216,7 +216,10 @@ struct drm_framebuffer {
216 216
217struct drm_property_blob { 217struct drm_property_blob {
218 struct drm_mode_object base; 218 struct drm_mode_object base;
219 struct list_head head; 219 struct drm_device *dev;
220 struct kref refcount;
221 struct list_head head_global;
222 struct list_head head_file;
220 size_t length; 223 size_t length;
221 unsigned char data[]; 224 unsigned char data[];
222}; 225};
@@ -296,6 +299,9 @@ struct drm_crtc_state {
296 299
297 struct drm_display_mode mode; 300 struct drm_display_mode mode;
298 301
302 /* blob property to expose current mode to atomic userspace */
303 struct drm_property_blob *mode_blob;
304
299 struct drm_pending_vblank_event *event; 305 struct drm_pending_vblank_event *event;
300 306
301 struct drm_atomic_state *state; 307 struct drm_atomic_state *state;
@@ -647,6 +653,7 @@ struct drm_encoder {
647 * @audio_latency: audio latency info from ELD, if found 653 * @audio_latency: audio latency info from ELD, if found
648 * @null_edid_counter: track sinks that give us all zeros for the EDID 654 * @null_edid_counter: track sinks that give us all zeros for the EDID
649 * @bad_edid_counter: track sinks that give us an EDID with invalid checksum 655 * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
656 * @edid_corrupt: indicates whether the last read EDID was corrupt
650 * @debugfs_entry: debugfs directory for this connector 657 * @debugfs_entry: debugfs directory for this connector
651 * @state: current atomic state for this connector 658 * @state: current atomic state for this connector
652 * @has_tile: is this connector connected to a tiled monitor 659 * @has_tile: is this connector connected to a tiled monitor
@@ -719,6 +726,11 @@ struct drm_connector {
719 int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ 726 int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
720 unsigned bad_edid_counter; 727 unsigned bad_edid_counter;
721 728
729 /* Flag for raw EDID header corruption - used in Displayport
730 * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
731 */
732 bool edid_corrupt;
733
722 struct dentry *debugfs_entry; 734 struct dentry *debugfs_entry;
723 735
724 struct drm_connector_state *state; 736 struct drm_connector_state *state;
@@ -731,6 +743,8 @@ struct drm_connector {
731 uint8_t num_h_tile, num_v_tile; 743 uint8_t num_h_tile, num_v_tile;
732 uint8_t tile_h_loc, tile_v_loc; 744 uint8_t tile_h_loc, tile_v_loc;
733 uint16_t tile_h_size, tile_v_size; 745 uint16_t tile_h_size, tile_v_size;
746
747 struct list_head destroy_list;
734}; 748};
735 749
736/** 750/**
@@ -895,6 +909,8 @@ struct drm_bridge_funcs {
895/** 909/**
896 * struct drm_bridge - central DRM bridge control structure 910 * struct drm_bridge - central DRM bridge control structure
897 * @dev: DRM device this bridge belongs to 911 * @dev: DRM device this bridge belongs to
912 * @encoder: encoder to which this bridge is connected
913 * @next: the next bridge in the encoder chain
898 * @of_node: device node pointer to the bridge 914 * @of_node: device node pointer to the bridge
899 * @list: to keep track of all added bridges 915 * @list: to keep track of all added bridges
900 * @base: base mode object 916 * @base: base mode object
@@ -904,6 +920,7 @@ struct drm_bridge_funcs {
904struct drm_bridge { 920struct drm_bridge {
905 struct drm_device *dev; 921 struct drm_device *dev;
906 struct drm_encoder *encoder; 922 struct drm_encoder *encoder;
923 struct drm_bridge *next;
907#ifdef CONFIG_OF 924#ifdef CONFIG_OF
908 struct device_node *of_node; 925 struct device_node *of_node;
909#endif 926#endif
@@ -977,6 +994,9 @@ struct drm_mode_set {
977 * @atomic_check: check whether a given atomic state update is possible 994 * @atomic_check: check whether a given atomic state update is possible
978 * @atomic_commit: commit an atomic state update previously verified with 995 * @atomic_commit: commit an atomic state update previously verified with
979 * atomic_check() 996 * atomic_check()
997 * @atomic_state_alloc: allocate a new atomic state
998 * @atomic_state_clear: clear the atomic state
999 * @atomic_state_free: free the atomic state
980 * 1000 *
981 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that 1001 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
982 * involve drivers. 1002 * involve drivers.
@@ -992,6 +1012,9 @@ struct drm_mode_config_funcs {
992 int (*atomic_commit)(struct drm_device *dev, 1012 int (*atomic_commit)(struct drm_device *dev,
993 struct drm_atomic_state *a, 1013 struct drm_atomic_state *a,
994 bool async); 1014 bool async);
1015 struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
1016 void (*atomic_state_clear)(struct drm_atomic_state *state);
1017 void (*atomic_state_free)(struct drm_atomic_state *state);
995}; 1018};
996 1019
997/** 1020/**
@@ -1048,6 +1071,7 @@ struct drm_mode_group {
1048 * @poll_running: track polling status for this device 1071 * @poll_running: track polling status for this device
1049 * @output_poll_work: delayed work for polling in process context 1072 * @output_poll_work: delayed work for polling in process context
1050 * @property_blob_list: list of all the blob property objects 1073 * @property_blob_list: list of all the blob property objects
1074 * @blob_lock: mutex for blob property allocation and management
1051 * @*_property: core property tracking 1075 * @*_property: core property tracking
1052 * @preferred_depth: preferred RGB pixel depth, used by fb helpers 1076
1053 * @prefer_shadow: hint to userspace to prefer shadow-fb rendering 1077 * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
@@ -1103,6 +1127,8 @@ struct drm_mode_config {
1103 bool delayed_event; 1127 bool delayed_event;
1104 struct delayed_work output_poll_work; 1128 struct delayed_work output_poll_work;
1105 1129
1130 struct mutex blob_lock;
1131
1106 /* pointers to standard properties */ 1132 /* pointers to standard properties */
1107 struct list_head property_blob_list; 1133 struct list_head property_blob_list;
1108 struct drm_property *edid_property; 1134 struct drm_property *edid_property;
@@ -1122,6 +1148,7 @@ struct drm_mode_config {
1122 struct drm_property *prop_fb_id; 1148 struct drm_property *prop_fb_id;
1123 struct drm_property *prop_crtc_id; 1149 struct drm_property *prop_crtc_id;
1124 struct drm_property *prop_active; 1150 struct drm_property *prop_active;
1151 struct drm_property *prop_mode_id;
1125 1152
1126 /* DVI-I properties */ 1153 /* DVI-I properties */
1127 struct drm_property *dvi_i_subconnector_property; 1154 struct drm_property *dvi_i_subconnector_property;
@@ -1230,6 +1257,17 @@ extern void drm_bridge_remove(struct drm_bridge *bridge);
1230extern struct drm_bridge *of_drm_find_bridge(struct device_node *np); 1257extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
1231extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge); 1258extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
1232 1259
1260bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
1261 const struct drm_display_mode *mode,
1262 struct drm_display_mode *adjusted_mode);
1263void drm_bridge_disable(struct drm_bridge *bridge);
1264void drm_bridge_post_disable(struct drm_bridge *bridge);
1265void drm_bridge_mode_set(struct drm_bridge *bridge,
1266 struct drm_display_mode *mode,
1267 struct drm_display_mode *adjusted_mode);
1268void drm_bridge_pre_enable(struct drm_bridge *bridge);
1269void drm_bridge_enable(struct drm_bridge *bridge);
1270
1233extern int drm_encoder_init(struct drm_device *dev, 1271extern int drm_encoder_init(struct drm_device *dev,
1234 struct drm_encoder *encoder, 1272 struct drm_encoder *encoder,
1235 const struct drm_encoder_funcs *funcs, 1273 const struct drm_encoder_funcs *funcs,
@@ -1263,6 +1301,7 @@ extern int drm_plane_init(struct drm_device *dev,
1263 bool is_primary); 1301 bool is_primary);
1264extern void drm_plane_cleanup(struct drm_plane *plane); 1302extern void drm_plane_cleanup(struct drm_plane *plane);
1265extern unsigned int drm_plane_index(struct drm_plane *plane); 1303extern unsigned int drm_plane_index(struct drm_plane *plane);
1304extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
1266extern void drm_plane_force_disable(struct drm_plane *plane); 1305extern void drm_plane_force_disable(struct drm_plane *plane);
1267extern int drm_plane_check_pixel_format(const struct drm_plane *plane, 1306extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
1268 u32 format); 1307 u32 format);
@@ -1283,6 +1322,8 @@ extern const char *drm_get_dvi_i_select_name(int val);
1283extern const char *drm_get_tv_subconnector_name(int val); 1322extern const char *drm_get_tv_subconnector_name(int val);
1284extern const char *drm_get_tv_select_name(int val); 1323extern const char *drm_get_tv_select_name(int val);
1285extern void drm_fb_release(struct drm_file *file_priv); 1324extern void drm_fb_release(struct drm_file *file_priv);
1325extern void drm_property_destroy_user_blobs(struct drm_device *dev,
1326 struct drm_file *file_priv);
1286extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); 1327extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
1287extern void drm_mode_group_destroy(struct drm_mode_group *group); 1328extern void drm_mode_group_destroy(struct drm_mode_group *group);
1288extern void drm_reinit_primary_mode_group(struct drm_device *dev); 1329extern void drm_reinit_primary_mode_group(struct drm_device *dev);
@@ -1362,6 +1403,13 @@ struct drm_property *drm_property_create_object(struct drm_device *dev,
1362 int flags, const char *name, uint32_t type); 1403 int flags, const char *name, uint32_t type);
1363struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags, 1404struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
1364 const char *name); 1405 const char *name);
1406struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
1407 size_t length,
1408 const void *data);
1409struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
1410 uint32_t id);
1411struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
1412void drm_property_unreference_blob(struct drm_property_blob *blob);
1365extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); 1413extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
1366extern int drm_property_add_enum(struct drm_property *property, int index, 1414extern int drm_property_add_enum(struct drm_property *property, int index,
1367 uint64_t value, const char *name); 1415 uint64_t value, const char *name);
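With blob properties now reference counted, driver code that wants to hold on to a blob (for example the mode_blob added above) takes its own reference and drops it when done. A hedged sketch of the pattern (function name hypothetical):

#include <drm/drm_crtc.h>

/* Sketch: cache a long-lived reference to a CRTC state's mode blob. */
static void cache_mode_blob(struct drm_crtc_state *state,
			    struct drm_property_blob **cache)
{
	if (*cache)
		drm_property_unreference_blob(*cache);

	*cache = NULL;
	if (state->mode_blob)
		*cache = drm_property_reference_blob(state->mode_blob);
}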
@@ -1421,6 +1469,10 @@ extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
1421 void *data, struct drm_file *file_priv); 1469 void *data, struct drm_file *file_priv);
1422extern int drm_mode_getblob_ioctl(struct drm_device *dev, 1470extern int drm_mode_getblob_ioctl(struct drm_device *dev,
1423 void *data, struct drm_file *file_priv); 1471 void *data, struct drm_file *file_priv);
1472extern int drm_mode_createblob_ioctl(struct drm_device *dev,
1473 void *data, struct drm_file *file_priv);
1474extern int drm_mode_destroyblob_ioctl(struct drm_device *dev,
1475 void *data, struct drm_file *file_priv);
1424extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, 1476extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
1425 void *data, struct drm_file *file_priv); 1477 void *data, struct drm_file *file_priv);
1426extern int drm_mode_getencoder(struct drm_device *dev, 1478extern int drm_mode_getencoder(struct drm_device *dev,
@@ -1442,7 +1494,8 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
1442 int hpref, int vpref); 1494 int hpref, int vpref);
1443 1495
1444extern int drm_edid_header_is_valid(const u8 *raw_edid); 1496extern int drm_edid_header_is_valid(const u8 *raw_edid);
1445extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid); 1497extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
1498 bool *edid_corrupt);
1446extern bool drm_edid_is_valid(struct edid *edid); 1499extern bool drm_edid_is_valid(struct edid *edid);
1447 1500
1448extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, 1501extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
@@ -1525,14 +1578,6 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
1525 return mo ? obj_to_property(mo) : NULL; 1578 return mo ? obj_to_property(mo) : NULL;
1526} 1579}
1527 1580
1528static inline struct drm_property_blob *
1529drm_property_blob_find(struct drm_device *dev, uint32_t id)
1530{
1531 struct drm_mode_object *mo;
1532 mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
1533 return mo ? obj_to_blob(mo) : NULL;
1534}
1535
1536/* Plane list iterator for legacy (overlay only) planes. */ 1581/* Plane list iterator for legacy (overlay only) planes. */
1537#define drm_for_each_legacy_plane(plane, planelist) \ 1582#define drm_for_each_legacy_plane(plane, planelist) \
1538 list_for_each_entry(plane, planelist, head) \ 1583 list_for_each_entry(plane, planelist, head) \
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c8fc187061de..918aa68b5199 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs {
  * @get_modes: get mode list for this connector
  * @mode_valid: is this mode valid on the given connector? (optional)
  * @best_encoder: return the preferred encoder for this connector
+ * @atomic_best_encoder: atomic version of @best_encoder
  *
  * The helper operations are called by the mid-layer CRTC helper.
  */
@@ -176,6 +177,8 @@ struct drm_connector_helper_funcs {
 	enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
 					   struct drm_display_mode *mode);
 	struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+	struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
+						   struct drm_connector_state *connector_state);
 };
 
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
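
The hunks above add an atomic_best_encoder hook next to the existing best_encoder callback. As a rough, hedged illustration (not part of this patch), a driver might wire the new hook into its connector helper funcs along these lines; every my_* symbol is a hypothetical placeholder:

/* Hedged sketch, not from this patch: all my_* symbols are hypothetical. */
#include <drm/drm_crtc_helper.h>

static int my_get_modes(struct drm_connector *connector);
static enum drm_mode_status my_mode_valid(struct drm_connector *connector,
					  struct drm_display_mode *mode);
static struct drm_encoder *my_best_encoder(struct drm_connector *connector);
static struct drm_encoder *my_lookup_encoder(struct drm_connector *connector,
					     struct drm_connector_state *state);

static struct drm_encoder *
my_atomic_best_encoder(struct drm_connector *connector,
		       struct drm_connector_state *conn_state)
{
	/* Pick the encoder from the atomic connector state, e.g. per stream for DP MST. */
	return my_lookup_encoder(connector, conn_state);
}

static const struct drm_connector_helper_funcs my_connector_helpers = {
	.get_modes		= my_get_modes,
	.mode_valid		= my_mode_valid,
	.best_encoder		= my_best_encoder,
	.atomic_best_encoder	= my_atomic_best_encoder,
};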
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 523f04c90dea..2e86f642fc33 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -679,9 +679,9 @@ struct drm_dp_aux_msg {
  * An AUX channel can also be used to transport I2C messages to a sink. A
  * typical application of that is to access an EDID that's present in the
  * sink device. The .transfer() function can also be used to execute such
- * transactions. The drm_dp_aux_register_i2c_bus() function registers an
- * I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
- * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
+ * transactions. The drm_dp_aux_register() function registers an I2C
+ * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
+ * should call drm_dp_aux_unregister() to remove the I2C adapter.
  * The I2C adapter uses long transfers by default; if a partial response is
  * received, the adapter will drop down to the size given by the partial
  * response for this transaction only.
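
The updated comment now points at drm_dp_aux_register()/drm_dp_aux_unregister(). A minimal, hedged sketch of the usage it describes, probing a sink's EDID through the AUX channel's I2C adapter, could look like the following; my_connector_get_modes is a hypothetical helper and not part of this patch:

/* Hedged sketch, not from this patch: my_connector_get_modes is hypothetical. */
#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <linux/slab.h>

static int my_connector_get_modes(struct drm_connector *connector,
				  struct drm_dp_aux *aux)
{
	struct edid *edid;
	int count = 0;

	/* aux->ddc is the I2C adapter set up earlier via drm_dp_aux_register(aux). */
	if (!drm_probe_ddc(&aux->ddc))
		return 0;

	edid = drm_get_edid(connector, &aux->ddc);
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return count;
}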
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index a2507817be41..86d0b25ed054 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -463,6 +463,10 @@ struct drm_dp_mst_topology_mgr {
 	struct work_struct work;
 
 	struct work_struct tx_work;
+
+	struct list_head destroy_connector_list;
+	struct mutex destroy_connector_lock;
+	struct work_struct destroy_connector_work;
 };
 
 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 19a240446fca..e42495ad8136 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -56,10 +56,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
 
 static __inline void drm_free_large(void *ptr)
 {
-	if (!is_vmalloc_addr(ptr))
-		return kfree(ptr);
-
-	vfree(ptr);
+	kvfree(ptr);
 }
 
 #endif
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 0616188c7801..08a8cac9e555 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -182,6 +182,10 @@ struct drm_cmdline_mode;
 
 struct drm_display_mode *drm_mode_create(struct drm_device *dev);
 void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
+			       const struct drm_display_mode *in);
+int drm_mode_convert_umode(struct drm_display_mode *out,
+			   const struct drm_mode_modeinfo *in);
 void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
 void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
 
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 3e2f22e5bf3c..c9a8b64aa33b 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -31,6 +31,7 @@ struct i915_audio_component {
 		struct module *owner;
 		void (*get_power)(struct device *);
 		void (*put_power)(struct device *);
+		void (*codec_wake_override)(struct device *, bool enable);
 		int (*get_cdclk_freq)(struct device *);
 	} *ops;
 };
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 613372375ada..17c445612e01 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -286,5 +286,9 @@
 	INTEL_SKL_GT2_IDS(info), \
 	INTEL_SKL_GT3_IDS(info)
 
+#define INTEL_BXT_IDS(info) \
+	INTEL_VGA_DEVICE(0x0A84, info), \
+	INTEL_VGA_DEVICE(0x1A84, info), \
+	INTEL_VGA_DEVICE(0x5A84, info)
 
 #endif /* _I915_PCIIDS_H */
diff --git a/include/dt-bindings/clock/bcm-cygnus.h b/include/dt-bindings/clock/bcm-cygnus.h
new file mode 100644
index 000000000000..32fbc475087a
--- /dev/null
+++ b/include/dt-bindings/clock/bcm-cygnus.h
@@ -0,0 +1,68 @@
1/*
2 * BSD LICENSE
3 *
4 * Copyright(c) 2014 Broadcom Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Broadcom Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#ifndef _CLOCK_BCM_CYGNUS_H
34#define _CLOCK_BCM_CYGNUS_H
35
36/* GENPLL clock ID */
37#define BCM_CYGNUS_GENPLL 0
38#define BCM_CYGNUS_GENPLL_AXI21_CLK 1
39#define BCM_CYGNUS_GENPLL_250MHZ_CLK 2
40#define BCM_CYGNUS_GENPLL_IHOST_SYS_CLK 3
41#define BCM_CYGNUS_GENPLL_ENET_SW_CLK 4
42#define BCM_CYGNUS_GENPLL_AUDIO_125_CLK 5
43#define BCM_CYGNUS_GENPLL_CAN_CLK 6
44
45/* LCPLL0 clock ID */
46#define BCM_CYGNUS_LCPLL0 0
47#define BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK 1
48#define BCM_CYGNUS_LCPLL0_DDR_PHY_CLK 2
49#define BCM_CYGNUS_LCPLL0_SDIO_CLK 3
50#define BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK 4
51#define BCM_CYGNUS_LCPLL0_SMART_CARD_CLK 5
52#define BCM_CYGNUS_LCPLL0_CH5_UNUSED 6
53
54/* MIPI PLL clock ID */
55#define BCM_CYGNUS_MIPIPLL 0
56#define BCM_CYGNUS_MIPIPLL_CH0_UNUSED 1
57#define BCM_CYGNUS_MIPIPLL_CH1_LCD 2
58#define BCM_CYGNUS_MIPIPLL_CH2_V3D 3
59#define BCM_CYGNUS_MIPIPLL_CH3_UNUSED 4
60#define BCM_CYGNUS_MIPIPLL_CH4_UNUSED 5
61#define BCM_CYGNUS_MIPIPLL_CH5_UNUSED 6
62
63/* ASIU clock ID */
64#define BCM_CYGNUS_ASIU_KEYPAD_CLK 0
65#define BCM_CYGNUS_ASIU_ADC_CLK 1
66#define BCM_CYGNUS_ASIU_PWM_CLK 2
67
68#endif /* _CLOCK_BCM_CYGNUS_H */
diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h
new file mode 100644
index 000000000000..70ee3833a7a0
--- /dev/null
+++ b/include/dt-bindings/clock/hi6220-clock.h
@@ -0,0 +1,173 @@
1/*
2 * Copyright (c) 2015 Hisilicon Limited.
3 *
4 * Author: Bintian Wang <bintian.wang@huawei.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __DT_BINDINGS_CLOCK_HI6220_H
12#define __DT_BINDINGS_CLOCK_HI6220_H
13
14/* clk in Hi6220 AO (always on) controller */
15#define HI6220_NONE_CLOCK 0
16
17/* fixed rate clocks */
18#define HI6220_REF32K 1
19#define HI6220_CLK_TCXO 2
20#define HI6220_MMC1_PAD 3
21#define HI6220_MMC2_PAD 4
22#define HI6220_MMC0_PAD 5
23#define HI6220_PLL_BBP 6
24#define HI6220_PLL_GPU 7
25#define HI6220_PLL1_DDR 8
26#define HI6220_PLL_SYS 9
27#define HI6220_PLL_SYS_MEDIA 10
28#define HI6220_DDR_SRC 11
29#define HI6220_PLL_MEDIA 12
30#define HI6220_PLL_DDR 13
31
32/* fixed factor clocks */
33#define HI6220_300M 14
34#define HI6220_150M 15
35#define HI6220_PICOPHY_SRC 16
36#define HI6220_MMC0_SRC_SEL 17
37#define HI6220_MMC1_SRC_SEL 18
38#define HI6220_MMC2_SRC_SEL 19
39#define HI6220_VPU_CODEC 20
40#define HI6220_MMC0_SMP 21
41#define HI6220_MMC1_SMP 22
42#define HI6220_MMC2_SMP 23
43
44/* gate clocks */
45#define HI6220_WDT0_PCLK 24
46#define HI6220_WDT1_PCLK 25
47#define HI6220_WDT2_PCLK 26
48#define HI6220_TIMER0_PCLK 27
49#define HI6220_TIMER1_PCLK 28
50#define HI6220_TIMER2_PCLK 29
51#define HI6220_TIMER3_PCLK 30
52#define HI6220_TIMER4_PCLK 31
53#define HI6220_TIMER5_PCLK 32
54#define HI6220_TIMER6_PCLK 33
55#define HI6220_TIMER7_PCLK 34
56#define HI6220_TIMER8_PCLK 35
57#define HI6220_UART0_PCLK 36
58
59#define HI6220_AO_NR_CLKS 37
60
61/* clk in Hi6220 systrl */
62/* gate clock */
63#define HI6220_MMC0_CLK 1
64#define HI6220_MMC0_CIUCLK 2
65#define HI6220_MMC1_CLK 3
66#define HI6220_MMC1_CIUCLK 4
67#define HI6220_MMC2_CLK 5
68#define HI6220_MMC2_CIUCLK 6
69#define HI6220_USBOTG_HCLK 7
70#define HI6220_CLK_PICOPHY 8
71#define HI6220_HIFI 9
72#define HI6220_DACODEC_PCLK 10
73#define HI6220_EDMAC_ACLK 11
74#define HI6220_CS_ATB 12
75#define HI6220_I2C0_CLK 13
76#define HI6220_I2C1_CLK 14
77#define HI6220_I2C2_CLK 15
78#define HI6220_I2C3_CLK 16
79#define HI6220_UART1_PCLK 17
80#define HI6220_UART2_PCLK 18
81#define HI6220_UART3_PCLK 19
82#define HI6220_UART4_PCLK 20
83#define HI6220_SPI_CLK 21
84#define HI6220_TSENSOR_CLK 22
85#define HI6220_MMU_CLK 23
86#define HI6220_HIFI_SEL 24
87#define HI6220_MMC0_SYSPLL 25
88#define HI6220_MMC1_SYSPLL 26
89#define HI6220_MMC2_SYSPLL 27
90#define HI6220_MMC0_SEL 28
91#define HI6220_MMC1_SEL 29
92#define HI6220_BBPPLL_SEL 30
93#define HI6220_MEDIA_PLL_SRC 31
94#define HI6220_MMC2_SEL 32
95#define HI6220_CS_ATB_SYSPLL 33
96
97/* mux clocks */
98#define HI6220_MMC0_SRC 34
99#define HI6220_MMC0_SMP_IN 35
100#define HI6220_MMC1_SRC 36
101#define HI6220_MMC1_SMP_IN 37
102#define HI6220_MMC2_SRC 38
103#define HI6220_MMC2_SMP_IN 39
104#define HI6220_HIFI_SRC 40
105#define HI6220_UART1_SRC 41
106#define HI6220_UART2_SRC 42
107#define HI6220_UART3_SRC 43
108#define HI6220_UART4_SRC 44
109#define HI6220_MMC0_MUX0 45
110#define HI6220_MMC1_MUX0 46
111#define HI6220_MMC2_MUX0 47
112#define HI6220_MMC0_MUX1 48
113#define HI6220_MMC1_MUX1 49
114#define HI6220_MMC2_MUX1 50
115
116/* divider clocks */
117#define HI6220_CLK_BUS 51
118#define HI6220_MMC0_DIV 52
119#define HI6220_MMC1_DIV 53
120#define HI6220_MMC2_DIV 54
121#define HI6220_HIFI_DIV 55
122#define HI6220_BBPPLL0_DIV 56
123#define HI6220_CS_DAPB 57
124#define HI6220_CS_ATB_DIV 58
125
126#define HI6220_SYS_NR_CLKS 59
127
128/* clk in Hi6220 media controller */
129/* gate clocks */
130#define HI6220_DSI_PCLK 1
131#define HI6220_G3D_PCLK 2
132#define HI6220_ACLK_CODEC_VPU 3
133#define HI6220_ISP_SCLK 4
134#define HI6220_ADE_CORE 5
135#define HI6220_MED_MMU 6
136#define HI6220_CFG_CSI4PHY 7
137#define HI6220_CFG_CSI2PHY 8
138#define HI6220_ISP_SCLK_GATE 9
139#define HI6220_ISP_SCLK_GATE1 10
140#define HI6220_ADE_CORE_GATE 11
141#define HI6220_CODEC_VPU_GATE 12
142#define HI6220_MED_SYSPLL 13
143
144/* mux clocks */
145#define HI6220_1440_1200 14
146#define HI6220_1000_1200 15
147#define HI6220_1000_1440 16
148
149/* divider clocks */
150#define HI6220_CODEC_JPEG 17
151#define HI6220_ISP_SCLK_SRC 18
152#define HI6220_ISP_SCLK1 19
153#define HI6220_ADE_CORE_SRC 20
154#define HI6220_ADE_PIX_SRC 21
155#define HI6220_G3D_CLK 22
156#define HI6220_CODEC_VPU_SRC 23
157
158#define HI6220_MEDIA_NR_CLKS 24
159
160/* clk in Hi6220 power controller */
161/* gate clocks */
162#define HI6220_PLL_GPU_GATE 1
163#define HI6220_PLL1_DDR_GATE 2
164#define HI6220_PLL_DDR_GATE 3
165#define HI6220_PLL_MEDIA_GATE 4
166#define HI6220_PLL0_BBP_GATE 5
167
168/* divider clocks */
169#define HI6220_DDRC_SRC 6
170#define HI6220_DDRC_AXI1 7
171
172#define HI6220_POWER_NR_CLKS 8
173#endif
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
new file mode 100644
index 000000000000..728df28b00d5
--- /dev/null
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -0,0 +1,450 @@
1/*
2 * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#ifndef __DT_BINDINGS_CLOCK_IMX7D_H
11#define __DT_BINDINGS_CLOCK_IMX7D_H
12
13#define IMX7D_OSC_24M_CLK 0
14#define IMX7D_PLL_ARM_MAIN 1
15#define IMX7D_PLL_ARM_MAIN_CLK 2
16#define IMX7D_PLL_ARM_MAIN_SRC 3
17#define IMX7D_PLL_ARM_MAIN_BYPASS 4
18#define IMX7D_PLL_SYS_MAIN 5
19#define IMX7D_PLL_SYS_MAIN_CLK 6
20#define IMX7D_PLL_SYS_MAIN_SRC 7
21#define IMX7D_PLL_SYS_MAIN_BYPASS 8
22#define IMX7D_PLL_SYS_MAIN_480M 9
23#define IMX7D_PLL_SYS_MAIN_240M 10
24#define IMX7D_PLL_SYS_MAIN_120M 11
25#define IMX7D_PLL_SYS_MAIN_480M_CLK 12
26#define IMX7D_PLL_SYS_MAIN_240M_CLK 13
27#define IMX7D_PLL_SYS_MAIN_120M_CLK 14
28#define IMX7D_PLL_SYS_PFD0_392M_CLK 15
29#define IMX7D_PLL_SYS_PFD0_196M 16
30#define IMX7D_PLL_SYS_PFD0_196M_CLK 17
31#define IMX7D_PLL_SYS_PFD1_332M_CLK 18
32#define IMX7D_PLL_SYS_PFD1_166M 19
33#define IMX7D_PLL_SYS_PFD1_166M_CLK 20
34#define IMX7D_PLL_SYS_PFD2_270M_CLK 21
35#define IMX7D_PLL_SYS_PFD2_135M 22
36#define IMX7D_PLL_SYS_PFD2_135M_CLK 23
37#define IMX7D_PLL_SYS_PFD3_CLK 24
38#define IMX7D_PLL_SYS_PFD4_CLK 25
39#define IMX7D_PLL_SYS_PFD5_CLK 26
40#define IMX7D_PLL_SYS_PFD6_CLK 27
41#define IMX7D_PLL_SYS_PFD7_CLK 28
42#define IMX7D_PLL_ENET_MAIN 29
43#define IMX7D_PLL_ENET_MAIN_CLK 30
44#define IMX7D_PLL_ENET_MAIN_SRC 31
45#define IMX7D_PLL_ENET_MAIN_BYPASS 32
46#define IMX7D_PLL_ENET_MAIN_500M 33
47#define IMX7D_PLL_ENET_MAIN_250M 34
48#define IMX7D_PLL_ENET_MAIN_125M 35
49#define IMX7D_PLL_ENET_MAIN_100M 36
50#define IMX7D_PLL_ENET_MAIN_50M 37
51#define IMX7D_PLL_ENET_MAIN_40M 38
52#define IMX7D_PLL_ENET_MAIN_25M 39
53#define IMX7D_PLL_ENET_MAIN_500M_CLK 40
54#define IMX7D_PLL_ENET_MAIN_250M_CLK 41
55#define IMX7D_PLL_ENET_MAIN_125M_CLK 42
56#define IMX7D_PLL_ENET_MAIN_100M_CLK 43
57#define IMX7D_PLL_ENET_MAIN_50M_CLK 44
58#define IMX7D_PLL_ENET_MAIN_40M_CLK 45
59#define IMX7D_PLL_ENET_MAIN_25M_CLK 46
60#define IMX7D_PLL_DRAM_MAIN 47
61#define IMX7D_PLL_DRAM_MAIN_CLK 48
62#define IMX7D_PLL_DRAM_MAIN_SRC 49
63#define IMX7D_PLL_DRAM_MAIN_BYPASS 50
64#define IMX7D_PLL_DRAM_MAIN_533M 51
65#define IMX7D_PLL_DRAM_MAIN_533M_CLK 52
66#define IMX7D_PLL_AUDIO_MAIN 53
67#define IMX7D_PLL_AUDIO_MAIN_CLK 54
68#define IMX7D_PLL_AUDIO_MAIN_SRC 55
69#define IMX7D_PLL_AUDIO_MAIN_BYPASS 56
70#define IMX7D_PLL_VIDEO_MAIN_CLK 57
71#define IMX7D_PLL_VIDEO_MAIN 58
72#define IMX7D_PLL_VIDEO_MAIN_SRC 59
73#define IMX7D_PLL_VIDEO_MAIN_BYPASS 60
74#define IMX7D_USB_MAIN_480M_CLK 61
75#define IMX7D_ARM_A7_ROOT_CLK 62
76#define IMX7D_ARM_A7_ROOT_SRC 63
77#define IMX7D_ARM_A7_ROOT_CG 64
78#define IMX7D_ARM_A7_ROOT_DIV 65
79#define IMX7D_ARM_M4_ROOT_CLK 66
80#define IMX7D_ARM_M4_ROOT_SRC 67
81#define IMX7D_ARM_M4_ROOT_CG 68
82#define IMX7D_ARM_M4_ROOT_DIV 69
83#define IMX7D_ARM_M0_ROOT_CLK 70
84#define IMX7D_ARM_M0_ROOT_SRC 71
85#define IMX7D_ARM_M0_ROOT_CG 72
86#define IMX7D_ARM_M0_ROOT_DIV 73
87#define IMX7D_MAIN_AXI_ROOT_CLK 74
88#define IMX7D_MAIN_AXI_ROOT_SRC 75
89#define IMX7D_MAIN_AXI_ROOT_CG 76
90#define IMX7D_MAIN_AXI_ROOT_DIV 77
91#define IMX7D_DISP_AXI_ROOT_CLK 78
92#define IMX7D_DISP_AXI_ROOT_SRC 79
93#define IMX7D_DISP_AXI_ROOT_CG 80
94#define IMX7D_DISP_AXI_ROOT_DIV 81
95#define IMX7D_ENET_AXI_ROOT_CLK 82
96#define IMX7D_ENET_AXI_ROOT_SRC 83
97#define IMX7D_ENET_AXI_ROOT_CG 84
98#define IMX7D_ENET_AXI_ROOT_DIV 85
99#define IMX7D_NAND_USDHC_BUS_ROOT_CLK 86
100#define IMX7D_NAND_USDHC_BUS_ROOT_SRC 87
101#define IMX7D_NAND_USDHC_BUS_ROOT_CG 88
102#define IMX7D_NAND_USDHC_BUS_ROOT_DIV 89
103#define IMX7D_AHB_CHANNEL_ROOT_CLK 90
104#define IMX7D_AHB_CHANNEL_ROOT_SRC 91
105#define IMX7D_AHB_CHANNEL_ROOT_CG 92
106#define IMX7D_AHB_CHANNEL_ROOT_DIV 93
107#define IMX7D_DRAM_PHYM_ROOT_CLK 94
108#define IMX7D_DRAM_PHYM_ROOT_SRC 95
109#define IMX7D_DRAM_PHYM_ROOT_CG 96
110#define IMX7D_DRAM_PHYM_ROOT_DIV 97
111#define IMX7D_DRAM_ROOT_CLK 98
112#define IMX7D_DRAM_ROOT_SRC 99
113#define IMX7D_DRAM_ROOT_CG 100
114#define IMX7D_DRAM_ROOT_DIV 101
115#define IMX7D_DRAM_PHYM_ALT_ROOT_CLK 102
116#define IMX7D_DRAM_PHYM_ALT_ROOT_SRC 103
117#define IMX7D_DRAM_PHYM_ALT_ROOT_CG 104
118#define IMX7D_DRAM_PHYM_ALT_ROOT_DIV 105
119#define IMX7D_DRAM_ALT_ROOT_CLK 106
120#define IMX7D_DRAM_ALT_ROOT_SRC 107
121#define IMX7D_DRAM_ALT_ROOT_CG 108
122#define IMX7D_DRAM_ALT_ROOT_DIV 109
123#define IMX7D_USB_HSIC_ROOT_CLK 110
124#define IMX7D_USB_HSIC_ROOT_SRC 111
125#define IMX7D_USB_HSIC_ROOT_CG 112
126#define IMX7D_USB_HSIC_ROOT_DIV 113
127#define IMX7D_PCIE_CTRL_ROOT_CLK 114
128#define IMX7D_PCIE_CTRL_ROOT_SRC 115
129#define IMX7D_PCIE_CTRL_ROOT_CG 116
130#define IMX7D_PCIE_CTRL_ROOT_DIV 117
131#define IMX7D_PCIE_PHY_ROOT_CLK 118
132#define IMX7D_PCIE_PHY_ROOT_SRC 119
133#define IMX7D_PCIE_PHY_ROOT_CG 120
134#define IMX7D_PCIE_PHY_ROOT_DIV 121
135#define IMX7D_EPDC_PIXEL_ROOT_CLK 122
136#define IMX7D_EPDC_PIXEL_ROOT_SRC 123
137#define IMX7D_EPDC_PIXEL_ROOT_CG 124
138#define IMX7D_EPDC_PIXEL_ROOT_DIV 125
139#define IMX7D_LCDIF_PIXEL_ROOT_CLK 126
140#define IMX7D_LCDIF_PIXEL_ROOT_SRC 127
141#define IMX7D_LCDIF_PIXEL_ROOT_CG 128
142#define IMX7D_LCDIF_PIXEL_ROOT_DIV 129
143#define IMX7D_MIPI_DSI_ROOT_CLK 130
144#define IMX7D_MIPI_DSI_ROOT_SRC 131
145#define IMX7D_MIPI_DSI_ROOT_CG 132
146#define IMX7D_MIPI_DSI_ROOT_DIV 133
147#define IMX7D_MIPI_CSI_ROOT_CLK 134
148#define IMX7D_MIPI_CSI_ROOT_SRC 135
149#define IMX7D_MIPI_CSI_ROOT_CG 136
150#define IMX7D_MIPI_CSI_ROOT_DIV 137
151#define IMX7D_MIPI_DPHY_ROOT_CLK 138
152#define IMX7D_MIPI_DPHY_ROOT_SRC 139
153#define IMX7D_MIPI_DPHY_ROOT_CG 140
154#define IMX7D_MIPI_DPHY_ROOT_DIV 141
155#define IMX7D_SAI1_ROOT_CLK 142
156#define IMX7D_SAI1_ROOT_SRC 143
157#define IMX7D_SAI1_ROOT_CG 144
158#define IMX7D_SAI1_ROOT_DIV 145
159#define IMX7D_SAI2_ROOT_CLK 146
160#define IMX7D_SAI2_ROOT_SRC 147
161#define IMX7D_SAI2_ROOT_CG 148
162#define IMX7D_SAI2_ROOT_DIV 149
163#define IMX7D_SAI3_ROOT_CLK 150
164#define IMX7D_SAI3_ROOT_SRC 151
165#define IMX7D_SAI3_ROOT_CG 152
166#define IMX7D_SAI3_ROOT_DIV 153
167#define IMX7D_SPDIF_ROOT_CLK 154
168#define IMX7D_SPDIF_ROOT_SRC 155
169#define IMX7D_SPDIF_ROOT_CG 156
170#define IMX7D_SPDIF_ROOT_DIV 157
171#define IMX7D_ENET1_REF_ROOT_CLK 158
172#define IMX7D_ENET1_REF_ROOT_SRC 159
173#define IMX7D_ENET1_REF_ROOT_CG 160
174#define IMX7D_ENET1_REF_ROOT_DIV 161
175#define IMX7D_ENET1_TIME_ROOT_CLK 162
176#define IMX7D_ENET1_TIME_ROOT_SRC 163
177#define IMX7D_ENET1_TIME_ROOT_CG 164
178#define IMX7D_ENET1_TIME_ROOT_DIV 165
179#define IMX7D_ENET2_REF_ROOT_CLK 166
180#define IMX7D_ENET2_REF_ROOT_SRC 167
181#define IMX7D_ENET2_REF_ROOT_CG 168
182#define IMX7D_ENET2_REF_ROOT_DIV 169
183#define IMX7D_ENET2_TIME_ROOT_CLK 170
184#define IMX7D_ENET2_TIME_ROOT_SRC 171
185#define IMX7D_ENET2_TIME_ROOT_CG 172
186#define IMX7D_ENET2_TIME_ROOT_DIV 173
187#define IMX7D_ENET_PHY_REF_ROOT_CLK 174
188#define IMX7D_ENET_PHY_REF_ROOT_SRC 175
189#define IMX7D_ENET_PHY_REF_ROOT_CG 176
190#define IMX7D_ENET_PHY_REF_ROOT_DIV 177
191#define IMX7D_EIM_ROOT_CLK 178
192#define IMX7D_EIM_ROOT_SRC 179
193#define IMX7D_EIM_ROOT_CG 180
194#define IMX7D_EIM_ROOT_DIV 181
195#define IMX7D_NAND_ROOT_CLK 182
196#define IMX7D_NAND_ROOT_SRC 183
197#define IMX7D_NAND_ROOT_CG 184
198#define IMX7D_NAND_ROOT_DIV 185
199#define IMX7D_QSPI_ROOT_CLK 186
200#define IMX7D_QSPI_ROOT_SRC 187
201#define IMX7D_QSPI_ROOT_CG 188
202#define IMX7D_QSPI_ROOT_DIV 189
203#define IMX7D_USDHC1_ROOT_CLK 190
204#define IMX7D_USDHC1_ROOT_SRC 191
205#define IMX7D_USDHC1_ROOT_CG 192
206#define IMX7D_USDHC1_ROOT_DIV 193
207#define IMX7D_USDHC2_ROOT_CLK 194
208#define IMX7D_USDHC2_ROOT_SRC 195
209#define IMX7D_USDHC2_ROOT_CG 196
210#define IMX7D_USDHC2_ROOT_DIV 197
211#define IMX7D_USDHC3_ROOT_CLK 198
212#define IMX7D_USDHC3_ROOT_SRC 199
213#define IMX7D_USDHC3_ROOT_CG 200
214#define IMX7D_USDHC3_ROOT_DIV 201
215#define IMX7D_CAN1_ROOT_CLK 202
216#define IMX7D_CAN1_ROOT_SRC 203
217#define IMX7D_CAN1_ROOT_CG 204
218#define IMX7D_CAN1_ROOT_DIV 205
219#define IMX7D_CAN2_ROOT_CLK 206
220#define IMX7D_CAN2_ROOT_SRC 207
221#define IMX7D_CAN2_ROOT_CG 208
222#define IMX7D_CAN2_ROOT_DIV 209
223#define IMX7D_I2C1_ROOT_CLK 210
224#define IMX7D_I2C1_ROOT_SRC 211
225#define IMX7D_I2C1_ROOT_CG 212
226#define IMX7D_I2C1_ROOT_DIV 213
227#define IMX7D_I2C2_ROOT_CLK 214
228#define IMX7D_I2C2_ROOT_SRC 215
229#define IMX7D_I2C2_ROOT_CG 216
230#define IMX7D_I2C2_ROOT_DIV 217
231#define IMX7D_I2C3_ROOT_CLK 218
232#define IMX7D_I2C3_ROOT_SRC 219
233#define IMX7D_I2C3_ROOT_CG 220
234#define IMX7D_I2C3_ROOT_DIV 221
235#define IMX7D_I2C4_ROOT_CLK 222
236#define IMX7D_I2C4_ROOT_SRC 223
237#define IMX7D_I2C4_ROOT_CG 224
238#define IMX7D_I2C4_ROOT_DIV 225
239#define IMX7D_UART1_ROOT_CLK 226
240#define IMX7D_UART1_ROOT_SRC 227
241#define IMX7D_UART1_ROOT_CG 228
242#define IMX7D_UART1_ROOT_DIV 229
243#define IMX7D_UART2_ROOT_CLK 230
244#define IMX7D_UART2_ROOT_SRC 231
245#define IMX7D_UART2_ROOT_CG 232
246#define IMX7D_UART2_ROOT_DIV 233
247#define IMX7D_UART3_ROOT_CLK 234
248#define IMX7D_UART3_ROOT_SRC 235
249#define IMX7D_UART3_ROOT_CG 236
250#define IMX7D_UART3_ROOT_DIV 237
251#define IMX7D_UART4_ROOT_CLK 238
252#define IMX7D_UART4_ROOT_SRC 239
253#define IMX7D_UART4_ROOT_CG 240
254#define IMX7D_UART4_ROOT_DIV 241
255#define IMX7D_UART5_ROOT_CLK 242
256#define IMX7D_UART5_ROOT_SRC 243
257#define IMX7D_UART5_ROOT_CG 244
258#define IMX7D_UART5_ROOT_DIV 245
259#define IMX7D_UART6_ROOT_CLK 246
260#define IMX7D_UART6_ROOT_SRC 247
261#define IMX7D_UART6_ROOT_CG 248
262#define IMX7D_UART6_ROOT_DIV 249
263#define IMX7D_UART7_ROOT_CLK 250
264#define IMX7D_UART7_ROOT_SRC 251
265#define IMX7D_UART7_ROOT_CG 252
266#define IMX7D_UART7_ROOT_DIV 253
267#define IMX7D_ECSPI1_ROOT_CLK 254
268#define IMX7D_ECSPI1_ROOT_SRC 255
269#define IMX7D_ECSPI1_ROOT_CG 256
270#define IMX7D_ECSPI1_ROOT_DIV 257
271#define IMX7D_ECSPI2_ROOT_CLK 258
272#define IMX7D_ECSPI2_ROOT_SRC 259
273#define IMX7D_ECSPI2_ROOT_CG 260
274#define IMX7D_ECSPI2_ROOT_DIV 261
275#define IMX7D_ECSPI3_ROOT_CLK 262
276#define IMX7D_ECSPI3_ROOT_SRC 263
277#define IMX7D_ECSPI3_ROOT_CG 264
278#define IMX7D_ECSPI3_ROOT_DIV 265
279#define IMX7D_ECSPI4_ROOT_CLK 266
280#define IMX7D_ECSPI4_ROOT_SRC 267
281#define IMX7D_ECSPI4_ROOT_CG 268
282#define IMX7D_ECSPI4_ROOT_DIV 269
283#define IMX7D_PWM1_ROOT_CLK 270
284#define IMX7D_PWM1_ROOT_SRC 271
285#define IMX7D_PWM1_ROOT_CG 272
286#define IMX7D_PWM1_ROOT_DIV 273
287#define IMX7D_PWM2_ROOT_CLK 274
288#define IMX7D_PWM2_ROOT_SRC 275
289#define IMX7D_PWM2_ROOT_CG 276
290#define IMX7D_PWM2_ROOT_DIV 277
291#define IMX7D_PWM3_ROOT_CLK 278
292#define IMX7D_PWM3_ROOT_SRC 279
293#define IMX7D_PWM3_ROOT_CG 280
294#define IMX7D_PWM3_ROOT_DIV 281
295#define IMX7D_PWM4_ROOT_CLK 282
296#define IMX7D_PWM4_ROOT_SRC 283
297#define IMX7D_PWM4_ROOT_CG 284
298#define IMX7D_PWM4_ROOT_DIV 285
299#define IMX7D_FLEXTIMER1_ROOT_CLK 286
300#define IMX7D_FLEXTIMER1_ROOT_SRC 287
301#define IMX7D_FLEXTIMER1_ROOT_CG 288
302#define IMX7D_FLEXTIMER1_ROOT_DIV 289
303#define IMX7D_FLEXTIMER2_ROOT_CLK 290
304#define IMX7D_FLEXTIMER2_ROOT_SRC 291
305#define IMX7D_FLEXTIMER2_ROOT_CG 292
306#define IMX7D_FLEXTIMER2_ROOT_DIV 293
307#define IMX7D_SIM1_ROOT_CLK 294
308#define IMX7D_SIM1_ROOT_SRC 295
309#define IMX7D_SIM1_ROOT_CG 296
310#define IMX7D_SIM1_ROOT_DIV 297
311#define IMX7D_SIM2_ROOT_CLK 298
312#define IMX7D_SIM2_ROOT_SRC 299
313#define IMX7D_SIM2_ROOT_CG 300
314#define IMX7D_SIM2_ROOT_DIV 301
315#define IMX7D_GPT1_ROOT_CLK 302
316#define IMX7D_GPT1_ROOT_SRC 303
317#define IMX7D_GPT1_ROOT_CG 304
318#define IMX7D_GPT1_ROOT_DIV 305
319#define IMX7D_GPT2_ROOT_CLK 306
320#define IMX7D_GPT2_ROOT_SRC 307
321#define IMX7D_GPT2_ROOT_CG 308
322#define IMX7D_GPT2_ROOT_DIV 309
323#define IMX7D_GPT3_ROOT_CLK 310
324#define IMX7D_GPT3_ROOT_SRC 311
325#define IMX7D_GPT3_ROOT_CG 312
326#define IMX7D_GPT3_ROOT_DIV 313
327#define IMX7D_GPT4_ROOT_CLK 314
328#define IMX7D_GPT4_ROOT_SRC 315
329#define IMX7D_GPT4_ROOT_CG 316
330#define IMX7D_GPT4_ROOT_DIV 317
331#define IMX7D_TRACE_ROOT_CLK 318
332#define IMX7D_TRACE_ROOT_SRC 319
333#define IMX7D_TRACE_ROOT_CG 320
334#define IMX7D_TRACE_ROOT_DIV 321
335#define IMX7D_WDOG1_ROOT_CLK 322
336#define IMX7D_WDOG_ROOT_SRC 323
337#define IMX7D_WDOG_ROOT_CG 324
338#define IMX7D_WDOG_ROOT_DIV 325
339#define IMX7D_CSI_MCLK_ROOT_CLK 326
340#define IMX7D_CSI_MCLK_ROOT_SRC 327
341#define IMX7D_CSI_MCLK_ROOT_CG 328
342#define IMX7D_CSI_MCLK_ROOT_DIV 329
343#define IMX7D_AUDIO_MCLK_ROOT_CLK 330
344#define IMX7D_AUDIO_MCLK_ROOT_SRC 331
345#define IMX7D_AUDIO_MCLK_ROOT_CG 332
346#define IMX7D_AUDIO_MCLK_ROOT_DIV 333
347#define IMX7D_WRCLK_ROOT_CLK 334
348#define IMX7D_WRCLK_ROOT_SRC 335
349#define IMX7D_WRCLK_ROOT_CG 336
350#define IMX7D_WRCLK_ROOT_DIV 337
351#define IMX7D_CLKO1_ROOT_SRC 338
352#define IMX7D_CLKO1_ROOT_CG 339
353#define IMX7D_CLKO1_ROOT_DIV 340
354#define IMX7D_CLKO2_ROOT_SRC 341
355#define IMX7D_CLKO2_ROOT_CG 342
356#define IMX7D_CLKO2_ROOT_DIV 343
357#define IMX7D_MAIN_AXI_ROOT_PRE_DIV 344
358#define IMX7D_DISP_AXI_ROOT_PRE_DIV 345
359#define IMX7D_ENET_AXI_ROOT_PRE_DIV 346
360#define IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV 347
361#define IMX7D_AHB_CHANNEL_ROOT_PRE_DIV 348
362#define IMX7D_USB_HSIC_ROOT_PRE_DIV 349
363#define IMX7D_PCIE_CTRL_ROOT_PRE_DIV 350
364#define IMX7D_PCIE_PHY_ROOT_PRE_DIV 351
365#define IMX7D_EPDC_PIXEL_ROOT_PRE_DIV 352
366#define IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV 353
367#define IMX7D_MIPI_DSI_ROOT_PRE_DIV 354
368#define IMX7D_MIPI_CSI_ROOT_PRE_DIV 355
369#define IMX7D_MIPI_DPHY_ROOT_PRE_DIV 356
370#define IMX7D_SAI1_ROOT_PRE_DIV 357
371#define IMX7D_SAI2_ROOT_PRE_DIV 358
372#define IMX7D_SAI3_ROOT_PRE_DIV 359
373#define IMX7D_SPDIF_ROOT_PRE_DIV 360
374#define IMX7D_ENET1_REF_ROOT_PRE_DIV 361
375#define IMX7D_ENET1_TIME_ROOT_PRE_DIV 362
376#define IMX7D_ENET2_REF_ROOT_PRE_DIV 363
377#define IMX7D_ENET2_TIME_ROOT_PRE_DIV 364
378#define IMX7D_ENET_PHY_REF_ROOT_PRE_DIV 365
379#define IMX7D_EIM_ROOT_PRE_DIV 366
380#define IMX7D_NAND_ROOT_PRE_DIV 367
381#define IMX7D_QSPI_ROOT_PRE_DIV 368
382#define IMX7D_USDHC1_ROOT_PRE_DIV 369
383#define IMX7D_USDHC2_ROOT_PRE_DIV 370
384#define IMX7D_USDHC3_ROOT_PRE_DIV 371
385#define IMX7D_CAN1_ROOT_PRE_DIV 372
386#define IMX7D_CAN2_ROOT_PRE_DIV 373
387#define IMX7D_I2C1_ROOT_PRE_DIV 374
388#define IMX7D_I2C2_ROOT_PRE_DIV 375
389#define IMX7D_I2C3_ROOT_PRE_DIV 376
390#define IMX7D_I2C4_ROOT_PRE_DIV 377
391#define IMX7D_UART1_ROOT_PRE_DIV 378
392#define IMX7D_UART2_ROOT_PRE_DIV 379
393#define IMX7D_UART3_ROOT_PRE_DIV 380
394#define IMX7D_UART4_ROOT_PRE_DIV 381
395#define IMX7D_UART5_ROOT_PRE_DIV 382
396#define IMX7D_UART6_ROOT_PRE_DIV 383
397#define IMX7D_UART7_ROOT_PRE_DIV 384
398#define IMX7D_ECSPI1_ROOT_PRE_DIV 385
399#define IMX7D_ECSPI2_ROOT_PRE_DIV 386
400#define IMX7D_ECSPI3_ROOT_PRE_DIV 387
401#define IMX7D_ECSPI4_ROOT_PRE_DIV 388
402#define IMX7D_PWM1_ROOT_PRE_DIV 389
403#define IMX7D_PWM2_ROOT_PRE_DIV 390
404#define IMX7D_PWM3_ROOT_PRE_DIV 391
405#define IMX7D_PWM4_ROOT_PRE_DIV 392
406#define IMX7D_FLEXTIMER1_ROOT_PRE_DIV 393
407#define IMX7D_FLEXTIMER2_ROOT_PRE_DIV 394
408#define IMX7D_SIM1_ROOT_PRE_DIV 395
409#define IMX7D_SIM2_ROOT_PRE_DIV 396
410#define IMX7D_GPT1_ROOT_PRE_DIV 397
411#define IMX7D_GPT2_ROOT_PRE_DIV 398
412#define IMX7D_GPT3_ROOT_PRE_DIV 399
413#define IMX7D_GPT4_ROOT_PRE_DIV 400
414#define IMX7D_TRACE_ROOT_PRE_DIV 401
415#define IMX7D_WDOG_ROOT_PRE_DIV 402
416#define IMX7D_CSI_MCLK_ROOT_PRE_DIV 403
417#define IMX7D_AUDIO_MCLK_ROOT_PRE_DIV 404
418#define IMX7D_WRCLK_ROOT_PRE_DIV 405
419#define IMX7D_CLKO1_ROOT_PRE_DIV 406
420#define IMX7D_CLKO2_ROOT_PRE_DIV 407
421#define IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV 408
422#define IMX7D_DRAM_ALT_ROOT_PRE_DIV 409
423#define IMX7D_LVDS1_IN_CLK 410
424#define IMX7D_LVDS1_OUT_SEL 411
425#define IMX7D_LVDS1_OUT_CLK 412
426#define IMX7D_CLK_DUMMY 413
427#define IMX7D_GPT_3M_CLK 414
428#define IMX7D_OCRAM_CLK 415
429#define IMX7D_OCRAM_S_CLK 416
430#define IMX7D_WDOG2_ROOT_CLK 417
431#define IMX7D_WDOG3_ROOT_CLK 418
432#define IMX7D_WDOG4_ROOT_CLK 419
433#define IMX7D_SDMA_CORE_CLK 420
434#define IMX7D_USB1_MAIN_480M_CLK 421
435#define IMX7D_USB_CTRL_CLK 422
436#define IMX7D_USB_PHY1_CLK 423
437#define IMX7D_USB_PHY2_CLK 424
438#define IMX7D_IPG_ROOT_CLK 425
439#define IMX7D_SAI1_IPG_CLK 426
440#define IMX7D_SAI2_IPG_CLK 427
441#define IMX7D_SAI3_IPG_CLK 428
442#define IMX7D_PLL_AUDIO_TEST_DIV 429
443#define IMX7D_PLL_AUDIO_POST_DIV 430
444#define IMX7D_PLL_VIDEO_TEST_DIV 431
445#define IMX7D_PLL_VIDEO_POST_DIV 432
446#define IMX7D_MU_ROOT_CLK 433
447#define IMX7D_SEMA4_HS_ROOT_CLK 434
448#define IMX7D_PLL_DRAM_TEST_DIV 435
449#define IMX7D_CLK_END 436
450#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/jz4740-cgu.h
new file mode 100644
index 000000000000..43153d3e9bd2
--- /dev/null
+++ b/include/dt-bindings/clock/jz4740-cgu.h
@@ -0,0 +1,37 @@
1/*
2 * This header provides clock numbers for the ingenic,jz4740-cgu DT binding.
3 *
4 * They are roughly ordered as:
5 * - external clocks
6 * - PLLs
7 * - muxes/dividers in the order they appear in the jz4740 programmers manual
8 * - gates in order of their bit in the CLKGR* registers
9 */
10
11#ifndef __DT_BINDINGS_CLOCK_JZ4740_CGU_H__
12#define __DT_BINDINGS_CLOCK_JZ4740_CGU_H__
13
14#define JZ4740_CLK_EXT 0
15#define JZ4740_CLK_RTC 1
16#define JZ4740_CLK_PLL 2
17#define JZ4740_CLK_PLL_HALF 3
18#define JZ4740_CLK_CCLK 4
19#define JZ4740_CLK_HCLK 5
20#define JZ4740_CLK_PCLK 6
21#define JZ4740_CLK_MCLK 7
22#define JZ4740_CLK_LCD 8
23#define JZ4740_CLK_LCD_PCLK 9
24#define JZ4740_CLK_I2S 10
25#define JZ4740_CLK_SPI 11
26#define JZ4740_CLK_MMC 12
27#define JZ4740_CLK_UHC 13
28#define JZ4740_CLK_UDC 14
29#define JZ4740_CLK_UART0 15
30#define JZ4740_CLK_UART1 16
31#define JZ4740_CLK_DMA 17
32#define JZ4740_CLK_IPU 18
33#define JZ4740_CLK_ADC 19
34#define JZ4740_CLK_I2C 20
35#define JZ4740_CLK_AIC 21
36
37#endif /* __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ */
diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/jz4780-cgu.h
new file mode 100644
index 000000000000..467165e3cfee
--- /dev/null
+++ b/include/dt-bindings/clock/jz4780-cgu.h
@@ -0,0 +1,88 @@
1/*
2 * This header provides clock numbers for the ingenic,jz4780-cgu DT binding.
3 *
4 * They are roughly ordered as:
5 * - external clocks
6 * - PLLs
7 * - muxes/dividers in the order they appear in the jz4780 programmers manual
8 * - gates in order of their bit in the CLKGR* registers
9 */
10
11#ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
12#define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
13
14#define JZ4780_CLK_EXCLK 0
15#define JZ4780_CLK_RTCLK 1
16#define JZ4780_CLK_APLL 2
17#define JZ4780_CLK_MPLL 3
18#define JZ4780_CLK_EPLL 4
19#define JZ4780_CLK_VPLL 5
20#define JZ4780_CLK_OTGPHY 6
21#define JZ4780_CLK_SCLKA 7
22#define JZ4780_CLK_CPUMUX 8
23#define JZ4780_CLK_CPU 9
24#define JZ4780_CLK_L2CACHE 10
25#define JZ4780_CLK_AHB0 11
26#define JZ4780_CLK_AHB2PMUX 12
27#define JZ4780_CLK_AHB2 13
28#define JZ4780_CLK_PCLK 14
29#define JZ4780_CLK_DDR 15
30#define JZ4780_CLK_VPU 16
31#define JZ4780_CLK_I2SPLL 17
32#define JZ4780_CLK_I2S 18
33#define JZ4780_CLK_LCD0PIXCLK 19
34#define JZ4780_CLK_LCD1PIXCLK 20
35#define JZ4780_CLK_MSCMUX 21
36#define JZ4780_CLK_MSC0 22
37#define JZ4780_CLK_MSC1 23
38#define JZ4780_CLK_MSC2 24
39#define JZ4780_CLK_UHC 25
40#define JZ4780_CLK_SSIPLL 26
41#define JZ4780_CLK_SSI 27
42#define JZ4780_CLK_CIMMCLK 28
43#define JZ4780_CLK_PCMPLL 29
44#define JZ4780_CLK_PCM 30
45#define JZ4780_CLK_GPU 31
46#define JZ4780_CLK_HDMI 32
47#define JZ4780_CLK_BCH 33
48#define JZ4780_CLK_NEMC 34
49#define JZ4780_CLK_OTG0 35
50#define JZ4780_CLK_SSI0 36
51#define JZ4780_CLK_SMB0 37
52#define JZ4780_CLK_SMB1 38
53#define JZ4780_CLK_SCC 39
54#define JZ4780_CLK_AIC 40
55#define JZ4780_CLK_TSSI0 41
56#define JZ4780_CLK_OWI 42
57#define JZ4780_CLK_KBC 43
58#define JZ4780_CLK_SADC 44
59#define JZ4780_CLK_UART0 45
60#define JZ4780_CLK_UART1 46
61#define JZ4780_CLK_UART2 47
62#define JZ4780_CLK_UART3 48
63#define JZ4780_CLK_SSI1 49
64#define JZ4780_CLK_SSI2 50
65#define JZ4780_CLK_PDMA 51
66#define JZ4780_CLK_GPS 52
67#define JZ4780_CLK_MAC 53
68#define JZ4780_CLK_SMB2 54
69#define JZ4780_CLK_CIM 55
70#define JZ4780_CLK_LCD 56
71#define JZ4780_CLK_TVE 57
72#define JZ4780_CLK_IPU 58
73#define JZ4780_CLK_DDR0 59
74#define JZ4780_CLK_DDR1 60
75#define JZ4780_CLK_SMB3 61
76#define JZ4780_CLK_TSSI1 62
77#define JZ4780_CLK_COMPRESS 63
78#define JZ4780_CLK_AIC1 64
79#define JZ4780_CLK_GPVLC 65
80#define JZ4780_CLK_OTG1 66
81#define JZ4780_CLK_UART4 67
82#define JZ4780_CLK_AHBMON 68
83#define JZ4780_CLK_SMB4 69
84#define JZ4780_CLK_DES 70
85#define JZ4780_CLK_X2D 71
86#define JZ4780_CLK_CORE1 72
87
88#endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */
diff --git a/include/dt-bindings/clock/lpc18xx-ccu.h b/include/dt-bindings/clock/lpc18xx-ccu.h
new file mode 100644
index 000000000000..bbfe00b6ab7d
--- /dev/null
+++ b/include/dt-bindings/clock/lpc18xx-ccu.h
@@ -0,0 +1,74 @@
1/*
2 * Copyright (c) 2015 Joachim Eastwood <manabian@gmail.com>
3 *
4 * This code is released using a dual license strategy: BSD/GPL
5 * You can choose the licence that better fits your requirements.
6 *
7 * Released under the terms of 3-clause BSD License
8 * Released under the terms of GNU General Public License Version 2.0
9 *
10 */
11
12/* Clock Control Unit 1 (CCU1) clock offsets */
13#define CLK_APB3_BUS 0x100
14#define CLK_APB3_I2C1 0x108
15#define CLK_APB3_DAC 0x110
16#define CLK_APB3_ADC0 0x118
17#define CLK_APB3_ADC1 0x120
18#define CLK_APB3_CAN0 0x128
19#define CLK_APB1_BUS 0x200
20#define CLK_APB1_MOTOCON_PWM 0x208
21#define CLK_APB1_I2C0 0x210
22#define CLK_APB1_I2S 0x218
23#define CLK_APB1_CAN1 0x220
24#define CLK_SPIFI 0x300
25#define CLK_CPU_BUS 0x400
26#define CLK_CPU_SPIFI 0x408
27#define CLK_CPU_GPIO 0x410
28#define CLK_CPU_LCD 0x418
29#define CLK_CPU_ETHERNET 0x420
30#define CLK_CPU_USB0 0x428
31#define CLK_CPU_EMC 0x430
32#define CLK_CPU_SDIO 0x438
33#define CLK_CPU_DMA 0x440
34#define CLK_CPU_CORE 0x448
35#define CLK_CPU_SCT 0x468
36#define CLK_CPU_USB1 0x470
37#define CLK_CPU_EMCDIV 0x478
38#define CLK_CPU_FLASHA 0x480
39#define CLK_CPU_FLASHB 0x488
40#define CLK_CPU_M0APP 0x490
41#define CLK_CPU_ADCHS 0x498
42#define CLK_CPU_EEPROM 0x4a0
43#define CLK_CPU_WWDT 0x500
44#define CLK_CPU_UART0 0x508
45#define CLK_CPU_UART1 0x510
46#define CLK_CPU_SSP0 0x518
47#define CLK_CPU_TIMER0 0x520
48#define CLK_CPU_TIMER1 0x528
49#define CLK_CPU_SCU 0x530
50#define CLK_CPU_CREG 0x538
51#define CLK_CPU_RITIMER 0x600
52#define CLK_CPU_UART2 0x608
53#define CLK_CPU_UART3 0x610
54#define CLK_CPU_TIMER2 0x618
55#define CLK_CPU_TIMER3 0x620
56#define CLK_CPU_SSP1 0x628
57#define CLK_CPU_QEI 0x630
58#define CLK_PERIPH_BUS 0x700
59#define CLK_PERIPH_CORE 0x710
60#define CLK_PERIPH_SGPIO 0x718
61#define CLK_USB0 0x800
62#define CLK_USB1 0x900
63#define CLK_SPI 0xA00
64#define CLK_ADCHS 0xB00
65
66/* Clock Control Unit 2 (CCU2) clock offsets */
67#define CLK_AUDIO 0x100
68#define CLK_APB2_UART3 0x200
69#define CLK_APB2_UART2 0x300
70#define CLK_APB0_UART1 0x400
71#define CLK_APB0_UART0 0x500
72#define CLK_APB2_SSP1 0x600
73#define CLK_APB0_SSP0 0x700
74#define CLK_SDIO 0x800
diff --git a/include/dt-bindings/clock/lpc18xx-cgu.h b/include/dt-bindings/clock/lpc18xx-cgu.h
new file mode 100644
index 000000000000..6e57c6d2ca66
--- /dev/null
+++ b/include/dt-bindings/clock/lpc18xx-cgu.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (c) 2015 Joachim Eastwood <manabian@gmail.com>
3 *
4 * This code is released using a dual license strategy: BSD/GPL
5 * You can choose the licence that better fits your requirements.
6 *
7 * Released under the terms of 3-clause BSD License
8 * Released under the terms of GNU General Public License Version 2.0
9 *
10 */
11
12/* LPC18xx/43xx base clock ids */
13#define BASE_SAFE_CLK 0
14#define BASE_USB0_CLK 1
15#define BASE_PERIPH_CLK 2
16#define BASE_USB1_CLK 3
17#define BASE_CPU_CLK 4
18#define BASE_SPIFI_CLK 5
19#define BASE_SPI_CLK 6
20#define BASE_PHY_RX_CLK 7
21#define BASE_PHY_TX_CLK 8
22#define BASE_APB1_CLK 9
23#define BASE_APB3_CLK 10
24#define BASE_LCD_CLK 11
25#define BASE_ADCHS_CLK 12
26#define BASE_SDIO_CLK 13
27#define BASE_SSP0_CLK 14
28#define BASE_SSP1_CLK 15
29#define BASE_UART0_CLK 16
30#define BASE_UART1_CLK 17
31#define BASE_UART2_CLK 18
32#define BASE_UART3_CLK 19
33#define BASE_OUT_CLK 20
34#define BASE_RES1_CLK 21
35#define BASE_RES2_CLK 22
36#define BASE_RES3_CLK 23
37#define BASE_RES4_CLK 24
38#define BASE_AUDIO_CLK 25
39#define BASE_CGU_OUT0_CLK 26
40#define BASE_CGU_OUT1_CLK 27
41#define BASE_CLK_MAX (BASE_CGU_OUT1_CLK + 1)
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 591f7fba89e2..7a510384a82a 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -48,6 +48,7 @@
 #define MMP2_CLK_SSP1			78
 #define MMP2_CLK_SSP2			79
 #define MMP2_CLK_SSP3			80
+#define MMP2_CLK_TIMER			81
 
 /* axi periphrals */
 #define MMP2_CLK_SDH0			101
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
index 79630b9d74b8..3e45bdfe1aa4 100644
--- a/include/dt-bindings/clock/marvell,pxa168.h
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -18,7 +18,9 @@
 #define PXA168_CLK_PLL1_13_1_5		18
 #define PXA168_CLK_PLL1_2_1_5		19
 #define PXA168_CLK_PLL1_3_16		20
+#define PXA168_CLK_PLL1_192		21
 #define PXA168_CLK_UART_PLL		27
+#define PXA168_CLK_USB_PLL		28
 
 /* apb periphrals */
 #define PXA168_CLK_TWSI0		60
@@ -40,6 +42,7 @@
 #define PXA168_CLK_SSP2			76
 #define PXA168_CLK_SSP3			77
 #define PXA168_CLK_SSP4			78
+#define PXA168_CLK_TIMER		79
 
 /* axi periphrals */
 #define PXA168_CLK_DFC			100
diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h
new file mode 100644
index 000000000000..d4f2e18919ff
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa1928.h
@@ -0,0 +1,57 @@
1#ifndef __DTS_MARVELL_PXA1928_CLOCK_H
2#define __DTS_MARVELL_PXA1928_CLOCK_H
3
4/*
5 * Clock ID values here correspond to the control register offset/4.
6 */
7
8/* apb peripherals */
9#define PXA1928_CLK_RTC 0x00
10#define PXA1928_CLK_TWSI0 0x01
11#define PXA1928_CLK_TWSI1 0x02
12#define PXA1928_CLK_TWSI2 0x03
13#define PXA1928_CLK_TWSI3 0x04
14#define PXA1928_CLK_OWIRE 0x05
15#define PXA1928_CLK_KPC 0x06
16#define PXA1928_CLK_TB_ROTARY 0x07
17#define PXA1928_CLK_SW_JTAG 0x08
18#define PXA1928_CLK_TIMER1 0x09
19#define PXA1928_CLK_UART0 0x0b
20#define PXA1928_CLK_UART1 0x0c
21#define PXA1928_CLK_UART2 0x0d
22#define PXA1928_CLK_GPIO 0x0e
23#define PXA1928_CLK_PWM0 0x0f
24#define PXA1928_CLK_PWM1 0x10
25#define PXA1928_CLK_PWM2 0x11
26#define PXA1928_CLK_PWM3 0x12
27#define PXA1928_CLK_SSP0 0x13
28#define PXA1928_CLK_SSP1 0x14
29#define PXA1928_CLK_SSP2 0x15
30
31#define PXA1928_CLK_TWSI4 0x1f
32#define PXA1928_CLK_TWSI5 0x20
33#define PXA1928_CLK_UART3 0x22
34#define PXA1928_CLK_THSENS_GLOB 0x24
35#define PXA1928_CLK_THSENS_CPU 0x26
36#define PXA1928_CLK_THSENS_VPU 0x27
37#define PXA1928_CLK_THSENS_GC 0x28
38#define PXA1928_APBC_NR_CLKS 0x30
39
40
41/* axi peripherals */
42#define PXA1928_CLK_SDH0 0x15
43#define PXA1928_CLK_SDH1 0x16
44#define PXA1928_CLK_USB 0x17
45#define PXA1928_CLK_NAND 0x18
46#define PXA1928_CLK_DMA 0x19
47
48#define PXA1928_CLK_SDH2 0x3a
49#define PXA1928_CLK_SDH3 0x3b
50#define PXA1928_CLK_HSIC 0x3e
51#define PXA1928_CLK_SDH4 0x57
52#define PXA1928_CLK_GC3D 0x5d
53#define PXA1928_CLK_GC2D 0x5f
54
55#define PXA1928_APMU_NR_CLKS 0x60
56
57#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
index 719cffb2bea2..135082a0b62f 100644
--- a/include/dt-bindings/clock/marvell,pxa910.h
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -18,7 +18,9 @@
 #define PXA910_CLK_PLL1_13_1_5		18
 #define PXA910_CLK_PLL1_2_1_5		19
 #define PXA910_CLK_PLL1_3_16		20
+#define PXA910_CLK_PLL1_192		21
 #define PXA910_CLK_UART_PLL		27
+#define PXA910_CLK_USB_PLL		28
 
 /* apb periphrals */
 #define PXA910_CLK_TWSI0		60
@@ -37,6 +39,8 @@
 #define PXA910_CLK_UART2		73
 #define PXA910_CLK_SSP0			74
 #define PXA910_CLK_SSP1			75
+#define PXA910_CLK_TIMER0		76
+#define PXA910_CLK_TIMER1		77
 
 /* axi periphrals */
 #define PXA910_CLK_DFC			100
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
new file mode 100644
index 000000000000..bd2720d58e0c
--- /dev/null
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -0,0 +1,25 @@
1/*
2 * Meson8b clock tree IDs
3 */
4
5#ifndef __MESON8B_CLKC_H
6#define __MESON8B_CLKC_H
7
8#define CLKID_UNUSED 0
9#define CLKID_XTAL 1
10#define CLKID_PLL_FIXED 2
11#define CLKID_PLL_VID 3
12#define CLKID_PLL_SYS 4
13#define CLKID_FCLK_DIV2 5
14#define CLKID_FCLK_DIV3 6
15#define CLKID_FCLK_DIV4 7
16#define CLKID_FCLK_DIV5 8
17#define CLKID_FCLK_DIV7 9
18#define CLKID_CLK81 10
19#define CLKID_MALI 11
20#define CLKID_CPUCLK 12
21#define CLKID_ZERO 13
22
23#define CLK_NR_CLKS (CLKID_ZERO + 1)
24
25#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt8135-clk.h b/include/dt-bindings/clock/mt8135-clk.h
new file mode 100644
index 000000000000..6dac6c091dd2
--- /dev/null
+++ b/include/dt-bindings/clock/mt8135-clk.h
@@ -0,0 +1,194 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: James Liao <jamesjj.liao@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _DT_BINDINGS_CLK_MT8135_H
16#define _DT_BINDINGS_CLK_MT8135_H
17
18/* TOPCKGEN */
19
20#define CLK_TOP_DSI0_LNTC_DSICLK 1
21#define CLK_TOP_HDMITX_CLKDIG_CTS 2
22#define CLK_TOP_CLKPH_MCK 3
23#define CLK_TOP_CPUM_TCK_IN 4
24#define CLK_TOP_MAINPLL_806M 5
25#define CLK_TOP_MAINPLL_537P3M 6
26#define CLK_TOP_MAINPLL_322P4M 7
27#define CLK_TOP_MAINPLL_230P3M 8
28#define CLK_TOP_UNIVPLL_624M 9
29#define CLK_TOP_UNIVPLL_416M 10
30#define CLK_TOP_UNIVPLL_249P6M 11
31#define CLK_TOP_UNIVPLL_178P3M 12
32#define CLK_TOP_UNIVPLL_48M 13
33#define CLK_TOP_MMPLL_D2 14
34#define CLK_TOP_MMPLL_D3 15
35#define CLK_TOP_MMPLL_D5 16
36#define CLK_TOP_MMPLL_D7 17
37#define CLK_TOP_MMPLL_D4 18
38#define CLK_TOP_MMPLL_D6 19
39#define CLK_TOP_SYSPLL_D2 20
40#define CLK_TOP_SYSPLL_D4 21
41#define CLK_TOP_SYSPLL_D6 22
42#define CLK_TOP_SYSPLL_D8 23
43#define CLK_TOP_SYSPLL_D10 24
44#define CLK_TOP_SYSPLL_D12 25
45#define CLK_TOP_SYSPLL_D16 26
46#define CLK_TOP_SYSPLL_D24 27
47#define CLK_TOP_SYSPLL_D3 28
48#define CLK_TOP_SYSPLL_D2P5 29
49#define CLK_TOP_SYSPLL_D5 30
50#define CLK_TOP_SYSPLL_D3P5 31
51#define CLK_TOP_UNIVPLL1_D2 32
52#define CLK_TOP_UNIVPLL1_D4 33
53#define CLK_TOP_UNIVPLL1_D6 34
54#define CLK_TOP_UNIVPLL1_D8 35
55#define CLK_TOP_UNIVPLL1_D10 36
56#define CLK_TOP_UNIVPLL2_D2 37
57#define CLK_TOP_UNIVPLL2_D4 38
58#define CLK_TOP_UNIVPLL2_D6 39
59#define CLK_TOP_UNIVPLL2_D8 40
60#define CLK_TOP_UNIVPLL_D3 41
61#define CLK_TOP_UNIVPLL_D5 42
62#define CLK_TOP_UNIVPLL_D7 43
63#define CLK_TOP_UNIVPLL_D10 44
64#define CLK_TOP_UNIVPLL_D26 45
65#define CLK_TOP_APLL 46
66#define CLK_TOP_APLL_D4 47
67#define CLK_TOP_APLL_D8 48
68#define CLK_TOP_APLL_D16 49
69#define CLK_TOP_APLL_D24 50
70#define CLK_TOP_LVDSPLL_D2 51
71#define CLK_TOP_LVDSPLL_D4 52
72#define CLK_TOP_LVDSPLL_D8 53
73#define CLK_TOP_LVDSTX_CLKDIG_CT 54
74#define CLK_TOP_VPLL_DPIX 55
75#define CLK_TOP_TVHDMI_H 56
76#define CLK_TOP_HDMITX_CLKDIG_D2 57
77#define CLK_TOP_HDMITX_CLKDIG_D3 58
78#define CLK_TOP_TVHDMI_D2 59
79#define CLK_TOP_TVHDMI_D4 60
80#define CLK_TOP_MEMPLL_MCK_D4 61
81#define CLK_TOP_AXI_SEL 62
82#define CLK_TOP_SMI_SEL 63
83#define CLK_TOP_MFG_SEL 64
84#define CLK_TOP_IRDA_SEL 65
85#define CLK_TOP_CAM_SEL 66
86#define CLK_TOP_AUD_INTBUS_SEL 67
87#define CLK_TOP_JPG_SEL 68
88#define CLK_TOP_DISP_SEL 69
89#define CLK_TOP_MSDC30_1_SEL 70
90#define CLK_TOP_MSDC30_2_SEL 71
91#define CLK_TOP_MSDC30_3_SEL 72
92#define CLK_TOP_MSDC30_4_SEL 73
93#define CLK_TOP_USB20_SEL 74
94#define CLK_TOP_VENC_SEL 75
95#define CLK_TOP_SPI_SEL 76
96#define CLK_TOP_UART_SEL 77
97#define CLK_TOP_MEM_SEL 78
98#define CLK_TOP_CAMTG_SEL 79
99#define CLK_TOP_AUDIO_SEL 80
100#define CLK_TOP_FIX_SEL 81
101#define CLK_TOP_VDEC_SEL 82
102#define CLK_TOP_DDRPHYCFG_SEL 83
103#define CLK_TOP_DPILVDS_SEL 84
104#define CLK_TOP_PMICSPI_SEL 85
105#define CLK_TOP_MSDC30_0_SEL 86
106#define CLK_TOP_SMI_MFG_AS_SEL 87
107#define CLK_TOP_GCPU_SEL 88
108#define CLK_TOP_DPI1_SEL 89
109#define CLK_TOP_CCI_SEL 90
110#define CLK_TOP_APLL_SEL 91
111#define CLK_TOP_HDMIPLL_SEL 92
112#define CLK_TOP_NR_CLK 93
113
114/* APMIXED_SYS */
115
116#define CLK_APMIXED_ARMPLL1 1
117#define CLK_APMIXED_ARMPLL2 2
118#define CLK_APMIXED_MAINPLL 3
119#define CLK_APMIXED_UNIVPLL 4
120#define CLK_APMIXED_MMPLL 5
121#define CLK_APMIXED_MSDCPLL 6
122#define CLK_APMIXED_TVDPLL 7
123#define CLK_APMIXED_LVDSPLL 8
124#define CLK_APMIXED_AUDPLL 9
125#define CLK_APMIXED_VDECPLL 10
126#define CLK_APMIXED_NR_CLK 11
127
128/* INFRA_SYS */
129
130#define CLK_INFRA_PMIC_WRAP 1
131#define CLK_INFRA_PMICSPI 2
132#define CLK_INFRA_CCIF1_AP_CTRL 3
133#define CLK_INFRA_CCIF0_AP_CTRL 4
134#define CLK_INFRA_KP 5
135#define CLK_INFRA_CPUM 6
136#define CLK_INFRA_M4U 7
137#define CLK_INFRA_MFGAXI 8
138#define CLK_INFRA_DEVAPC 9
139#define CLK_INFRA_AUDIO 10
140#define CLK_INFRA_MFG_BUS 11
141#define CLK_INFRA_SMI 12
142#define CLK_INFRA_DBGCLK 13
143#define CLK_INFRA_NR_CLK 14
144
145/* PERI_SYS */
146
147#define CLK_PERI_I2C5 1
148#define CLK_PERI_I2C4 2
149#define CLK_PERI_I2C3 3
150#define CLK_PERI_I2C2 4
151#define CLK_PERI_I2C1 5
152#define CLK_PERI_I2C0 6
153#define CLK_PERI_UART3 7
154#define CLK_PERI_UART2 8
155#define CLK_PERI_UART1 9
156#define CLK_PERI_UART0 10
157#define CLK_PERI_IRDA 11
158#define CLK_PERI_NLI 12
159#define CLK_PERI_MD_HIF 13
160#define CLK_PERI_AP_HIF 14
161#define CLK_PERI_MSDC30_3 15
162#define CLK_PERI_MSDC30_2 16
163#define CLK_PERI_MSDC30_1 17
164#define CLK_PERI_MSDC20_2 18
165#define CLK_PERI_MSDC20_1 19
166#define CLK_PERI_AP_DMA 20
167#define CLK_PERI_USB1 21
168#define CLK_PERI_USB0 22
169#define CLK_PERI_PWM 23
170#define CLK_PERI_PWM7 24
171#define CLK_PERI_PWM6 25
172#define CLK_PERI_PWM5 26
173#define CLK_PERI_PWM4 27
174#define CLK_PERI_PWM3 28
175#define CLK_PERI_PWM2 29
176#define CLK_PERI_PWM1 30
177#define CLK_PERI_THERM 31
178#define CLK_PERI_NFI 32
179#define CLK_PERI_USBSLV 33
180#define CLK_PERI_USB1_MCU 34
181#define CLK_PERI_USB0_MCU 35
182#define CLK_PERI_GCPU 36
183#define CLK_PERI_FHCTL 37
184#define CLK_PERI_SPI1 38
185#define CLK_PERI_AUXADC 39
186#define CLK_PERI_PERI_PWRAP 40
187#define CLK_PERI_I2C6 41
188#define CLK_PERI_UART0_SEL 42
189#define CLK_PERI_UART1_SEL 43
190#define CLK_PERI_UART2_SEL 44
191#define CLK_PERI_UART3_SEL 45
192#define CLK_PERI_NR_CLK 46
193
194#endif /* _DT_BINDINGS_CLK_MT8135_H */
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
new file mode 100644
index 000000000000..4ad76ed882ad
--- /dev/null
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -0,0 +1,235 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: James Liao <jamesjj.liao@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _DT_BINDINGS_CLK_MT8173_H
16#define _DT_BINDINGS_CLK_MT8173_H
17
18/* TOPCKGEN */
19
20#define CLK_TOP_CLKPH_MCK_O 1
21#define CLK_TOP_DPI 2
22#define CLK_TOP_USB_SYSPLL_125M 3
23#define CLK_TOP_HDMITX_DIG_CTS 4
24#define CLK_TOP_ARMCA7PLL_754M 5
25#define CLK_TOP_ARMCA7PLL_502M 6
26#define CLK_TOP_MAIN_H546M 7
27#define CLK_TOP_MAIN_H364M 8
28#define CLK_TOP_MAIN_H218P4M 9
29#define CLK_TOP_MAIN_H156M 10
30#define CLK_TOP_TVDPLL_445P5M 11
31#define CLK_TOP_TVDPLL_594M 12
32#define CLK_TOP_UNIV_624M 13
33#define CLK_TOP_UNIV_416M 14
34#define CLK_TOP_UNIV_249P6M 15
35#define CLK_TOP_UNIV_178P3M 16
36#define CLK_TOP_UNIV_48M 17
37#define CLK_TOP_CLKRTC_EXT 18
38#define CLK_TOP_CLKRTC_INT 19
39#define CLK_TOP_FPC 20
40#define CLK_TOP_HDMITXPLL_D2 21
41#define CLK_TOP_HDMITXPLL_D3 22
42#define CLK_TOP_ARMCA7PLL_D2 23
43#define CLK_TOP_ARMCA7PLL_D3 24
44#define CLK_TOP_APLL1 25
45#define CLK_TOP_APLL2 26
46#define CLK_TOP_DMPLL 27
47#define CLK_TOP_DMPLL_D2 28
48#define CLK_TOP_DMPLL_D4 29
49#define CLK_TOP_DMPLL_D8 30
50#define CLK_TOP_DMPLL_D16 31
51#define CLK_TOP_LVDSPLL_D2 32
52#define CLK_TOP_LVDSPLL_D4 33
53#define CLK_TOP_LVDSPLL_D8 34
54#define CLK_TOP_MMPLL 35
55#define CLK_TOP_MMPLL_D2 36
56#define CLK_TOP_MSDCPLL 37
57#define CLK_TOP_MSDCPLL_D2 38
58#define CLK_TOP_MSDCPLL_D4 39
59#define CLK_TOP_MSDCPLL2 40
60#define CLK_TOP_MSDCPLL2_D2 41
61#define CLK_TOP_MSDCPLL2_D4 42
62#define CLK_TOP_SYSPLL_D2 43
63#define CLK_TOP_SYSPLL1_D2 44
64#define CLK_TOP_SYSPLL1_D4 45
65#define CLK_TOP_SYSPLL1_D8 46
66#define CLK_TOP_SYSPLL1_D16 47
67#define CLK_TOP_SYSPLL_D3 48
68#define CLK_TOP_SYSPLL2_D2 49
69#define CLK_TOP_SYSPLL2_D4 50
70#define CLK_TOP_SYSPLL_D5 51
71#define CLK_TOP_SYSPLL3_D2 52
72#define CLK_TOP_SYSPLL3_D4 53
73#define CLK_TOP_SYSPLL_D7 54
74#define CLK_TOP_SYSPLL4_D2 55
75#define CLK_TOP_SYSPLL4_D4 56
76#define CLK_TOP_TVDPLL 57
77#define CLK_TOP_TVDPLL_D2 58
78#define CLK_TOP_TVDPLL_D4 59
79#define CLK_TOP_TVDPLL_D8 60
80#define CLK_TOP_TVDPLL_D16 61
81#define CLK_TOP_UNIVPLL_D2 62
82#define CLK_TOP_UNIVPLL1_D2 63
83#define CLK_TOP_UNIVPLL1_D4 64
84#define CLK_TOP_UNIVPLL1_D8 65
85#define CLK_TOP_UNIVPLL_D3 66
86#define CLK_TOP_UNIVPLL2_D2 67
87#define CLK_TOP_UNIVPLL2_D4 68
88#define CLK_TOP_UNIVPLL2_D8 69
89#define CLK_TOP_UNIVPLL_D5 70
90#define CLK_TOP_UNIVPLL3_D2 71
91#define CLK_TOP_UNIVPLL3_D4 72
92#define CLK_TOP_UNIVPLL3_D8 73
93#define CLK_TOP_UNIVPLL_D7 74
94#define CLK_TOP_UNIVPLL_D26 75
95#define CLK_TOP_UNIVPLL_D52 76
96#define CLK_TOP_VCODECPLL 77
97#define CLK_TOP_VCODECPLL_370P5 78
98#define CLK_TOP_VENCPLL 79
99#define CLK_TOP_VENCPLL_D2 80
100#define CLK_TOP_VENCPLL_D4 81
101#define CLK_TOP_AXI_SEL 82
102#define CLK_TOP_MEM_SEL 83
103#define CLK_TOP_DDRPHYCFG_SEL 84
104#define CLK_TOP_MM_SEL 85
105#define CLK_TOP_PWM_SEL 86
106#define CLK_TOP_VDEC_SEL 87
107#define CLK_TOP_VENC_SEL 88
108#define CLK_TOP_MFG_SEL 89
109#define CLK_TOP_CAMTG_SEL 90
110#define CLK_TOP_UART_SEL 91
111#define CLK_TOP_SPI_SEL 92
112#define CLK_TOP_USB20_SEL 93
113#define CLK_TOP_USB30_SEL 94
114#define CLK_TOP_MSDC50_0_H_SEL 95
115#define CLK_TOP_MSDC50_0_SEL 96
116#define CLK_TOP_MSDC30_1_SEL 97
117#define CLK_TOP_MSDC30_2_SEL 98
118#define CLK_TOP_MSDC30_3_SEL 99
119#define CLK_TOP_AUDIO_SEL 100
120#define CLK_TOP_AUD_INTBUS_SEL 101
121#define CLK_TOP_PMICSPI_SEL 102
122#define CLK_TOP_SCP_SEL 103
123#define CLK_TOP_ATB_SEL 104
124#define CLK_TOP_VENC_LT_SEL 105
125#define CLK_TOP_DPI0_SEL 106
126#define CLK_TOP_IRDA_SEL 107
127#define CLK_TOP_CCI400_SEL 108
128#define CLK_TOP_AUD_1_SEL 109
129#define CLK_TOP_AUD_2_SEL 110
130#define CLK_TOP_MEM_MFG_IN_SEL 111
131#define CLK_TOP_AXI_MFG_IN_SEL 112
132#define CLK_TOP_SCAM_SEL 113
133#define CLK_TOP_SPINFI_IFR_SEL 114
134#define CLK_TOP_HDMI_SEL 115
135#define CLK_TOP_DPILVDS_SEL 116
136#define CLK_TOP_MSDC50_2_H_SEL 117
137#define CLK_TOP_HDCP_SEL 118
138#define CLK_TOP_HDCP_24M_SEL 119
139#define CLK_TOP_RTC_SEL 120
140#define CLK_TOP_APLL1_DIV0 121
141#define CLK_TOP_APLL1_DIV1 122
142#define CLK_TOP_APLL1_DIV2 123
143#define CLK_TOP_APLL1_DIV3 124
144#define CLK_TOP_APLL1_DIV4 125
145#define CLK_TOP_APLL1_DIV5 126
146#define CLK_TOP_APLL2_DIV0 127
147#define CLK_TOP_APLL2_DIV1 128
148#define CLK_TOP_APLL2_DIV2 129
149#define CLK_TOP_APLL2_DIV3 130
150#define CLK_TOP_APLL2_DIV4 131
151#define CLK_TOP_APLL2_DIV5 132
152#define CLK_TOP_I2S0_M_SEL 133
153#define CLK_TOP_I2S1_M_SEL 134
154#define CLK_TOP_I2S2_M_SEL 135
155#define CLK_TOP_I2S3_M_SEL 136
156#define CLK_TOP_I2S3_B_SEL 137
157#define CLK_TOP_NR_CLK 138
158
159/* APMIXED_SYS */
160
161#define CLK_APMIXED_ARMCA15PLL 1
162#define CLK_APMIXED_ARMCA7PLL 2
163#define CLK_APMIXED_MAINPLL 3
164#define CLK_APMIXED_UNIVPLL 4
165#define CLK_APMIXED_MMPLL 5
166#define CLK_APMIXED_MSDCPLL 6
167#define CLK_APMIXED_VENCPLL 7
168#define CLK_APMIXED_TVDPLL 8
169#define CLK_APMIXED_MPLL 9
170#define CLK_APMIXED_VCODECPLL 10
171#define CLK_APMIXED_APLL1 11
172#define CLK_APMIXED_APLL2 12
173#define CLK_APMIXED_LVDSPLL 13
174#define CLK_APMIXED_MSDCPLL2 14
175#define CLK_APMIXED_NR_CLK 15
176
177/* INFRA_SYS */
178
179#define CLK_INFRA_DBGCLK 1
180#define CLK_INFRA_SMI 2
181#define CLK_INFRA_AUDIO 3
182#define CLK_INFRA_GCE 4
183#define CLK_INFRA_L2C_SRAM 5
184#define CLK_INFRA_M4U 6
185#define CLK_INFRA_CPUM 7
186#define CLK_INFRA_KP 8
187#define CLK_INFRA_CEC 9
188#define CLK_INFRA_PMICSPI 10
189#define CLK_INFRA_PMICWRAP 11
190#define CLK_INFRA_NR_CLK 12
191
192/* PERI_SYS */
193
194#define CLK_PERI_NFI 1
195#define CLK_PERI_THERM 2
196#define CLK_PERI_PWM1 3
197#define CLK_PERI_PWM2 4
198#define CLK_PERI_PWM3 5
199#define CLK_PERI_PWM4 6
200#define CLK_PERI_PWM5 7
201#define CLK_PERI_PWM6 8
202#define CLK_PERI_PWM7 9
203#define CLK_PERI_PWM 10
204#define CLK_PERI_USB0 11
205#define CLK_PERI_USB1 12
206#define CLK_PERI_AP_DMA 13
207#define CLK_PERI_MSDC30_0 14
208#define CLK_PERI_MSDC30_1 15
209#define CLK_PERI_MSDC30_2 16
210#define CLK_PERI_MSDC30_3 17
211#define CLK_PERI_NLI_ARB 18
212#define CLK_PERI_IRDA 19
213#define CLK_PERI_UART0 20
214#define CLK_PERI_UART1 21
215#define CLK_PERI_UART2 22
216#define CLK_PERI_UART3 23
217#define CLK_PERI_I2C0 24
218#define CLK_PERI_I2C1 25
219#define CLK_PERI_I2C2 26
220#define CLK_PERI_I2C3 27
221#define CLK_PERI_I2C4 28
222#define CLK_PERI_AUXADC 29
223#define CLK_PERI_SPI0 30
224#define CLK_PERI_I2C5 31
225#define CLK_PERI_NFIECC 32
226#define CLK_PERI_SPI 33
227#define CLK_PERI_IRRX 34
228#define CLK_PERI_I2C6 35
229#define CLK_PERI_UART0_SEL 36
230#define CLK_PERI_UART1_SEL 37
231#define CLK_PERI_UART2_SEL 38
232#define CLK_PERI_UART3_SEL 39
233#define CLK_PERI_NR_CLK 40
234
235#endif /* _DT_BINDINGS_CLK_MT8173_H */
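
As a point of reference, consumers pick these MT8173 clock IDs up through ordinary clocks properties in a board or SoC .dts. The sketch below is illustrative only; the &topckgen and &pericfg provider labels, the node name and the clock-names are assumptions, not part of this diff.

#include <dt-bindings/clock/mt8173-clk.h>

serial@11002000 {
        /* hypothetical consumer: bit clock from a TOPCKGEN mux, bus gate from PERICFG */
        clocks = <&topckgen CLK_TOP_UART_SEL>, <&pericfg CLK_PERI_UART0>;
        clock-names = "baud", "bus";
};
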
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index ebd63fd05649..dc4254b8cbbc 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -289,5 +289,7 @@
 #define UBI32_CORE1_CLK 279
 #define UBI32_CORE2_CLK 280
 #define EBI2_AON_CLK 281
+#define NSSTCM_CLK_SRC 282
+#define NSSTCM_CLK 283
 
 #endif
diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h
index 9a4b4c9ca44a..dd11ecdf837e 100644
--- a/include/dt-bindings/clock/r8a73a4-clock.h
+++ b/include/dt-bindings/clock/r8a73a4-clock.h
@@ -54,6 +54,7 @@
 #define R8A73A4_CLK_IIC3 11
 #define R8A73A4_CLK_IIC4 10
 #define R8A73A4_CLK_IIC5 9
+#define R8A73A4_CLK_IRQC 7
 
 /* MSTP5 */
 #define R8A73A4_CLK_THERMAL 22
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index 3f2c6b198d4a..ff7ca3584e16 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -79,6 +79,9 @@
 #define R8A7790_CLK_USBDMAC0 30
 #define R8A7790_CLK_USBDMAC1 31
 
+/* MSTP4 */
+#define R8A7790_CLK_IRQC 7
+
 /* MSTP5 */
 #define R8A7790_CLK_AUDIO_DMAC1 1
 #define R8A7790_CLK_AUDIO_DMAC0 2
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index 8fc5dc8faeea..402268384b99 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -70,6 +70,9 @@
 #define R8A7791_CLK_USBDMAC0 30
 #define R8A7791_CLK_USBDMAC1 31
 
+/* MSTP4 */
+#define R8A7791_CLK_IRQC 7
+
 /* MSTP5 */
 #define R8A7791_CLK_AUDIO_DMAC1 1
 #define R8A7791_CLK_AUDIO_DMAC0 2
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index d63323032d6e..09da38a58776 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -60,6 +60,9 @@
 #define R8A7794_CLK_USBDMAC0 30
 #define R8A7794_CLK_USBDMAC1 31
 
+/* MSTP4 */
+#define R8A7794_CLK_IRQC 7
+
 /* MSTP5 */
 #define R8A7794_CLK_THERMAL 22
 #define R8A7794_CLK_PWM 23
diff --git a/include/dt-bindings/clock/samsung,s2mps11.h b/include/dt-bindings/clock/samsung,s2mps11.h
new file mode 100644
index 000000000000..b903d7de27c9
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,s2mps11.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2015 Markus Reichl
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Device Tree binding constants clocks for the Samsung S2MPS11 PMIC.
9 */
10
11#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H
12#define _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H
13
14/* Fixed rate clocks. */
15
16#define S2MPS11_CLK_AP 0
17#define S2MPS11_CLK_CP 1
18#define S2MPS11_CLK_BT 2
19
20/* Total number of clocks. */
21#define S2MPS11_CLKS_NUM (S2MPS11_CLK_BT + 1)
22
23#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H */
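
The three S2MPS11 IDs are consumed by index from the PMIC's clock cells. A minimal, hypothetical consumer is sketched below; the &s2mps11_osc label, the rtc node and the clock-names value are assumptions used only to show the reference style.

#include <dt-bindings/clock/samsung,s2mps11.h>

rtc@10070000 {
        /* 32.768 kHz AP clock output of the S2MPS11 */
        clocks = <&s2mps11_osc S2MPS11_CLK_AP>;
        clock-names = "rtc_src";
};
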
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 979d24a6799f..d19763439472 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -193,6 +193,7 @@
 #define VF610_PLL6_BYPASS 180
 #define VF610_PLL7_BYPASS 181
 #define VF610_CLK_SNVS 182
-#define VF610_CLK_END 183
+#define VF610_CLK_DAP 183
+#define VF610_CLK_END 184
 
 #endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h
new file mode 100644
index 000000000000..e683dbb7e7c5
--- /dev/null
+++ b/include/dt-bindings/clock/zx296702-clock.h
@@ -0,0 +1,170 @@
1/*
2 * Copyright 2014 Linaro Ltd.
3 * Copyright (C) 2014 ZTE Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __DT_BINDINGS_CLOCK_ZX296702_H
11#define __DT_BINDINGS_CLOCK_ZX296702_H
12
13#define ZX296702_OSC 0
14#define ZX296702_PLL_A9 1
15#define ZX296702_PLL_A9_350M 2
16#define ZX296702_PLL_MAC_1000M 3
17#define ZX296702_PLL_MAC_333M 4
18#define ZX296702_PLL_MM0_1188M 5
19#define ZX296702_PLL_MM0_396M 6
20#define ZX296702_PLL_MM0_198M 7
21#define ZX296702_PLL_MM1_108M 8
22#define ZX296702_PLL_MM1_72M 9
23#define ZX296702_PLL_MM1_54M 10
24#define ZX296702_PLL_LSP_104M 11
25#define ZX296702_PLL_LSP_26M 12
26#define ZX296702_PLL_AUDIO_294M912 13
27#define ZX296702_PLL_DDR_266M 14
28#define ZX296702_CLK_148M5 15
29#define ZX296702_MATRIX_ACLK 16
30#define ZX296702_MAIN_HCLK 17
31#define ZX296702_MAIN_PCLK 18
32#define ZX296702_CLK_500 19
33#define ZX296702_CLK_250 20
34#define ZX296702_CLK_125 21
35#define ZX296702_CLK_74M25 22
36#define ZX296702_A9_WCLK 23
37#define ZX296702_A9_AS1_ACLK_MUX 24
38#define ZX296702_A9_TRACE_CLKIN_MUX 25
39#define ZX296702_A9_AS1_ACLK_DIV 26
40#define ZX296702_CLK_2 27
41#define ZX296702_CLK_27 28
42#define ZX296702_DECPPU_ACLK_MUX 29
43#define ZX296702_PPU_ACLK_MUX 30
44#define ZX296702_MALI400_ACLK_MUX 31
45#define ZX296702_VOU_ACLK_MUX 32
46#define ZX296702_VOU_MAIN_WCLK_MUX 33
47#define ZX296702_VOU_AUX_WCLK_MUX 34
48#define ZX296702_VOU_SCALER_WCLK_MUX 35
49#define ZX296702_R2D_ACLK_MUX 36
50#define ZX296702_R2D_WCLK_MUX 37
51#define ZX296702_CLK_50 38
52#define ZX296702_CLK_25 39
53#define ZX296702_CLK_12 40
54#define ZX296702_CLK_16M384 41
55#define ZX296702_CLK_32K768 42
56#define ZX296702_SEC_WCLK_DIV 43
57#define ZX296702_DDR_WCLK_MUX 44
58#define ZX296702_NAND_WCLK_MUX 45
59#define ZX296702_LSP_26_WCLK_MUX 46
60#define ZX296702_A9_AS0_ACLK 47
61#define ZX296702_A9_AS1_ACLK 48
62#define ZX296702_A9_TRACE_CLKIN 49
63#define ZX296702_DECPPU_AXI_M_ACLK 50
64#define ZX296702_DECPPU_AHB_S_HCLK 51
65#define ZX296702_PPU_AXI_M_ACLK 52
66#define ZX296702_PPU_AHB_S_HCLK 53
67#define ZX296702_VOU_AXI_M_ACLK 54
68#define ZX296702_VOU_APB_PCLK 55
69#define ZX296702_VOU_MAIN_CHANNEL_WCLK 56
70#define ZX296702_VOU_AUX_CHANNEL_WCLK 57
71#define ZX296702_VOU_HDMI_OSCLK_CEC 58
72#define ZX296702_VOU_SCALER_WCLK 59
73#define ZX296702_MALI400_AXI_M_ACLK 60
74#define ZX296702_MALI400_APB_PCLK 61
75#define ZX296702_R2D_WCLK 62
76#define ZX296702_R2D_AXI_M_ACLK 63
77#define ZX296702_R2D_AHB_HCLK 64
78#define ZX296702_DDR3_AXI_S0_ACLK 65
79#define ZX296702_DDR3_APB_PCLK 66
80#define ZX296702_DDR3_WCLK 67
81#define ZX296702_USB20_0_AHB_HCLK 68
82#define ZX296702_USB20_0_EXTREFCLK 69
83#define ZX296702_USB20_1_AHB_HCLK 70
84#define ZX296702_USB20_1_EXTREFCLK 71
85#define ZX296702_USB20_2_AHB_HCLK 72
86#define ZX296702_USB20_2_EXTREFCLK 73
87#define ZX296702_GMAC_AXI_M_ACLK 74
88#define ZX296702_GMAC_APB_PCLK 75
89#define ZX296702_GMAC_125_CLKIN 76
90#define ZX296702_GMAC_RMII_CLKIN 77
91#define ZX296702_GMAC_25M_CLK 78
92#define ZX296702_NANDFLASH_AHB_HCLK 79
93#define ZX296702_NANDFLASH_WCLK 80
94#define ZX296702_LSP0_APB_PCLK 81
95#define ZX296702_LSP0_AHB_HCLK 82
96#define ZX296702_LSP0_26M_WCLK 83
97#define ZX296702_LSP0_104M_WCLK 84
98#define ZX296702_LSP0_16M384_WCLK 85
99#define ZX296702_LSP1_APB_PCLK 86
100#define ZX296702_LSP1_26M_WCLK 87
101#define ZX296702_LSP1_104M_WCLK 88
102#define ZX296702_LSP1_32K_CLK 89
103#define ZX296702_AON_HCLK 90
104#define ZX296702_SYS_CTRL_PCLK 91
105#define ZX296702_DMA_PCLK 92
106#define ZX296702_DMA_ACLK 93
107#define ZX296702_SEC_HCLK 94
108#define ZX296702_AES_WCLK 95
109#define ZX296702_DES_WCLK 96
110#define ZX296702_IRAM_ACLK 97
111#define ZX296702_IROM_ACLK 98
112#define ZX296702_BOOT_CTRL_HCLK 99
113#define ZX296702_EFUSE_CLK_30 100
114#define ZX296702_VOU_MAIN_CHANNEL_DIV 101
115#define ZX296702_VOU_AUX_CHANNEL_DIV 102
116#define ZX296702_VOU_TV_ENC_HD_DIV 103
117#define ZX296702_VOU_TV_ENC_SD_DIV 104
118#define ZX296702_VL0_MUX 105
119#define ZX296702_VL1_MUX 106
120#define ZX296702_VL2_MUX 107
121#define ZX296702_GL0_MUX 108
122#define ZX296702_GL1_MUX 109
123#define ZX296702_GL2_MUX 110
124#define ZX296702_WB_MUX 111
125#define ZX296702_HDMI_MUX 112
126#define ZX296702_VOU_TV_ENC_HD_MUX 113
127#define ZX296702_VOU_TV_ENC_SD_MUX 114
128#define ZX296702_VL0_CLK 115
129#define ZX296702_VL1_CLK 116
130#define ZX296702_VL2_CLK 117
131#define ZX296702_GL0_CLK 118
132#define ZX296702_GL1_CLK 119
133#define ZX296702_GL2_CLK 120
134#define ZX296702_WB_CLK 121
135#define ZX296702_CL_CLK 122
136#define ZX296702_MAIN_MIX_CLK 123
137#define ZX296702_AUX_MIX_CLK 124
138#define ZX296702_HDMI_CLK 125
139#define ZX296702_VOU_TV_ENC_HD_DAC_CLK 126
140#define ZX296702_VOU_TV_ENC_SD_DAC_CLK 127
141#define ZX296702_A9_PERIPHCLK 128
142#define ZX296702_TOPCLK_END 129
143
144#define ZX296702_SDMMC1_WCLK_MUX 0
145#define ZX296702_SDMMC1_WCLK_DIV 1
146#define ZX296702_SDMMC1_WCLK 2
147#define ZX296702_SDMMC1_PCLK 3
148#define ZX296702_SPDIF0_WCLK_MUX 4
149#define ZX296702_SPDIF0_WCLK 5
150#define ZX296702_SPDIF0_PCLK 6
151#define ZX296702_SPDIF0_DIV 7
152#define ZX296702_I2S0_WCLK_MUX 8
153#define ZX296702_I2S0_WCLK 9
154#define ZX296702_I2S0_PCLK 10
155#define ZX296702_I2S0_DIV 11
156#define ZX296702_LSP0CLK_END 12
157
158#define ZX296702_UART0_WCLK_MUX 0
159#define ZX296702_UART0_WCLK 1
160#define ZX296702_UART0_PCLK 2
161#define ZX296702_UART1_WCLK_MUX 3
162#define ZX296702_UART1_WCLK 4
163#define ZX296702_UART1_PCLK 5
164#define ZX296702_SDMMC0_WCLK_MUX 6
165#define ZX296702_SDMMC0_WCLK_DIV 7
166#define ZX296702_SDMMC0_WCLK 8
167#define ZX296702_SDMMC0_PCLK 9
168#define ZX296702_LSP1CLK_END 10
169
170#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */
diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h
index c7af7c7ef793..7b2000cead43 100644
--- a/include/dt-bindings/mfd/arizona.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -90,4 +90,22 @@
 #define ARIZONA_INMODE_SE 1
 #define ARIZONA_INMODE_DMIC 2
 
+#define ARIZONA_MICD_TIME_CONTINUOUS 0
+#define ARIZONA_MICD_TIME_250US 1
+#define ARIZONA_MICD_TIME_500US 2
+#define ARIZONA_MICD_TIME_1MS 3
+#define ARIZONA_MICD_TIME_2MS 4
+#define ARIZONA_MICD_TIME_4MS 5
+#define ARIZONA_MICD_TIME_8MS 6
+#define ARIZONA_MICD_TIME_16MS 7
+#define ARIZONA_MICD_TIME_32MS 8
+#define ARIZONA_MICD_TIME_64MS 9
+#define ARIZONA_MICD_TIME_128MS 10
+#define ARIZONA_MICD_TIME_256MS 11
+#define ARIZONA_MICD_TIME_512MS 12
+
+#define ARIZONA_ACCDET_MODE_MIC 0
+#define ARIZONA_ACCDET_MODE_HPL 1
+#define ARIZONA_ACCDET_MODE_HPR 2
+
 #endif
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
new file mode 100644
index 000000000000..e3e6c75d8822
--- /dev/null
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -0,0 +1,15 @@
1/*
2 * This header provides shared DT/Driver defines for ST's LPC device
3 *
4 * Copyright (C) 2014 STMicroelectronics -- All Rights Reserved
5 *
6 * Author: Lee Jones <lee.jones@linaro.org> for STMicroelectronics
7 */
8
9#ifndef __DT_BINDINGS_ST_LPC_H__
10#define __DT_BINDINGS_ST_LPC_H__
11
12#define ST_LPC_MODE_RTC 0
13#define ST_LPC_MODE_WDT 1
14
15#endif /* __DT_BINDINGS_ST_LPC_H__ */
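
Since ST_LPC_MODE_RTC/WDT select how the shared LPC block is used, a DT node passes one of them through a mode property. The snippet is a hedged sketch: the compatible string, unit address and st,lpc-mode property name are recalled from the ST LPC binding and should be treated as assumptions.

#include <dt-bindings/mfd/st-lpc.h>

lpc@fde05000 {
        compatible = "st,stih407-lpc";
        /* run the LPC block as an RTC rather than a watchdog */
        st,lpc-mode = <ST_LPC_MODE_RTC>;
};
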
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
new file mode 100644
index 000000000000..172744a72eb7
--- /dev/null
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -0,0 +1,45 @@
1/*
2 * Device Tree constants for the Texas Instruments DP83867 PHY
3 *
4 * Author: Dan Murphy <dmurphy@ti.com>
5 *
6 * Copyright: (C) 2015 Texas Instruments, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef _DT_BINDINGS_TI_DP83867_H
19#define _DT_BINDINGS_TI_DP83867_H
20
21/* PHY CTRL bits */
22#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB 0x00
23#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB 0x01
24#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB 0x02
25#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB 0x03
26
27/* RGMIIDCTL internal delay for rx and tx */
28#define DP83867_RGMIIDCTL_250_PS 0x0
29#define DP83867_RGMIIDCTL_500_PS 0x1
30#define DP83867_RGMIIDCTL_750_PS 0x2
31#define DP83867_RGMIIDCTL_1_NS 0x3
32#define DP83867_RGMIIDCTL_1_25_NS 0x4
33#define DP83867_RGMIIDCTL_1_50_NS 0x5
34#define DP83867_RGMIIDCTL_1_75_NS 0x6
35#define DP83867_RGMIIDCTL_2_00_NS 0x7
36#define DP83867_RGMIIDCTL_2_25_NS 0x8
37#define DP83867_RGMIIDCTL_2_50_NS 0x9
38#define DP83867_RGMIIDCTL_2_75_NS 0xa
39#define DP83867_RGMIIDCTL_3_00_NS 0xb
40#define DP83867_RGMIIDCTL_3_25_NS 0xc
41#define DP83867_RGMIIDCTL_3_50_NS 0xd
42#define DP83867_RGMIIDCTL_3_75_NS 0xe
43#define DP83867_RGMIIDCTL_4_00_NS 0xf
44
45#endif
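
These FIFO-depth and RGMII delay codes are meant to be quoted directly in the PHY node. A sketch follows; the ti,* property names match my recollection of the DP83867 binding and the chosen delay values are arbitrary examples, not taken from this diff.

#include <dt-bindings/net/ti-dp83867.h>

ethernet-phy@0 {
        reg = <0>;
        ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
        ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>;
        ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
};
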
diff --git a/include/dt-bindings/phy/phy-pistachio-usb.h b/include/dt-bindings/phy/phy-pistachio-usb.h
new file mode 100644
index 000000000000..d1877aa0a3f5
--- /dev/null
+++ b/include/dt-bindings/phy/phy-pistachio-usb.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) 2015 Google, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 */
8
9#ifndef _DT_BINDINGS_PHY_PISTACHIO
10#define _DT_BINDINGS_PHY_PISTACHIO
11
12#define REFCLK_XO_CRYSTAL 0x0
13#define REFCLK_X0_EXT_CLK 0x1
14#define REFCLK_CLK_CORE 0x2
15
16#endif /* _DT_BINDINGS_PHY_PISTACHIO */
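
The three REFCLK_* values choose the USB PHY reference clock source. A hypothetical PHY node is shown below; the compatible string and the img,refclk property name are assumptions based on the Pistachio USB PHY binding.

#include <dt-bindings/phy/phy-pistachio-usb.h>

usb-phy {
        compatible = "img,pistachio-usb-phy";
        #phy-cells = <0>;
        /* take the PHY reference clock from the core clock */
        img,refclk = <REFCLK_CLK_CORE>;
};
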
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index 5f4d01898c9c..b00bbc9c60b4 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -21,6 +21,7 @@
 #define SLEWCTRL_SLOW (1 << 19)
 #define SLEWCTRL_FAST 0
 #define DS0_PULL_UP_DOWN_EN (1 << 27)
+#define WAKEUP_ENABLE (1 << 29)
 
 #define PIN_OUTPUT (PULL_DISABLE)
 #define PIN_OUTPUT_PULLUP (PULL_UP)
diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h
new file mode 100644
index 000000000000..6f0bc37af39c
--- /dev/null
+++ b/include/dt-bindings/pinctrl/bcm2835.h
@@ -0,0 +1,27 @@
1/*
2 * Header providing constants for bcm2835 pinctrl bindings.
3 *
4 * Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com>
5 *
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 at the following locations:
9 *
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
12 */
13
14#ifndef __DT_BINDINGS_PINCTRL_BCM2835_H__
15#define __DT_BINDINGS_PINCTRL_BCM2835_H__
16
17/* brcm,function property */
18#define BCM2835_FSEL_GPIO_IN 0
19#define BCM2835_FSEL_GPIO_OUT 1
20#define BCM2835_FSEL_ALT5 2
21#define BCM2835_FSEL_ALT4 3
22#define BCM2835_FSEL_ALT0 4
23#define BCM2835_FSEL_ALT1 5
24#define BCM2835_FSEL_ALT2 6
25#define BCM2835_FSEL_ALT3 7
26
27#endif /* __DT_BINDINGS_PINCTRL_BCM2835_H__ */
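
The BCM2835_FSEL_* values plug straight into the existing brcm,function property of the bcm2835 gpio/pinctrl node. A small sketch, with the pin numbers picked arbitrarily for illustration:

#include <dt-bindings/pinctrl/bcm2835.h>

&gpio {
        i2c0_pins: i2c0 {
                brcm,pins = <0 1>;
                /* ALT0 routes GPIO0/GPIO1 to the SDA0/SCL0 pads */
                brcm,function = <BCM2835_FSEL_ALT0>;
        };
};
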
diff --git a/include/dt-bindings/pinctrl/mt6397-pinfunc.h b/include/dt-bindings/pinctrl/mt6397-pinfunc.h
new file mode 100644
index 000000000000..85739b308c2f
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6397-pinfunc.h
@@ -0,0 +1,256 @@
1#ifndef __DTS_MT6397_PINFUNC_H
2#define __DTS_MT6397_PINFUNC_H
3
4#include <dt-bindings/pinctrl/mt65xx.h>
5
6#define MT6397_PIN_0_INT__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
7#define MT6397_PIN_0_INT__FUNC_INT (MTK_PIN_NO(0) | 1)
8
9#define MT6397_PIN_1_SRCVOLTEN__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
10#define MT6397_PIN_1_SRCVOLTEN__FUNC_SRCVOLTEN (MTK_PIN_NO(1) | 1)
11#define MT6397_PIN_1_SRCVOLTEN__FUNC_TEST_CK1 (MTK_PIN_NO(1) | 6)
12
13#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
14#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_SRCLKEN_PERI (MTK_PIN_NO(2) | 1)
15#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_TEST_CK2 (MTK_PIN_NO(2) | 6)
16
17#define MT6397_PIN_3_RTC_32K1V8__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
18#define MT6397_PIN_3_RTC_32K1V8__FUNC_RTC_32K1V8 (MTK_PIN_NO(3) | 1)
19#define MT6397_PIN_3_RTC_32K1V8__FUNC_TEST_CK3 (MTK_PIN_NO(3) | 6)
20
21#define MT6397_PIN_4_WRAP_EVENT__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
22#define MT6397_PIN_4_WRAP_EVENT__FUNC_WRAP_EVENT (MTK_PIN_NO(4) | 1)
23
24#define MT6397_PIN_5_SPI_CLK__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
25#define MT6397_PIN_5_SPI_CLK__FUNC_SPI_CLK (MTK_PIN_NO(5) | 1)
26
27#define MT6397_PIN_6_SPI_CSN__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
28#define MT6397_PIN_6_SPI_CSN__FUNC_SPI_CSN (MTK_PIN_NO(6) | 1)
29
30#define MT6397_PIN_7_SPI_MOSI__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
31#define MT6397_PIN_7_SPI_MOSI__FUNC_SPI_MOSI (MTK_PIN_NO(7) | 1)
32
33#define MT6397_PIN_8_SPI_MISO__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
34#define MT6397_PIN_8_SPI_MISO__FUNC_SPI_MISO (MTK_PIN_NO(8) | 1)
35
36#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
37#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_AUD_CLK (MTK_PIN_NO(9) | 1)
38#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_IN0 (MTK_PIN_NO(9) | 6)
39#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_OUT0 (MTK_PIN_NO(9) | 7)
40
41#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
42#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_AUD_MISO (MTK_PIN_NO(10) | 1)
43#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_IN1 (MTK_PIN_NO(10) | 6)
44#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_OUT1 (MTK_PIN_NO(10) | 7)
45
46#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
47#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_AUD_MOSI (MTK_PIN_NO(11) | 1)
48#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_IN2 (MTK_PIN_NO(11) | 6)
49#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_OUT2 (MTK_PIN_NO(11) | 7)
50
51#define MT6397_PIN_12_COL0__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
52#define MT6397_PIN_12_COL0__FUNC_COL0_USBDL (MTK_PIN_NO(12) | 1)
53#define MT6397_PIN_12_COL0__FUNC_EINT10_1X (MTK_PIN_NO(12) | 2)
54#define MT6397_PIN_12_COL0__FUNC_PWM1_3X (MTK_PIN_NO(12) | 3)
55#define MT6397_PIN_12_COL0__FUNC_TEST_IN3 (MTK_PIN_NO(12) | 6)
56#define MT6397_PIN_12_COL0__FUNC_TEST_OUT3 (MTK_PIN_NO(12) | 7)
57
58#define MT6397_PIN_13_COL1__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
59#define MT6397_PIN_13_COL1__FUNC_COL1 (MTK_PIN_NO(13) | 1)
60#define MT6397_PIN_13_COL1__FUNC_EINT11_1X (MTK_PIN_NO(13) | 2)
61#define MT6397_PIN_13_COL1__FUNC_SCL0_2X (MTK_PIN_NO(13) | 3)
62#define MT6397_PIN_13_COL1__FUNC_TEST_IN4 (MTK_PIN_NO(13) | 6)
63#define MT6397_PIN_13_COL1__FUNC_TEST_OUT4 (MTK_PIN_NO(13) | 7)
64
65#define MT6397_PIN_14_COL2__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
66#define MT6397_PIN_14_COL2__FUNC_COL2 (MTK_PIN_NO(14) | 1)
67#define MT6397_PIN_14_COL2__FUNC_EINT12_1X (MTK_PIN_NO(14) | 2)
68#define MT6397_PIN_14_COL2__FUNC_SDA0_2X (MTK_PIN_NO(14) | 3)
69#define MT6397_PIN_14_COL2__FUNC_TEST_IN5 (MTK_PIN_NO(14) | 6)
70#define MT6397_PIN_14_COL2__FUNC_TEST_OUT5 (MTK_PIN_NO(14) | 7)
71
72#define MT6397_PIN_15_COL3__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
73#define MT6397_PIN_15_COL3__FUNC_COL3 (MTK_PIN_NO(15) | 1)
74#define MT6397_PIN_15_COL3__FUNC_EINT13_1X (MTK_PIN_NO(15) | 2)
75#define MT6397_PIN_15_COL3__FUNC_SCL1_2X (MTK_PIN_NO(15) | 3)
76#define MT6397_PIN_15_COL3__FUNC_TEST_IN6 (MTK_PIN_NO(15) | 6)
77#define MT6397_PIN_15_COL3__FUNC_TEST_OUT6 (MTK_PIN_NO(15) | 7)
78
79#define MT6397_PIN_16_COL4__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
80#define MT6397_PIN_16_COL4__FUNC_COL4 (MTK_PIN_NO(16) | 1)
81#define MT6397_PIN_16_COL4__FUNC_EINT14_1X (MTK_PIN_NO(16) | 2)
82#define MT6397_PIN_16_COL4__FUNC_SDA1_2X (MTK_PIN_NO(16) | 3)
83#define MT6397_PIN_16_COL4__FUNC_TEST_IN7 (MTK_PIN_NO(16) | 6)
84#define MT6397_PIN_16_COL4__FUNC_TEST_OUT7 (MTK_PIN_NO(16) | 7)
85
86#define MT6397_PIN_17_COL5__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
87#define MT6397_PIN_17_COL5__FUNC_COL5 (MTK_PIN_NO(17) | 1)
88#define MT6397_PIN_17_COL5__FUNC_EINT15_1X (MTK_PIN_NO(17) | 2)
89#define MT6397_PIN_17_COL5__FUNC_SCL2_2X (MTK_PIN_NO(17) | 3)
90#define MT6397_PIN_17_COL5__FUNC_TEST_IN8 (MTK_PIN_NO(17) | 6)
91#define MT6397_PIN_17_COL5__FUNC_TEST_OUT8 (MTK_PIN_NO(17) | 7)
92
93#define MT6397_PIN_18_COL6__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
94#define MT6397_PIN_18_COL6__FUNC_COL6 (MTK_PIN_NO(18) | 1)
95#define MT6397_PIN_18_COL6__FUNC_EINT16_1X (MTK_PIN_NO(18) | 2)
96#define MT6397_PIN_18_COL6__FUNC_SDA2_2X (MTK_PIN_NO(18) | 3)
97#define MT6397_PIN_18_COL6__FUNC_GPIO32K_0 (MTK_PIN_NO(18) | 4)
98#define MT6397_PIN_18_COL6__FUNC_GPIO26M_0 (MTK_PIN_NO(18) | 5)
99#define MT6397_PIN_18_COL6__FUNC_TEST_IN9 (MTK_PIN_NO(18) | 6)
100#define MT6397_PIN_18_COL6__FUNC_TEST_OUT9 (MTK_PIN_NO(18) | 7)
101
102#define MT6397_PIN_19_COL7__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
103#define MT6397_PIN_19_COL7__FUNC_COL7 (MTK_PIN_NO(19) | 1)
104#define MT6397_PIN_19_COL7__FUNC_EINT17_1X (MTK_PIN_NO(19) | 2)
105#define MT6397_PIN_19_COL7__FUNC_PWM2_3X (MTK_PIN_NO(19) | 3)
106#define MT6397_PIN_19_COL7__FUNC_GPIO32K_1 (MTK_PIN_NO(19) | 4)
107#define MT6397_PIN_19_COL7__FUNC_GPIO26M_1 (MTK_PIN_NO(19) | 5)
108#define MT6397_PIN_19_COL7__FUNC_TEST_IN10 (MTK_PIN_NO(19) | 6)
109#define MT6397_PIN_19_COL7__FUNC_TEST_OUT10 (MTK_PIN_NO(19) | 7)
110
111#define MT6397_PIN_20_ROW0__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
112#define MT6397_PIN_20_ROW0__FUNC_ROW0 (MTK_PIN_NO(20) | 1)
113#define MT6397_PIN_20_ROW0__FUNC_EINT18_1X (MTK_PIN_NO(20) | 2)
114#define MT6397_PIN_20_ROW0__FUNC_SCL0_3X (MTK_PIN_NO(20) | 3)
115#define MT6397_PIN_20_ROW0__FUNC_TEST_IN11 (MTK_PIN_NO(20) | 6)
116#define MT6397_PIN_20_ROW0__FUNC_TEST_OUT11 (MTK_PIN_NO(20) | 7)
117
118#define MT6397_PIN_21_ROW1__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
119#define MT6397_PIN_21_ROW1__FUNC_ROW1 (MTK_PIN_NO(21) | 1)
120#define MT6397_PIN_21_ROW1__FUNC_EINT19_1X (MTK_PIN_NO(21) | 2)
121#define MT6397_PIN_21_ROW1__FUNC_SDA0_3X (MTK_PIN_NO(21) | 3)
122#define MT6397_PIN_21_ROW1__FUNC_AUD_TSTCK (MTK_PIN_NO(21) | 4)
123#define MT6397_PIN_21_ROW1__FUNC_TEST_IN12 (MTK_PIN_NO(21) | 6)
124#define MT6397_PIN_21_ROW1__FUNC_TEST_OUT12 (MTK_PIN_NO(21) | 7)
125
126#define MT6397_PIN_22_ROW2__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
127#define MT6397_PIN_22_ROW2__FUNC_ROW2 (MTK_PIN_NO(22) | 1)
128#define MT6397_PIN_22_ROW2__FUNC_EINT20_1X (MTK_PIN_NO(22) | 2)
129#define MT6397_PIN_22_ROW2__FUNC_SCL1_3X (MTK_PIN_NO(22) | 3)
130#define MT6397_PIN_22_ROW2__FUNC_TEST_IN13 (MTK_PIN_NO(22) | 6)
131#define MT6397_PIN_22_ROW2__FUNC_TEST_OUT13 (MTK_PIN_NO(22) | 7)
132
133#define MT6397_PIN_23_ROW3__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
134#define MT6397_PIN_23_ROW3__FUNC_ROW3 (MTK_PIN_NO(23) | 1)
135#define MT6397_PIN_23_ROW3__FUNC_EINT21_1X (MTK_PIN_NO(23) | 2)
136#define MT6397_PIN_23_ROW3__FUNC_SDA1_3X (MTK_PIN_NO(23) | 3)
137#define MT6397_PIN_23_ROW3__FUNC_TEST_IN14 (MTK_PIN_NO(23) | 6)
138#define MT6397_PIN_23_ROW3__FUNC_TEST_OUT14 (MTK_PIN_NO(23) | 7)
139
140#define MT6397_PIN_24_ROW4__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
141#define MT6397_PIN_24_ROW4__FUNC_ROW4 (MTK_PIN_NO(24) | 1)
142#define MT6397_PIN_24_ROW4__FUNC_EINT22_1X (MTK_PIN_NO(24) | 2)
143#define MT6397_PIN_24_ROW4__FUNC_SCL2_3X (MTK_PIN_NO(24) | 3)
144#define MT6397_PIN_24_ROW4__FUNC_TEST_IN15 (MTK_PIN_NO(24) | 6)
145#define MT6397_PIN_24_ROW4__FUNC_TEST_OUT15 (MTK_PIN_NO(24) | 7)
146
147#define MT6397_PIN_25_ROW5__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
148#define MT6397_PIN_25_ROW5__FUNC_ROW5 (MTK_PIN_NO(25) | 1)
149#define MT6397_PIN_25_ROW5__FUNC_EINT23_1X (MTK_PIN_NO(25) | 2)
150#define MT6397_PIN_25_ROW5__FUNC_SDA2_3X (MTK_PIN_NO(25) | 3)
151#define MT6397_PIN_25_ROW5__FUNC_TEST_IN16 (MTK_PIN_NO(25) | 6)
152#define MT6397_PIN_25_ROW5__FUNC_TEST_OUT16 (MTK_PIN_NO(25) | 7)
153
154#define MT6397_PIN_26_ROW6__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
155#define MT6397_PIN_26_ROW6__FUNC_ROW6 (MTK_PIN_NO(26) | 1)
156#define MT6397_PIN_26_ROW6__FUNC_EINT24_1X (MTK_PIN_NO(26) | 2)
157#define MT6397_PIN_26_ROW6__FUNC_PWM3_3X (MTK_PIN_NO(26) | 3)
158#define MT6397_PIN_26_ROW6__FUNC_GPIO32K_2 (MTK_PIN_NO(26) | 4)
159#define MT6397_PIN_26_ROW6__FUNC_GPIO26M_2 (MTK_PIN_NO(26) | 5)
160#define MT6397_PIN_26_ROW6__FUNC_TEST_IN17 (MTK_PIN_NO(26) | 6)
161#define MT6397_PIN_26_ROW6__FUNC_TEST_OUT17 (MTK_PIN_NO(26) | 7)
162
163#define MT6397_PIN_27_ROW7__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
164#define MT6397_PIN_27_ROW7__FUNC_ROW7 (MTK_PIN_NO(27) | 1)
165#define MT6397_PIN_27_ROW7__FUNC_EINT3_1X (MTK_PIN_NO(27) | 2)
166#define MT6397_PIN_27_ROW7__FUNC_CBUS (MTK_PIN_NO(27) | 3)
167#define MT6397_PIN_27_ROW7__FUNC_GPIO32K_3 (MTK_PIN_NO(27) | 4)
168#define MT6397_PIN_27_ROW7__FUNC_GPIO26M_3 (MTK_PIN_NO(27) | 5)
169#define MT6397_PIN_27_ROW7__FUNC_TEST_IN18 (MTK_PIN_NO(27) | 6)
170#define MT6397_PIN_27_ROW7__FUNC_TEST_OUT18 (MTK_PIN_NO(27) | 7)
171
172#define MT6397_PIN_28_PWM1__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
173#define MT6397_PIN_28_PWM1__FUNC_PWM1 (MTK_PIN_NO(28) | 1)
174#define MT6397_PIN_28_PWM1__FUNC_EINT4_1X (MTK_PIN_NO(28) | 2)
175#define MT6397_PIN_28_PWM1__FUNC_GPIO32K_4 (MTK_PIN_NO(28) | 4)
176#define MT6397_PIN_28_PWM1__FUNC_GPIO26M_4 (MTK_PIN_NO(28) | 5)
177#define MT6397_PIN_28_PWM1__FUNC_TEST_IN19 (MTK_PIN_NO(28) | 6)
178#define MT6397_PIN_28_PWM1__FUNC_TEST_OUT19 (MTK_PIN_NO(28) | 7)
179
180#define MT6397_PIN_29_PWM2__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
181#define MT6397_PIN_29_PWM2__FUNC_PWM2 (MTK_PIN_NO(29) | 1)
182#define MT6397_PIN_29_PWM2__FUNC_EINT5_1X (MTK_PIN_NO(29) | 2)
183#define MT6397_PIN_29_PWM2__FUNC_GPIO32K_5 (MTK_PIN_NO(29) | 4)
184#define MT6397_PIN_29_PWM2__FUNC_GPIO26M_5 (MTK_PIN_NO(29) | 5)
185#define MT6397_PIN_29_PWM2__FUNC_TEST_IN20 (MTK_PIN_NO(29) | 6)
186#define MT6397_PIN_29_PWM2__FUNC_TEST_OUT20 (MTK_PIN_NO(29) | 7)
187
188#define MT6397_PIN_30_PWM3__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
189#define MT6397_PIN_30_PWM3__FUNC_PWM3 (MTK_PIN_NO(30) | 1)
190#define MT6397_PIN_30_PWM3__FUNC_EINT6_1X (MTK_PIN_NO(30) | 2)
191#define MT6397_PIN_30_PWM3__FUNC_COL0 (MTK_PIN_NO(30) | 3)
192#define MT6397_PIN_30_PWM3__FUNC_GPIO32K_6 (MTK_PIN_NO(30) | 4)
193#define MT6397_PIN_30_PWM3__FUNC_GPIO26M_6 (MTK_PIN_NO(30) | 5)
194#define MT6397_PIN_30_PWM3__FUNC_TEST_IN21 (MTK_PIN_NO(30) | 6)
195#define MT6397_PIN_30_PWM3__FUNC_TEST_OUT21 (MTK_PIN_NO(30) | 7)
196
197#define MT6397_PIN_31_SCL0__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
198#define MT6397_PIN_31_SCL0__FUNC_SCL0 (MTK_PIN_NO(31) | 1)
199#define MT6397_PIN_31_SCL0__FUNC_EINT7_1X (MTK_PIN_NO(31) | 2)
200#define MT6397_PIN_31_SCL0__FUNC_PWM1_2X (MTK_PIN_NO(31) | 3)
201#define MT6397_PIN_31_SCL0__FUNC_TEST_IN22 (MTK_PIN_NO(31) | 6)
202#define MT6397_PIN_31_SCL0__FUNC_TEST_OUT22 (MTK_PIN_NO(31) | 7)
203
204#define MT6397_PIN_32_SDA0__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
205#define MT6397_PIN_32_SDA0__FUNC_SDA0 (MTK_PIN_NO(32) | 1)
206#define MT6397_PIN_32_SDA0__FUNC_EINT8_1X (MTK_PIN_NO(32) | 2)
207#define MT6397_PIN_32_SDA0__FUNC_TEST_IN23 (MTK_PIN_NO(32) | 6)
208#define MT6397_PIN_32_SDA0__FUNC_TEST_OUT23 (MTK_PIN_NO(32) | 7)
209
210#define MT6397_PIN_33_SCL1__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
211#define MT6397_PIN_33_SCL1__FUNC_SCL1 (MTK_PIN_NO(33) | 1)
212#define MT6397_PIN_33_SCL1__FUNC_EINT9_1X (MTK_PIN_NO(33) | 2)
213#define MT6397_PIN_33_SCL1__FUNC_PWM2_2X (MTK_PIN_NO(33) | 3)
214#define MT6397_PIN_33_SCL1__FUNC_TEST_IN24 (MTK_PIN_NO(33) | 6)
215#define MT6397_PIN_33_SCL1__FUNC_TEST_OUT24 (MTK_PIN_NO(33) | 7)
216
217#define MT6397_PIN_34_SDA1__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
218#define MT6397_PIN_34_SDA1__FUNC_SDA1 (MTK_PIN_NO(34) | 1)
219#define MT6397_PIN_34_SDA1__FUNC_EINT0_1X (MTK_PIN_NO(34) | 2)
220#define MT6397_PIN_34_SDA1__FUNC_TEST_IN25 (MTK_PIN_NO(34) | 6)
221#define MT6397_PIN_34_SDA1__FUNC_TEST_OUT25 (MTK_PIN_NO(34) | 7)
222
223#define MT6397_PIN_35_SCL2__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
224#define MT6397_PIN_35_SCL2__FUNC_SCL2 (MTK_PIN_NO(35) | 1)
225#define MT6397_PIN_35_SCL2__FUNC_EINT1_1X (MTK_PIN_NO(35) | 2)
226#define MT6397_PIN_35_SCL2__FUNC_PWM3_2X (MTK_PIN_NO(35) | 3)
227#define MT6397_PIN_35_SCL2__FUNC_TEST_IN26 (MTK_PIN_NO(35) | 6)
228#define MT6397_PIN_35_SCL2__FUNC_TEST_OUT26 (MTK_PIN_NO(35) | 7)
229
230#define MT6397_PIN_36_SDA2__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
231#define MT6397_PIN_36_SDA2__FUNC_SDA2 (MTK_PIN_NO(36) | 1)
232#define MT6397_PIN_36_SDA2__FUNC_EINT2_1X (MTK_PIN_NO(36) | 2)
233#define MT6397_PIN_36_SDA2__FUNC_TEST_IN27 (MTK_PIN_NO(36) | 6)
234#define MT6397_PIN_36_SDA2__FUNC_TEST_OUT27 (MTK_PIN_NO(36) | 7)
235
236#define MT6397_PIN_37_HDMISD__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
237#define MT6397_PIN_37_HDMISD__FUNC_HDMISD (MTK_PIN_NO(37) | 1)
238#define MT6397_PIN_37_HDMISD__FUNC_TEST_IN28 (MTK_PIN_NO(37) | 6)
239#define MT6397_PIN_37_HDMISD__FUNC_TEST_OUT28 (MTK_PIN_NO(37) | 7)
240
241#define MT6397_PIN_38_HDMISCK__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
242#define MT6397_PIN_38_HDMISCK__FUNC_HDMISCK (MTK_PIN_NO(38) | 1)
243#define MT6397_PIN_38_HDMISCK__FUNC_TEST_IN29 (MTK_PIN_NO(38) | 6)
244#define MT6397_PIN_38_HDMISCK__FUNC_TEST_OUT29 (MTK_PIN_NO(38) | 7)
245
246#define MT6397_PIN_39_HTPLG__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
247#define MT6397_PIN_39_HTPLG__FUNC_HTPLG (MTK_PIN_NO(39) | 1)
248#define MT6397_PIN_39_HTPLG__FUNC_TEST_IN30 (MTK_PIN_NO(39) | 6)
249#define MT6397_PIN_39_HTPLG__FUNC_TEST_OUT30 (MTK_PIN_NO(39) | 7)
250
251#define MT6397_PIN_40_CEC__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
252#define MT6397_PIN_40_CEC__FUNC_CEC (MTK_PIN_NO(40) | 1)
253#define MT6397_PIN_40_CEC__FUNC_TEST_IN31 (MTK_PIN_NO(40) | 6)
254#define MT6397_PIN_40_CEC__FUNC_TEST_OUT31 (MTK_PIN_NO(40) | 7)
255
256#endif /* __DTS_MT6397_PINFUNC_H */
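
Each of these constants packs a pin number and a function index for the MediaTek pinmux property. The fragment below is a sketch only; the &pio6397 label and the node names are invented for illustration.

#include <dt-bindings/pinctrl/mt6397-pinfunc.h>

&pio6397 {
        pwrap_spi_pins: spi-pins {
                pins {
                        pinmux = <MT6397_PIN_5_SPI_CLK__FUNC_SPI_CLK>,
                                 <MT6397_PIN_6_SPI_CSN__FUNC_SPI_CSN>,
                                 <MT6397_PIN_7_SPI_MOSI__FUNC_SPI_MOSI>,
                                 <MT6397_PIN_8_SPI_MISO__FUNC_SPI_MISO>;
                };
        };
};
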
diff --git a/include/dt-bindings/reset-controller/mt8135-resets.h b/include/dt-bindings/reset-controller/mt8135-resets.h
new file mode 100644
index 000000000000..1fb629508db2
--- /dev/null
+++ b/include/dt-bindings/reset-controller/mt8135-resets.h
@@ -0,0 +1,64 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Flora Fu, MediaTek
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8135
16#define _DT_BINDINGS_RESET_CONTROLLER_MT8135
17
18/* INFRACFG resets */
19#define MT8135_INFRA_EMI_REG_RST 0
20#define MT8135_INFRA_DRAMC0_A0_RST 1
21#define MT8135_INFRA_CCIF0_RST 2
22#define MT8135_INFRA_APCIRQ_EINT_RST 3
23#define MT8135_INFRA_APXGPT_RST 4
24#define MT8135_INFRA_SCPSYS_RST 5
25#define MT8135_INFRA_CCIF1_RST 6
26#define MT8135_INFRA_PMIC_WRAP_RST 7
27#define MT8135_INFRA_KP_RST 8
28#define MT8135_INFRA_EMI_RST 32
29#define MT8135_INFRA_DRAMC0_RST 34
30#define MT8135_INFRA_SMI_RST 35
31#define MT8135_INFRA_M4U_RST 36
32
33/* PERICFG resets */
34#define MT8135_PERI_UART0_SW_RST 0
35#define MT8135_PERI_UART1_SW_RST 1
36#define MT8135_PERI_UART2_SW_RST 2
37#define MT8135_PERI_UART3_SW_RST 3
38#define MT8135_PERI_IRDA_SW_RST 4
39#define MT8135_PERI_PTP_SW_RST 5
40#define MT8135_PERI_AP_HIF_SW_RST 6
41#define MT8135_PERI_GPCU_SW_RST 7
42#define MT8135_PERI_MD_HIF_SW_RST 8
43#define MT8135_PERI_NLI_SW_RST 9
44#define MT8135_PERI_AUXADC_SW_RST 10
45#define MT8135_PERI_DMA_SW_RST 11
46#define MT8135_PERI_NFI_SW_RST 14
47#define MT8135_PERI_PWM_SW_RST 15
48#define MT8135_PERI_THERM_SW_RST 16
49#define MT8135_PERI_MSDC0_SW_RST 17
50#define MT8135_PERI_MSDC1_SW_RST 18
51#define MT8135_PERI_MSDC2_SW_RST 19
52#define MT8135_PERI_MSDC3_SW_RST 20
53#define MT8135_PERI_I2C0_SW_RST 22
54#define MT8135_PERI_I2C1_SW_RST 23
55#define MT8135_PERI_I2C2_SW_RST 24
56#define MT8135_PERI_I2C3_SW_RST 25
57#define MT8135_PERI_I2C4_SW_RST 26
58#define MT8135_PERI_I2C5_SW_RST 27
59#define MT8135_PERI_I2C6_SW_RST 28
60#define MT8135_PERI_USB_SW_RST 29
61#define MT8135_PERI_SPI1_SW_RST 33
62#define MT8135_PERI_PWRAP_BRIDGE_SW_RST 34
63
64#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8135 */
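
Consumers hand these indices to the INFRACFG or PERICFG reset controller through the generic resets property. A hedged example follows; the &infracfg label and the pwrap node are assumed for illustration.

#include <dt-bindings/reset-controller/mt8135-resets.h>

pwrap@1000f000 {
        resets = <&infracfg MT8135_INFRA_PMIC_WRAP_RST>;
        reset-names = "pwrap";
};
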
diff --git a/include/dt-bindings/reset-controller/mt8173-resets.h b/include/dt-bindings/reset-controller/mt8173-resets.h
new file mode 100644
index 000000000000..9464b37cf68c
--- /dev/null
+++ b/include/dt-bindings/reset-controller/mt8173-resets.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Flora Fu, MediaTek
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8173
16#define _DT_BINDINGS_RESET_CONTROLLER_MT8173
17
18/* INFRACFG resets */
19#define MT8173_INFRA_EMI_REG_RST 0
20#define MT8173_INFRA_DRAMC0_A0_RST 1
21#define MT8173_INFRA_APCIRQ_EINT_RST 3
22#define MT8173_INFRA_APXGPT_RST 4
23#define MT8173_INFRA_SCPSYS_RST 5
24#define MT8173_INFRA_KP_RST 6
25#define MT8173_INFRA_PMIC_WRAP_RST 7
26#define MT8173_INFRA_MPIP_RST 8
27#define MT8173_INFRA_CEC_RST 9
28#define MT8173_INFRA_EMI_RST 32
29#define MT8173_INFRA_DRAMC0_RST 34
30#define MT8173_INFRA_APMIXEDSYS_RST 35
31#define MT8173_INFRA_MIPI_DSI_RST 36
32#define MT8173_INFRA_TRNG_RST 37
33#define MT8173_INFRA_SYSIRQ_RST 38
34#define MT8173_INFRA_MIPI_CSI_RST 39
35#define MT8173_INFRA_GCE_FAXI_RST 40
36#define MT8173_INFRA_MMIOMMURST 47
37
38
39/* PERICFG resets */
40#define MT8173_PERI_UART0_SW_RST 0
41#define MT8173_PERI_UART1_SW_RST 1
42#define MT8173_PERI_UART2_SW_RST 2
43#define MT8173_PERI_UART3_SW_RST 3
44#define MT8173_PERI_IRRX_SW_RST 4
45#define MT8173_PERI_PWM_SW_RST 8
46#define MT8173_PERI_AUXADC_SW_RST 10
47#define MT8173_PERI_DMA_SW_RST 11
48#define MT8173_PERI_I2C6_SW_RST 13
49#define MT8173_PERI_NFI_SW_RST 14
50#define MT8173_PERI_THERM_SW_RST 16
51#define MT8173_PERI_MSDC2_SW_RST 17
52#define MT8173_PERI_MSDC3_SW_RST 18
53#define MT8173_PERI_MSDC0_SW_RST 19
54#define MT8173_PERI_MSDC1_SW_RST 20
55#define MT8173_PERI_I2C0_SW_RST 22
56#define MT8173_PERI_I2C1_SW_RST 23
57#define MT8173_PERI_I2C2_SW_RST 24
58#define MT8173_PERI_I2C3_SW_RST 25
59#define MT8173_PERI_I2C4_SW_RST 26
60#define MT8173_PERI_HDMI_SW_RST 29
61#define MT8173_PERI_SPI0_SW_RST 33
62
63#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8173 */
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
index 0ad5ef930b5d..de9c8140931a 100644
--- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
@@ -129,4 +129,47 @@
 #define USB30_1_PHY_RESET 112
 #define NSSFB0_RESET 113
 #define NSSFB1_RESET 114
+#define UBI32_CORE1_CLKRST_CLAMP_RESET 115
+#define UBI32_CORE1_CLAMP_RESET 116
+#define UBI32_CORE1_AHB_RESET 117
+#define UBI32_CORE1_AXI_RESET 118
+#define UBI32_CORE2_CLKRST_CLAMP_RESET 119
+#define UBI32_CORE2_CLAMP_RESET 120
+#define UBI32_CORE2_AHB_RESET 121
+#define UBI32_CORE2_AXI_RESET 122
+#define GMAC_CORE1_RESET 123
+#define GMAC_CORE2_RESET 124
+#define GMAC_CORE3_RESET 125
+#define GMAC_CORE4_RESET 126
+#define GMAC_AHB_RESET 127
+#define NSS_CH0_RST_RX_CLK_N_RESET 128
+#define NSS_CH0_RST_TX_CLK_N_RESET 129
+#define NSS_CH0_RST_RX_125M_N_RESET 130
+#define NSS_CH0_HW_RST_RX_125M_N_RESET 131
+#define NSS_CH0_RST_TX_125M_N_RESET 132
+#define NSS_CH1_RST_RX_CLK_N_RESET 133
+#define NSS_CH1_RST_TX_CLK_N_RESET 134
+#define NSS_CH1_RST_RX_125M_N_RESET 135
+#define NSS_CH1_HW_RST_RX_125M_N_RESET 136
+#define NSS_CH1_RST_TX_125M_N_RESET 137
+#define NSS_CH2_RST_RX_CLK_N_RESET 138
+#define NSS_CH2_RST_TX_CLK_N_RESET 139
+#define NSS_CH2_RST_RX_125M_N_RESET 140
+#define NSS_CH2_HW_RST_RX_125M_N_RESET 141
+#define NSS_CH2_RST_TX_125M_N_RESET 142
+#define NSS_CH3_RST_RX_CLK_N_RESET 143
+#define NSS_CH3_RST_TX_CLK_N_RESET 144
+#define NSS_CH3_RST_RX_125M_N_RESET 145
+#define NSS_CH3_HW_RST_RX_125M_N_RESET 146
+#define NSS_CH3_RST_TX_125M_N_RESET 147
+#define NSS_RST_RX_250M_125M_N_RESET 148
+#define NSS_RST_TX_250M_125M_N_RESET 149
+#define NSS_QSGMII_TXPI_RST_N_RESET 150
+#define NSS_QSGMII_CDR_RST_N_RESET 151
+#define NSS_SGMII2_CDR_RST_N_RESET 152
+#define NSS_SGMII3_CDR_RST_N_RESET 153
+#define NSS_CAL_PRBS_RST_N_RESET 154
+#define NSS_LCKDT_RST_N_RESET 155
+#define NSS_SRDS_N_RESET 156
+
 #endif
diff --git a/include/dt-bindings/sound/apq8016-lpass.h b/include/dt-bindings/sound/apq8016-lpass.h
new file mode 100644
index 000000000000..499076e980a3
--- /dev/null
+++ b/include/dt-bindings/sound/apq8016-lpass.h
@@ -0,0 +1,9 @@
1#ifndef __DT_APQ8016_LPASS_H
2#define __DT_APQ8016_LPASS_H
3
4#define MI2S_PRIMARY 0
5#define MI2S_SECONDARY 1
6#define MI2S_TERTIARY 2
7#define MI2S_QUATERNARY 3
8
9#endif /* __DT_APQ8016_LPASS_H */
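
The MI2S_* indices identify the LPASS CPU DAI ports when a sound card node references them. The sketch below is speculative: the &lpass label and the dai-link layout follow my recollection of the APQ8016 sound bindings and are not part of this diff.

#include <dt-bindings/sound/apq8016-lpass.h>

sound {
        cpu-dai-link {
                cpu {
                        /* primary MI2S port of the LPASS block */
                        sound-dai = <&lpass MI2S_PRIMARY>;
                };
        };
};
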
diff --git a/include/dt-bindings/sound/audio-jack-events.h b/include/dt-bindings/sound/audio-jack-events.h
new file mode 100644
index 000000000000..378349f28069
--- /dev/null
+++ b/include/dt-bindings/sound/audio-jack-events.h
@@ -0,0 +1,9 @@
1#ifndef __AUDIO_JACK_EVENTS_H
2#define __AUDIO_JACK_EVENTS_H
3
4#define JACK_HEADPHONE 1
5#define JACK_MICROPHONE 2
6#define JACK_LINEOUT 3
7#define JACK_LINEIN 4
8
9#endif /* __AUDIO_JACK_EVENTS_H */
diff --git a/include/dt-bindings/sound/tas2552.h b/include/dt-bindings/sound/tas2552.h
new file mode 100644
index 000000000000..a4e1a079980b
--- /dev/null
+++ b/include/dt-bindings/sound/tas2552.h
@@ -0,0 +1,18 @@
1#ifndef __DT_TAS2552_H
2#define __DT_TAS2552_H
3
4#define TAS2552_PLL_CLKIN (0)
5#define TAS2552_PDM_CLK (1)
6#define TAS2552_CLK_TARGET_MASK (1)
7
8#define TAS2552_PLL_CLKIN_MCLK ((0 << 1) | TAS2552_PLL_CLKIN)
9#define TAS2552_PLL_CLKIN_BCLK ((1 << 1) | TAS2552_PLL_CLKIN)
10#define TAS2552_PLL_CLKIN_IVCLKIN ((2 << 1) | TAS2552_PLL_CLKIN)
11#define TAS2552_PLL_CLKIN_1_8_FIXED ((3 << 1) | TAS2552_PLL_CLKIN)
12
13#define TAS2552_PDM_CLK_PLL ((0 << 1) | TAS2552_PDM_CLK)
14#define TAS2552_PDM_CLK_IVCLKIN ((1 << 1) | TAS2552_PDM_CLK)
15#define TAS2552_PDM_CLK_BCLK ((2 << 1) | TAS2552_PDM_CLK)
16#define TAS2552_PDM_CLK_MCLK ((3 << 1) | TAS2552_PDM_CLK)
17
18#endif /* __DT_TAS2552_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f57c440642cd..d2445fa9999f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -53,11 +53,24 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
  return adev ? adev->handle : NULL;
 }
 
-#define ACPI_COMPANION(dev) acpi_node((dev)->fwnode)
+#define ACPI_COMPANION(dev) to_acpi_node((dev)->fwnode)
 #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
  acpi_fwnode_handle(adev) : NULL)
 #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
 
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls : the class, subclass, prog-if triple for this device
+ * @_msk : the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields will be left
+ * initialized with the default value.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
+
 static inline bool has_acpi_companion(struct device *dev)
 {
  return is_acpi_node(dev->fwnode);
@@ -158,6 +171,16 @@ typedef u32 phys_cpuid_t;
 #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
 #endif
 
+static inline bool invalid_logical_cpuid(u32 cpuid)
+{
+ return (int)cpuid < 0;
+}
+
+static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
+{
+ return phys_id == PHYS_CPUID_INVALID;
+}
+
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
@@ -243,54 +266,21 @@ extern bool wmi_has_guid(const char *guid);
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800
 
-#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-
-extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
+extern char acpi_video_backlight_string[];
 extern long acpi_is_video_device(acpi_handle handle);
-extern void acpi_video_dmi_promote_vendor(void);
-extern void acpi_video_dmi_demote_vendor(void);
-extern int acpi_video_backlight_support(void);
-extern int acpi_video_display_switch_support(void);
-
-#else
-
-static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle)
-{
- return 0;
-}
-
-static inline long acpi_is_video_device(acpi_handle handle)
-{
- return 0;
-}
-
-static inline void acpi_video_dmi_promote_vendor(void)
-{
-}
-
-static inline void acpi_video_dmi_demote_vendor(void)
-{
-}
-
-static inline int acpi_video_backlight_support(void)
-{
- return 0;
-}
-
-static inline int acpi_video_display_switch_support(void)
-{
- return 0;
-}
-
-#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
-
 extern int acpi_blacklisted(void);
 extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
 extern void acpi_osi_setup(char *str);
+extern bool acpi_osi_is_win8(void);
 
 #ifdef CONFIG_ACPI_NUMA
+int acpi_map_pxm_to_online_node(int pxm);
 int acpi_get_node(acpi_handle handle);
 #else
+static inline int acpi_map_pxm_to_online_node(int pxm)
+{
+ return 0;
+}
 static inline int acpi_get_node(acpi_handle handle)
 {
  return 0;
@@ -440,6 +430,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
 #define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
 
 extern void acpi_early_init(void);
+extern void acpi_subsystem_init(void);
 
 extern int acpi_nvs_register(__u64 start, __u64 size);
 
@@ -465,6 +456,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);
 #define ACPI_COMPANION(dev) (NULL)
 #define ACPI_COMPANION_SET(dev, adev) do { } while (0)
 #define ACPI_HANDLE(dev) (NULL)
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
 
 struct fwnode_handle;
 
@@ -473,7 +465,7 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode)
  return false;
 }
 
-static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode)
 {
  return NULL;
 }
@@ -494,6 +486,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
 }
 
 static inline void acpi_early_init(void) { }
+static inline void acpi_subsystem_init(void) { }
 
 static inline int early_acpi_boot_init(void)
 {
@@ -569,6 +562,11 @@ static inline int acpi_device_modalias(struct device *dev,
  return -ENODEV;
 }
 
+static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+{
+ return false;
+}
+
 #define ACPI_PTR(_ptr) (NULL)
 
 #endif /* !CONFIG_ACPI */
diff --git a/include/linux/amba/sp810.h b/include/linux/amba/sp810.h
index c7df89f99115..58fe9e8b6fd7 100644
--- a/include/linux/amba/sp810.h
+++ b/include/linux/amba/sp810.h
@@ -2,7 +2,7 @@
2 * ARM PrimeXsys System Controller SP810 header file 2 * ARM PrimeXsys System Controller SP810 header file
3 * 3 *
4 * Copyright (C) 2009 ST Microelectronics 4 * Copyright (C) 2009 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/ata.h b/include/linux/ata.h
index b666b773e111..6c78956aa470 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -45,6 +45,7 @@ enum {
  ATA_SECT_SIZE = 512,
  ATA_MAX_SECTORS_128 = 128,
  ATA_MAX_SECTORS = 256,
+ ATA_MAX_SECTORS_1024 = 1024,
  ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */
  ATA_MAX_SECTORS_TAPE = 65535,
 
@@ -704,9 +705,19 @@ static inline bool ata_id_wcache_enabled(const u16 *id)
 
 static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
 {
+ /* Word 86 must have bit 15 set */
  if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
   return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 3);
+
+ /* READ LOG DMA EXT support can be signaled either from word 119
+  * or from word 120. The format is the same for both words: Bit
+  * 15 must be cleared, bit 14 set and bit 3 set.
+  */
+ if ((id[ATA_ID_COMMAND_SET_3] & 0xC008) == 0x4008 ||
+     (id[ATA_ID_COMMAND_SET_4] & 0xC008) == 0x4008)
+  return true;
+
+ return false;
 }
 
 static inline bool ata_id_has_sense_reporting(const u16 *id)
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
new file mode 100644
index 000000000000..a23209b43842
--- /dev/null
+++ b/include/linux/backing-dev-defs.h
@@ -0,0 +1,256 @@
1#ifndef __LINUX_BACKING_DEV_DEFS_H
2#define __LINUX_BACKING_DEV_DEFS_H
3
4#include <linux/list.h>
5#include <linux/radix-tree.h>
6#include <linux/rbtree.h>
7#include <linux/spinlock.h>
8#include <linux/percpu_counter.h>
9#include <linux/percpu-refcount.h>
10#include <linux/flex_proportions.h>
11#include <linux/timer.h>
12#include <linux/workqueue.h>
13
14struct page;
15struct device;
16struct dentry;
17
18/*
19 * Bits in bdi_writeback.state
20 */
21enum wb_state {
22 WB_registered, /* bdi_register() was done */
23 WB_writeback_running, /* Writeback is in progress */
24 WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
25};
26
27enum wb_congested_state {
28 WB_async_congested, /* The async (write) queue is getting full */
29 WB_sync_congested, /* The sync queue is getting full */
30};
31
32typedef int (congested_fn)(void *, int);
33
34enum wb_stat_item {
35 WB_RECLAIMABLE,
36 WB_WRITEBACK,
37 WB_DIRTIED,
38 WB_WRITTEN,
39 NR_WB_STAT_ITEMS
40};
41
42#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
43
44/*
45 * For cgroup writeback, multiple wb's may map to the same blkcg. Those
46 * wb's can operate mostly independently but should share the congested
47 * state. To facilitate such sharing, the congested state is tracked using
48 * the following struct which is created on demand, indexed by blkcg ID on
49 * its bdi, and refcounted.
50 */
51struct bdi_writeback_congested {
52 unsigned long state; /* WB_[a]sync_congested flags */
53 atomic_t refcnt; /* nr of attached wb's and blkg */
54
55#ifdef CONFIG_CGROUP_WRITEBACK
56 struct backing_dev_info *bdi; /* the associated bdi */
57 int blkcg_id; /* ID of the associated blkcg */
58 struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
59#endif
60};
61
62/*
63 * Each wb (bdi_writeback) can perform writeback operations, is measured
64 * and throttled, independently. Without cgroup writeback, each bdi
65 * (bdi_writeback) is served by its embedded bdi->wb.
66 *
67 * On the default hierarchy, blkcg implicitly enables memcg. This allows
68 * using memcg's page ownership for attributing writeback IOs, and every
69 * memcg - blkcg combination can be served by its own wb by assigning a
70 * dedicated wb to each memcg, which enables isolation across different
71 * cgroups and propagation of IO back pressure down from the IO layer upto
72 * the tasks which are generating the dirty pages to be written back.
73 *
74 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
75 * refcounted with the number of inodes attached to it, and pins the memcg
76 * and the corresponding blkcg. As the corresponding blkcg for a memcg may
77 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
78 * is tested for blkcg after lookup and removed from index on mismatch so
79 * that a new wb for the combination can be created.
80 */
81struct bdi_writeback {
82 struct backing_dev_info *bdi; /* our parent bdi */
83
84 unsigned long state; /* Always use atomic bitops on this */
85 unsigned long last_old_flush; /* last old data flush */
86
87 struct list_head b_dirty; /* dirty inodes */
88 struct list_head b_io; /* parked for writeback */
89 struct list_head b_more_io; /* parked for more writeback */
90 struct list_head b_dirty_time; /* time stamps are dirty */
91 spinlock_t list_lock; /* protects the b_* lists */
92
93 struct percpu_counter stat[NR_WB_STAT_ITEMS];
94
95 struct bdi_writeback_congested *congested;
96
97 unsigned long bw_time_stamp; /* last time write bw is updated */
98 unsigned long dirtied_stamp;
99 unsigned long written_stamp; /* pages written at bw_time_stamp */
100 unsigned long write_bandwidth; /* the estimated write bandwidth */
101 unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */
102
103 /*
104 * The base dirty throttle rate, re-calculated on every 200ms.
105 * All the bdi tasks' dirty rate will be curbed under it.
106 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
107 * in small steps and is much more smooth/stable than the latter.
108 */
109 unsigned long dirty_ratelimit;
110 unsigned long balanced_dirty_ratelimit;
111
112 struct fprop_local_percpu completions;
113 int dirty_exceeded;
114
115 spinlock_t work_lock; /* protects work_list & dwork scheduling */
116 struct list_head work_list;
117 struct delayed_work dwork; /* work item used for writeback */
118
119#ifdef CONFIG_CGROUP_WRITEBACK
120 struct percpu_ref refcnt; /* used only for !root wb's */
121 struct fprop_local_percpu memcg_completions;
122 struct cgroup_subsys_state *memcg_css; /* the associated memcg */
123 struct cgroup_subsys_state *blkcg_css; /* and blkcg */
124 struct list_head memcg_node; /* anchored at memcg->cgwb_list */
125 struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
126
127 union {
128 struct work_struct release_work;
129 struct rcu_head rcu;
130 };
131#endif
132};
133
134struct backing_dev_info {
135 struct list_head bdi_list;
136 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
137 unsigned int capabilities; /* Device capabilities */
138 congested_fn *congested_fn; /* Function pointer if device is md/dm */
139 void *congested_data; /* Pointer to aux data for congested func */
140
141 char *name;
142
143 unsigned int min_ratio;
144 unsigned int max_ratio, max_prop_frac;
145
146 /*
147 * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are
148 * any dirty wbs, which is depended upon by bdi_has_dirty().
149 */
150 atomic_long_t tot_write_bandwidth;
151
152 struct bdi_writeback wb; /* the root writeback info for this bdi */
153#ifdef CONFIG_CGROUP_WRITEBACK
154 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
155 struct rb_root cgwb_congested_tree; /* their congested states */
156 atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */
157#else
158 struct bdi_writeback_congested *wb_congested;
159#endif
160 wait_queue_head_t wb_waitq;
161
162 struct device *dev;
163
164 struct timer_list laptop_mode_wb_timer;
165
166#ifdef CONFIG_DEBUG_FS
167 struct dentry *debug_dir;
168 struct dentry *debug_stats;
169#endif
170};
171
172enum {
173 BLK_RW_ASYNC = 0,
174 BLK_RW_SYNC = 1,
175};
176
177void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
178void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
179
180static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
181{
182 clear_wb_congested(bdi->wb.congested, sync);
183}
184
185static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
186{
187 set_wb_congested(bdi->wb.congested, sync);
188}
189
190#ifdef CONFIG_CGROUP_WRITEBACK
191
192/**
193 * wb_tryget - try to increment a wb's refcount
194 * @wb: bdi_writeback to get
195 */
196static inline bool wb_tryget(struct bdi_writeback *wb)
197{
198 if (wb != &wb->bdi->wb)
199 return percpu_ref_tryget(&wb->refcnt);
200 return true;
201}
202
203/**
204 * wb_get - increment a wb's refcount
205 * @wb: bdi_writeback to get
206 */
207static inline void wb_get(struct bdi_writeback *wb)
208{
209 if (wb != &wb->bdi->wb)
210 percpu_ref_get(&wb->refcnt);
211}
212
213/**
214 * wb_put - decrement a wb's refcount
215 * @wb: bdi_writeback to put
216 */
217static inline void wb_put(struct bdi_writeback *wb)
218{
219 if (wb != &wb->bdi->wb)
220 percpu_ref_put(&wb->refcnt);
221}
222
223/**
224 * wb_dying - is a wb dying?
225 * @wb: bdi_writeback of interest
226 *
227 * Returns whether @wb is unlinked and being drained.
228 */
229static inline bool wb_dying(struct bdi_writeback *wb)
230{
231 return percpu_ref_is_dying(&wb->refcnt);
232}
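
Taken together, the helpers above define a simple contract: the root wb embedded in a bdi is never reference-counted, non-root wbs are, and wb_tryget() must fail once a wb is dying. Below is a user-space sketch of that contract with a plain counter standing in for percpu_ref; the toy_* names are hypothetical.

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_wb { bool is_root; bool dying; long refcnt; };

	static bool toy_wb_tryget(struct toy_wb *wb)
	{
		if (wb->is_root)
			return true;		/* root wb: no refcounting */
		if (wb->dying)
			return false;		/* unlinked and being drained */
		wb->refcnt++;
		return true;
	}

	static void toy_wb_put(struct toy_wb *wb)
	{
		if (!wb->is_root)
			wb->refcnt--;
	}

	int main(void)
	{
		struct toy_wb root = { .is_root = true };
		struct toy_wb cgwb = { .refcnt = 1 };	/* creator's reference */

		printf("root tryget: %d\n", toy_wb_tryget(&root));
		printf("cgwb tryget: %d (refcnt=%ld)\n",
		       toy_wb_tryget(&cgwb), cgwb.refcnt);
		toy_wb_put(&cgwb);

		cgwb.dying = true;
		printf("dying tryget: %d\n", toy_wb_tryget(&cgwb));
		return 0;
	}
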
233
234#else /* CONFIG_CGROUP_WRITEBACK */
235
236static inline bool wb_tryget(struct bdi_writeback *wb)
237{
238 return true;
239}
240
241static inline void wb_get(struct bdi_writeback *wb)
242{
243}
244
245static inline void wb_put(struct bdi_writeback *wb)
246{
247}
248
249static inline bool wb_dying(struct bdi_writeback *wb)
250{
251 return false;
252}
253
254#endif /* CONFIG_CGROUP_WRITEBACK */
255
256#endif /* __LINUX_BACKING_DEV_DEFS_H */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index d87d8eced064..0fe9df983ab7 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -8,106 +8,14 @@
8#ifndef _LINUX_BACKING_DEV_H 8#ifndef _LINUX_BACKING_DEV_H
9#define _LINUX_BACKING_DEV_H 9#define _LINUX_BACKING_DEV_H
10 10
11#include <linux/percpu_counter.h>
12#include <linux/log2.h>
13#include <linux/flex_proportions.h>
14#include <linux/kernel.h> 11#include <linux/kernel.h>
15#include <linux/fs.h> 12#include <linux/fs.h>
16#include <linux/sched.h> 13#include <linux/sched.h>
17#include <linux/timer.h> 14#include <linux/blkdev.h>
18#include <linux/writeback.h> 15#include <linux/writeback.h>
19#include <linux/atomic.h> 16#include <linux/blk-cgroup.h>
20#include <linux/sysctl.h> 17#include <linux/backing-dev-defs.h>
21#include <linux/workqueue.h> 18#include <linux/slab.h>
22
23struct page;
24struct device;
25struct dentry;
26
27/*
28 * Bits in backing_dev_info.state
29 */
30enum bdi_state {
31 BDI_async_congested, /* The async (write) queue is getting full */
32 BDI_sync_congested, /* The sync queue is getting full */
33 BDI_registered, /* bdi_register() was done */
34 BDI_writeback_running, /* Writeback is in progress */
35};
36
37typedef int (congested_fn)(void *, int);
38
39enum bdi_stat_item {
40 BDI_RECLAIMABLE,
41 BDI_WRITEBACK,
42 BDI_DIRTIED,
43 BDI_WRITTEN,
44 NR_BDI_STAT_ITEMS
45};
46
47#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
48
49struct bdi_writeback {
50 struct backing_dev_info *bdi; /* our parent bdi */
51
52 unsigned long last_old_flush; /* last old data flush */
53
54 struct delayed_work dwork; /* work item used for writeback */
55 struct list_head b_dirty; /* dirty inodes */
56 struct list_head b_io; /* parked for writeback */
57 struct list_head b_more_io; /* parked for more writeback */
58 struct list_head b_dirty_time; /* time stamps are dirty */
59 spinlock_t list_lock; /* protects the b_* lists */
60};
61
62struct backing_dev_info {
63 struct list_head bdi_list;
64 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
65 unsigned long state; /* Always use atomic bitops on this */
66 unsigned int capabilities; /* Device capabilities */
67 congested_fn *congested_fn; /* Function pointer if device is md/dm */
68 void *congested_data; /* Pointer to aux data for congested func */
69
70 char *name;
71
72 struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
73
74 unsigned long bw_time_stamp; /* last time write bw is updated */
75 unsigned long dirtied_stamp;
76 unsigned long written_stamp; /* pages written at bw_time_stamp */
77 unsigned long write_bandwidth; /* the estimated write bandwidth */
78 unsigned long avg_write_bandwidth; /* further smoothed write bw */
79
80 /*
81 * The base dirty throttle rate, re-calculated on every 200ms.
82 * All the bdi tasks' dirty rate will be curbed under it.
83 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
84 * in small steps and is much more smooth/stable than the latter.
85 */
86 unsigned long dirty_ratelimit;
87 unsigned long balanced_dirty_ratelimit;
88
89 struct fprop_local_percpu completions;
90 int dirty_exceeded;
91
92 unsigned int min_ratio;
93 unsigned int max_ratio, max_prop_frac;
94
95 struct bdi_writeback wb; /* default writeback info for this bdi */
96 spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
97
98 struct list_head work_list;
99
100 struct device *dev;
101
102 struct timer_list laptop_mode_wb_timer;
103
104#ifdef CONFIG_DEBUG_FS
105 struct dentry *debug_dir;
106 struct dentry *debug_stats;
107#endif
108};
109
110struct backing_dev_info *inode_to_bdi(struct inode *inode);
111 19
112int __must_check bdi_init(struct backing_dev_info *bdi); 20int __must_check bdi_init(struct backing_dev_info *bdi);
113void bdi_destroy(struct backing_dev_info *bdi); 21void bdi_destroy(struct backing_dev_info *bdi);
@@ -117,97 +25,99 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
117 const char *fmt, ...); 25 const char *fmt, ...);
118int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 26int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
119int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); 27int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
120void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, 28void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
121 enum wb_reason reason); 29 bool range_cyclic, enum wb_reason reason);
122void bdi_start_background_writeback(struct backing_dev_info *bdi); 30void wb_start_background_writeback(struct bdi_writeback *wb);
123void bdi_writeback_workfn(struct work_struct *work); 31void wb_workfn(struct work_struct *work);
124int bdi_has_dirty_io(struct backing_dev_info *bdi); 32void wb_wakeup_delayed(struct bdi_writeback *wb);
125void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
126 33
127extern spinlock_t bdi_lock; 34extern spinlock_t bdi_lock;
128extern struct list_head bdi_list; 35extern struct list_head bdi_list;
129 36
130extern struct workqueue_struct *bdi_wq; 37extern struct workqueue_struct *bdi_wq;
131 38
132static inline int wb_has_dirty_io(struct bdi_writeback *wb) 39static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
133{ 40{
134 return !list_empty(&wb->b_dirty) || 41 return test_bit(WB_has_dirty_io, &wb->state);
135 !list_empty(&wb->b_io) || 42}
136 !list_empty(&wb->b_more_io); 43
44static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
45{
46 /*
47 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
48 * any dirty wbs. See wb_update_write_bandwidth().
49 */
50 return atomic_long_read(&bdi->tot_write_bandwidth);
137} 51}
138 52
139static inline void __add_bdi_stat(struct backing_dev_info *bdi, 53static inline void __add_wb_stat(struct bdi_writeback *wb,
140 enum bdi_stat_item item, s64 amount) 54 enum wb_stat_item item, s64 amount)
141{ 55{
142 __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH); 56 __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
143} 57}
144 58
145static inline void __inc_bdi_stat(struct backing_dev_info *bdi, 59static inline void __inc_wb_stat(struct bdi_writeback *wb,
146 enum bdi_stat_item item) 60 enum wb_stat_item item)
147{ 61{
148 __add_bdi_stat(bdi, item, 1); 62 __add_wb_stat(wb, item, 1);
149} 63}
150 64
151static inline void inc_bdi_stat(struct backing_dev_info *bdi, 65static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
152 enum bdi_stat_item item)
153{ 66{
154 unsigned long flags; 67 unsigned long flags;
155 68
156 local_irq_save(flags); 69 local_irq_save(flags);
157 __inc_bdi_stat(bdi, item); 70 __inc_wb_stat(wb, item);
158 local_irq_restore(flags); 71 local_irq_restore(flags);
159} 72}
160 73
161static inline void __dec_bdi_stat(struct backing_dev_info *bdi, 74static inline void __dec_wb_stat(struct bdi_writeback *wb,
162 enum bdi_stat_item item) 75 enum wb_stat_item item)
163{ 76{
164 __add_bdi_stat(bdi, item, -1); 77 __add_wb_stat(wb, item, -1);
165} 78}
166 79
167static inline void dec_bdi_stat(struct backing_dev_info *bdi, 80static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
168 enum bdi_stat_item item)
169{ 81{
170 unsigned long flags; 82 unsigned long flags;
171 83
172 local_irq_save(flags); 84 local_irq_save(flags);
173 __dec_bdi_stat(bdi, item); 85 __dec_wb_stat(wb, item);
174 local_irq_restore(flags); 86 local_irq_restore(flags);
175} 87}
176 88
177static inline s64 bdi_stat(struct backing_dev_info *bdi, 89static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
178 enum bdi_stat_item item)
179{ 90{
180 return percpu_counter_read_positive(&bdi->bdi_stat[item]); 91 return percpu_counter_read_positive(&wb->stat[item]);
181} 92}
182 93
183static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi, 94static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
184 enum bdi_stat_item item) 95 enum wb_stat_item item)
185{ 96{
186 return percpu_counter_sum_positive(&bdi->bdi_stat[item]); 97 return percpu_counter_sum_positive(&wb->stat[item]);
187} 98}
188 99
189static inline s64 bdi_stat_sum(struct backing_dev_info *bdi, 100static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
190 enum bdi_stat_item item)
191{ 101{
192 s64 sum; 102 s64 sum;
193 unsigned long flags; 103 unsigned long flags;
194 104
195 local_irq_save(flags); 105 local_irq_save(flags);
196 sum = __bdi_stat_sum(bdi, item); 106 sum = __wb_stat_sum(wb, item);
197 local_irq_restore(flags); 107 local_irq_restore(flags);
198 108
199 return sum; 109 return sum;
200} 110}
201 111
202extern void bdi_writeout_inc(struct backing_dev_info *bdi); 112extern void wb_writeout_inc(struct bdi_writeback *wb);
203 113
204/* 114/*
205 * maximal error of a stat counter. 115 * maximal error of a stat counter.
206 */ 116 */
207static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi) 117static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
208{ 118{
209#ifdef CONFIG_SMP 119#ifdef CONFIG_SMP
210 return nr_cpu_ids * BDI_STAT_BATCH; 120 return nr_cpu_ids * WB_STAT_BATCH;
211#else 121#else
212 return 1; 122 return 1;
213#endif 123#endif
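
The wb_stat helpers above follow the usual batched per-CPU counter scheme, which is what makes wb_stat_error() equal to nr_cpu_ids * WB_STAT_BATCH. A single-threaded user-space simulation of that scheme follows; the names are hypothetical and the kernel uses struct percpu_counter instead.

	#include <stdio.h>
	#include <stdlib.h>

	#define NCPUS	4
	#define BATCH	8

	static long global_count;
	static long cpu_delta[NCPUS];

	static void toy_counter_add(int cpu, long amount)
	{
		cpu_delta[cpu] += amount;
		if (labs(cpu_delta[cpu]) >= BATCH) {	/* fold into shared count */
			global_count += cpu_delta[cpu];
			cpu_delta[cpu] = 0;
		}
	}

	static long toy_counter_read(void)	/* fast, approximate (cf. wb_stat) */
	{
		return global_count;
	}

	static long toy_counter_sum(void)	/* slow, exact (cf. wb_stat_sum) */
	{
		long sum = global_count;

		for (int cpu = 0; cpu < NCPUS; cpu++)
			sum += cpu_delta[cpu];
		return sum;
	}

	int main(void)
	{
		for (int i = 0; i < 25; i++)
			toy_counter_add(i % NCPUS, 1);

		printf("approx=%ld exact=%ld max error=%d\n",
		       toy_counter_read(), toy_counter_sum(), NCPUS * BATCH);
		return 0;
	}
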
@@ -231,50 +141,57 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
231 * BDI_CAP_NO_WRITEBACK: Don't write pages back 141 * BDI_CAP_NO_WRITEBACK: Don't write pages back
232 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages 142 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
233 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. 143 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
144 *
145 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
234 */ 146 */
235#define BDI_CAP_NO_ACCT_DIRTY 0x00000001 147#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
236#define BDI_CAP_NO_WRITEBACK 0x00000002 148#define BDI_CAP_NO_WRITEBACK 0x00000002
237#define BDI_CAP_NO_ACCT_WB 0x00000004 149#define BDI_CAP_NO_ACCT_WB 0x00000004
238#define BDI_CAP_STABLE_WRITES 0x00000008 150#define BDI_CAP_STABLE_WRITES 0x00000008
239#define BDI_CAP_STRICTLIMIT 0x00000010 151#define BDI_CAP_STRICTLIMIT 0x00000010
152#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
240 153
241#define BDI_CAP_NO_ACCT_AND_WRITEBACK \ 154#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
242 (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) 155 (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
243 156
244extern struct backing_dev_info noop_backing_dev_info; 157extern struct backing_dev_info noop_backing_dev_info;
245 158
246int writeback_in_progress(struct backing_dev_info *bdi); 159/**
247 160 * writeback_in_progress - determine whether there is writeback in progress
248static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits) 161 * @wb: bdi_writeback of interest
162 *
163 * Determine whether there is writeback waiting to be handled against a
164 * bdi_writeback.
165 */
166static inline bool writeback_in_progress(struct bdi_writeback *wb)
249{ 167{
250 if (bdi->congested_fn) 168 return test_bit(WB_writeback_running, &wb->state);
251 return bdi->congested_fn(bdi->congested_data, bdi_bits);
252 return (bdi->state & bdi_bits);
253} 169}
254 170
255static inline int bdi_read_congested(struct backing_dev_info *bdi) 171static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
256{ 172{
257 return bdi_congested(bdi, 1 << BDI_sync_congested); 173 struct super_block *sb;
258}
259 174
260static inline int bdi_write_congested(struct backing_dev_info *bdi) 175 if (!inode)
261{ 176 return &noop_backing_dev_info;
262 return bdi_congested(bdi, 1 << BDI_async_congested); 177
178 sb = inode->i_sb;
179#ifdef CONFIG_BLOCK
180 if (sb_is_blkdev_sb(sb))
181 return blk_get_backing_dev_info(I_BDEV(inode));
182#endif
183 return sb->s_bdi;
263} 184}
264 185
265static inline int bdi_rw_congested(struct backing_dev_info *bdi) 186static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
266{ 187{
267 return bdi_congested(bdi, (1 << BDI_sync_congested) | 188 struct backing_dev_info *bdi = wb->bdi;
268 (1 << BDI_async_congested));
269}
270 189
271enum { 190 if (bdi->congested_fn)
272 BLK_RW_ASYNC = 0, 191 return bdi->congested_fn(bdi->congested_data, cong_bits);
273 BLK_RW_SYNC = 1, 192 return wb->congested->state & cong_bits;
274}; 193}
275 194
276void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
277void set_bdi_congested(struct backing_dev_info *bdi, int sync);
278long congestion_wait(int sync, long timeout); 195long congestion_wait(int sync, long timeout);
279long wait_iff_congested(struct zone *zone, int sync, long timeout); 196long wait_iff_congested(struct zone *zone, int sync, long timeout);
280int pdflush_proc_obsolete(struct ctl_table *table, int write, 197int pdflush_proc_obsolete(struct ctl_table *table, int write,
@@ -318,4 +235,336 @@ static inline int bdi_sched_wait(void *word)
318 return 0; 235 return 0;
319} 236}
320 237
321#endif /* _LINUX_BACKING_DEV_H */ 238#ifdef CONFIG_CGROUP_WRITEBACK
239
240struct bdi_writeback_congested *
241wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
242void wb_congested_put(struct bdi_writeback_congested *congested);
243struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
244 struct cgroup_subsys_state *memcg_css,
245 gfp_t gfp);
246void wb_memcg_offline(struct mem_cgroup *memcg);
247void wb_blkcg_offline(struct blkcg *blkcg);
248int inode_congested(struct inode *inode, int cong_bits);
249
250/**
251 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
252 * @inode: inode of interest
253 *
254 * cgroup writeback requires support from both the bdi and filesystem.
255 * Test whether @inode has both.
256 */
257static inline bool inode_cgwb_enabled(struct inode *inode)
258{
259 struct backing_dev_info *bdi = inode_to_bdi(inode);
260
261 return bdi_cap_account_dirty(bdi) &&
262 (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
263 (inode->i_sb->s_iflags & SB_I_CGROUPWB);
264}
265
266/**
267 * wb_find_current - find wb for %current on a bdi
268 * @bdi: bdi of interest
269 *
270 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
271 * Must be called under rcu_read_lock(), which protects the returned wb.
272 * Returns NULL if not found.
273 */
274static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
275{
276 struct cgroup_subsys_state *memcg_css;
277 struct bdi_writeback *wb;
278
279 memcg_css = task_css(current, memory_cgrp_id);
280 if (!memcg_css->parent)
281 return &bdi->wb;
282
283 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
284
285 /*
286 * %current's blkcg equals the effective blkcg of its memcg. No
287 * need to use the relatively expensive cgroup_get_e_css().
288 */
289 if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
290 return wb;
291 return NULL;
292}
293
294/**
295 * wb_get_create_current - get or create wb for %current on a bdi
296 * @bdi: bdi of interest
297 * @gfp: allocation mask
298 *
299 * Equivalent to wb_get_create() on %current's memcg. This function is
300 * called from a relatively hot path and optimizes the common cases using
301 * wb_find_current().
302 */
303static inline struct bdi_writeback *
304wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
305{
306 struct bdi_writeback *wb;
307
308 rcu_read_lock();
309 wb = wb_find_current(bdi);
310 if (wb && unlikely(!wb_tryget(wb)))
311 wb = NULL;
312 rcu_read_unlock();
313
314 if (unlikely(!wb)) {
315 struct cgroup_subsys_state *memcg_css;
316
317 memcg_css = task_get_css(current, memory_cgrp_id);
318 wb = wb_get_create(bdi, memcg_css, gfp);
319 css_put(memcg_css);
320 }
321 return wb;
322}
323
324/**
325 * inode_to_wb_is_valid - test whether an inode has a wb associated
326 * @inode: inode of interest
327 *
328 * Returns %true if @inode has a wb associated. May be called without any
329 * locking.
330 */
331static inline bool inode_to_wb_is_valid(struct inode *inode)
332{
333 return inode->i_wb;
334}
335
336/**
337 * inode_to_wb - determine the wb of an inode
338 * @inode: inode of interest
339 *
340 * Returns the wb @inode is currently associated with. The caller must be
341 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
342 * associated wb's list_lock.
343 */
344static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
345{
346#ifdef CONFIG_LOCKDEP
347 WARN_ON_ONCE(debug_locks &&
348 (!lockdep_is_held(&inode->i_lock) &&
349 !lockdep_is_held(&inode->i_mapping->tree_lock) &&
350 !lockdep_is_held(&inode->i_wb->list_lock)));
351#endif
352 return inode->i_wb;
353}
354
355/**
356 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
357 * @inode: target inode
358 * @lockedp: temp bool output param, to be passed to the end function
359 *
360 * The caller wants to access the wb associated with @inode but isn't
361 * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
362 * function determines the wb associated with @inode and ensures that the
363 * association doesn't change until the transaction is finished with
364 * unlocked_inode_to_wb_end().
365 *
366 * The caller must call unlocked_inode_to_wb_end() with *@lockdep
367 * afterwards and can't sleep during transaction. IRQ may or may not be
368 * disabled on return.
369 */
370static inline struct bdi_writeback *
371unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
372{
373 rcu_read_lock();
374
375 /*
376 * Paired with store_release in inode_switch_wb_work_fn() and
377 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
378 */
379 *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
380
381 if (unlikely(*lockedp))
382 spin_lock_irq(&inode->i_mapping->tree_lock);
383
384 /*
385 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
386 * inode_to_wb() will bark. Deref directly.
387 */
388 return inode->i_wb;
389}
390
391/**
392 * unlocked_inode_to_wb_end - end inode wb access transaction
393 * @inode: target inode
394 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
395 */
396static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
397{
398 if (unlikely(locked))
399 spin_unlock_irq(&inode->i_mapping->tree_lock);
400
401 rcu_read_unlock();
402}
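
The begin/end pair above is essentially a tiny transaction: take the cheap acquire-load fast path unless an inode-to-wb switch is pending, in which case fall back to the lock. Below is a user-space model of that flag-plus-lock protocol, with C11 atomics and a pthread mutex standing in for smp_load_acquire() and tree_lock; the RCU read side is elided and all names are hypothetical.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool switch_in_progress;		/* cf. I_WB_SWITCH */
	static int current_wb_id = 1;

	static int toy_wb_access_begin(bool *lockedp)
	{
		*lockedp = atomic_load_explicit(&switch_in_progress,
						memory_order_acquire);
		if (*lockedp)				/* slow path: switch pending */
			pthread_mutex_lock(&tree_lock);
		return current_wb_id;
	}

	static void toy_wb_access_end(bool locked)
	{
		if (locked)
			pthread_mutex_unlock(&tree_lock);
	}

	int main(void)
	{
		bool locked;
		int id = toy_wb_access_begin(&locked);

		printf("saw wb %d, locked=%d\n", id, locked);
		toy_wb_access_end(locked);
		return 0;
	}
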
403
404struct wb_iter {
405 int start_blkcg_id;
406 struct radix_tree_iter tree_iter;
407 void **slot;
408};
409
410static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
411 struct backing_dev_info *bdi)
412{
413 struct radix_tree_iter *titer = &iter->tree_iter;
414
415 WARN_ON_ONCE(!rcu_read_lock_held());
416
417 if (iter->start_blkcg_id >= 0) {
418 iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
419 iter->start_blkcg_id = -1;
420 } else {
421 iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
422 }
423
424 if (!iter->slot)
425 iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
426 if (iter->slot)
427 return *iter->slot;
428 return NULL;
429}
430
431static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
432 struct backing_dev_info *bdi,
433 int start_blkcg_id)
434{
435 iter->start_blkcg_id = start_blkcg_id;
436
437 if (start_blkcg_id)
438 return __wb_iter_next(iter, bdi);
439 else
440 return &bdi->wb;
441}
442
443/**
444 * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
445 * @wb_cur: cursor struct bdi_writeback pointer
446 * @bdi: bdi to walk wb's of
447 * @iter: pointer to struct wb_iter to be used as iteration buffer
448 * @start_blkcg_id: blkcg ID to start iteration from
449 *
450 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
451 * blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter
452 * to be used as temp storage during iteration. rcu_read_lock() must be
453 * held throughout iteration.
454 */
455#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
456 for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \
457 (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
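
A toy model of the iteration order documented above: starting from ID 0 visits the bdi's embedded root wb first and then the cgroup wbs in ascending ID order. The sorted array below is a hypothetical stand-in for the radix tree.

	#include <stdio.h>

	#define NR_CGWBS 4
	static int cgwbs[NR_CGWBS] = { 2, 5, 7, 9 };	/* sorted IDs */

	int main(void)
	{
		int start = 0;

		if (start == 0)				/* embedded root wb comes first */
			printf("root wb (bdi->wb)\n");
		for (int i = 0; i < NR_CGWBS; i++)	/* then cgroup wbs in ID order */
			if (cgwbs[i] >= start)
				printf("cgroup wb for ID %d\n", cgwbs[i]);
		return 0;
	}
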
458
459#else /* CONFIG_CGROUP_WRITEBACK */
460
461static inline bool inode_cgwb_enabled(struct inode *inode)
462{
463 return false;
464}
465
466static inline struct bdi_writeback_congested *
467wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
468{
469 atomic_inc(&bdi->wb_congested->refcnt);
470 return bdi->wb_congested;
471}
472
473static inline void wb_congested_put(struct bdi_writeback_congested *congested)
474{
475 if (atomic_dec_and_test(&congested->refcnt))
476 kfree(congested);
477}
478
479static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
480{
481 return &bdi->wb;
482}
483
484static inline struct bdi_writeback *
485wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
486{
487 return &bdi->wb;
488}
489
490static inline bool inode_to_wb_is_valid(struct inode *inode)
491{
492 return true;
493}
494
495static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
496{
497 return &inode_to_bdi(inode)->wb;
498}
499
500static inline struct bdi_writeback *
501unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
502{
503 return inode_to_wb(inode);
504}
505
506static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
507{
508}
509
510static inline void wb_memcg_offline(struct mem_cgroup *memcg)
511{
512}
513
514static inline void wb_blkcg_offline(struct blkcg *blkcg)
515{
516}
517
518struct wb_iter {
519 int next_id;
520};
521
522#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
523 for ((iter)->next_id = (start_blkcg_id); \
524 ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
525
526static inline int inode_congested(struct inode *inode, int cong_bits)
527{
528 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
529}
530
531#endif /* CONFIG_CGROUP_WRITEBACK */
532
533static inline int inode_read_congested(struct inode *inode)
534{
535 return inode_congested(inode, 1 << WB_sync_congested);
536}
537
538static inline int inode_write_congested(struct inode *inode)
539{
540 return inode_congested(inode, 1 << WB_async_congested);
541}
542
543static inline int inode_rw_congested(struct inode *inode)
544{
545 return inode_congested(inode, (1 << WB_sync_congested) |
546 (1 << WB_async_congested));
547}
548
549static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
550{
551 return wb_congested(&bdi->wb, cong_bits);
552}
553
554static inline int bdi_read_congested(struct backing_dev_info *bdi)
555{
556 return bdi_congested(bdi, 1 << WB_sync_congested);
557}
558
559static inline int bdi_write_congested(struct backing_dev_info *bdi)
560{
561 return bdi_congested(bdi, 1 << WB_async_congested);
562}
563
564static inline int bdi_rw_congested(struct backing_dev_info *bdi)
565{
566 return bdi_congested(bdi, (1 << WB_sync_congested) |
567 (1 << WB_async_congested));
568}
569
570#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index adb14a8616df..1e7a69adbe6f 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -117,12 +117,16 @@ struct backlight_device {
117 int use_count; 117 int use_count;
118}; 118};
119 119
120static inline void backlight_update_status(struct backlight_device *bd) 120static inline int backlight_update_status(struct backlight_device *bd)
121{ 121{
122 int ret = -ENOENT;
123
122 mutex_lock(&bd->update_lock); 124 mutex_lock(&bd->update_lock);
123 if (bd->ops && bd->ops->update_status) 125 if (bd->ops && bd->ops->update_status)
124 bd->ops->update_status(bd); 126 ret = bd->ops->update_status(bd);
125 mutex_unlock(&bd->update_lock); 127 mutex_unlock(&bd->update_lock);
128
129 return ret;
126} 130}
127 131
128extern struct backlight_device *backlight_device_register(const char *name, 132extern struct backlight_device *backlight_device_register(const char *name,
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index b12b07e75929..2793652fbf66 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -10,11 +10,17 @@
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/vmalloc.h>
13 14
14#ifdef CONFIG_BCM47XX 15#ifdef CONFIG_BCM47XX_NVRAM
15int bcm47xx_nvram_init_from_mem(u32 base, u32 lim); 16int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
16int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len); 17int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
17int bcm47xx_nvram_gpio_pin(const char *name); 18int bcm47xx_nvram_gpio_pin(const char *name);
19char *bcm47xx_nvram_get_contents(size_t *val_len);
20static inline void bcm47xx_nvram_release_contents(char *nvram)
21{
22 vfree(nvram);
23};
18#else 24#else
19static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) 25static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
20{ 26{
@@ -29,6 +35,15 @@ static inline int bcm47xx_nvram_gpio_pin(const char *name)
29{ 35{
30 return -ENOTSUPP; 36 return -ENOTSUPP;
31}; 37};
38
39static inline char *bcm47xx_nvram_get_contents(size_t *val_len)
40{
41 return NULL;
42};
43
44static inline void bcm47xx_nvram_release_contents(char *nvram)
45{
46};
32#endif 47#endif
33 48
34#endif /* __BCM47XX_NVRAM_H */ 49#endif /* __BCM47XX_NVRAM_H */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index e34f906647d3..2ff4a9961e1d 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -305,6 +305,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner);
305 305
306extern void bcma_driver_unregister(struct bcma_driver *drv); 306extern void bcma_driver_unregister(struct bcma_driver *drv);
307 307
308/* module_bcma_driver() - Helper macro for drivers that don't do
309 * anything special in module init/exit. This eliminates a lot of
310 * boilerplate. Each module may only use this macro once, and
311 * calling it replaces module_init() and module_exit()
312 */
313#define module_bcma_driver(__bcma_driver) \
314 module_driver(__bcma_driver, bcma_driver_register, \
315 bcma_driver_unregister)
316
308/* Set a fallback SPROM. 317/* Set a fallback SPROM.
309 * See kdoc at the function definition for complete documentation. */ 318 * See kdoc at the function definition for complete documentation. */
310extern int bcma_arch_register_fallback_sprom( 319extern int bcma_arch_register_fallback_sprom(
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 5ba6918ca20b..9657f11d48a7 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -246,7 +246,18 @@ static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
246} 246}
247#endif 247#endif
248 248
249#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
249extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); 250extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
250extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); 251extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
252#else
253static inline int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
254{
255 return -ENOTSUPP;
256}
257static inline int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
258{
259 return -ENOTSUPP;
260}
261#endif
251 262
252#endif /* LINUX_BCMA_DRIVER_PCI_H_ */ 263#endif /* LINUX_BCMA_DRIVER_PCI_H_ */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index da3a127c9958..5e963a6d7c14 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -290,7 +290,21 @@ static inline unsigned bio_segments(struct bio *bio)
290 * returns. and then bio would be freed memory when if (bio->bi_flags ...) 290 * returns. and then bio would be freed memory when if (bio->bi_flags ...)
291 * runs 291 * runs
292 */ 292 */
293#define bio_get(bio) atomic_inc(&(bio)->bi_cnt) 293static inline void bio_get(struct bio *bio)
294{
295 bio->bi_flags |= (1 << BIO_REFFED);
296 smp_mb__before_atomic();
297 atomic_inc(&bio->__bi_cnt);
298}
299
300static inline void bio_cnt_set(struct bio *bio, unsigned int count)
301{
302 if (count != 1) {
303 bio->bi_flags |= (1 << BIO_REFFED);
304 smp_mb__before_atomic();
305 }
306 atomic_set(&bio->__bi_cnt, count);
307}
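
The new BIO_REFFED handling above lets a bio that was never shared skip reference counting on free: the flag is only raised once the count can exceed one. A user-space sketch of that fast-path idea with C11 atomics; the toy_* names are hypothetical.

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define TOY_REFFED (1u << 0)

	struct toy_bio { unsigned int flags; atomic_int cnt; };

	static struct toy_bio *toy_bio_alloc(void)
	{
		struct toy_bio *bio = calloc(1, sizeof(*bio));

		atomic_init(&bio->cnt, 1);
		return bio;
	}

	static void toy_bio_get(struct toy_bio *bio)
	{
		bio->flags |= TOY_REFFED;	/* count may now exceed 1 */
		atomic_fetch_add(&bio->cnt, 1);
	}

	static void toy_bio_put(struct toy_bio *bio)
	{
		if (!(bio->flags & TOY_REFFED)) {	/* never shared: free directly */
			free(bio);
			return;
		}
		if (atomic_fetch_sub(&bio->cnt, 1) == 1)
			free(bio);
	}

	int main(void)
	{
		struct toy_bio *a = toy_bio_alloc();
		struct toy_bio *b = toy_bio_alloc();

		toy_bio_put(a);			/* fast path, no atomic decrement */

		toy_bio_get(b);			/* shared once */
		toy_bio_put(b);
		toy_bio_put(b);			/* last put frees */
		printf("done\n");
		return 0;
	}
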
294 308
295enum bip_flags { 309enum bip_flags {
296 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ 310 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
@@ -413,7 +427,6 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
413} 427}
414 428
415extern void bio_endio(struct bio *, int); 429extern void bio_endio(struct bio *, int);
416extern void bio_endio_nodec(struct bio *, int);
417struct request_queue; 430struct request_queue;
418extern int bio_phys_segments(struct request_queue *, struct bio *); 431extern int bio_phys_segments(struct request_queue *, struct bio *);
419 432
@@ -469,9 +482,12 @@ extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
469extern unsigned int bvec_nr_vecs(unsigned short idx); 482extern unsigned int bvec_nr_vecs(unsigned short idx);
470 483
471#ifdef CONFIG_BLK_CGROUP 484#ifdef CONFIG_BLK_CGROUP
485int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
472int bio_associate_current(struct bio *bio); 486int bio_associate_current(struct bio *bio);
473void bio_disassociate_task(struct bio *bio); 487void bio_disassociate_task(struct bio *bio);
474#else /* CONFIG_BLK_CGROUP */ 488#else /* CONFIG_BLK_CGROUP */
489static inline int bio_associate_blkcg(struct bio *bio,
490 struct cgroup_subsys_state *blkcg_css) { return 0; }
475static inline int bio_associate_current(struct bio *bio) { return -ENOENT; } 491static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
476static inline void bio_disassociate_task(struct bio *bio) { } 492static inline void bio_disassociate_task(struct bio *bio) { }
477#endif /* CONFIG_BLK_CGROUP */ 493#endif /* CONFIG_BLK_CGROUP */
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
new file mode 100644
index 000000000000..1b62d768c7df
--- /dev/null
+++ b/include/linux/blk-cgroup.h
@@ -0,0 +1,650 @@
1#ifndef _BLK_CGROUP_H
2#define _BLK_CGROUP_H
3/*
4 * Common Block IO controller cgroup interface
5 *
6 * Based on ideas and code from CFQ, CFS and BFQ:
7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8 *
9 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
10 * Paolo Valente <paolo.valente@unimore.it>
11 *
12 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
13 * Nauman Rafique <nauman@google.com>
14 */
15
16#include <linux/cgroup.h>
17#include <linux/u64_stats_sync.h>
18#include <linux/seq_file.h>
19#include <linux/radix-tree.h>
20#include <linux/blkdev.h>
21#include <linux/atomic.h>
22
23/* Max limits for throttle policy */
24#define THROTL_IOPS_MAX UINT_MAX
25
26#ifdef CONFIG_BLK_CGROUP
27
28enum blkg_rwstat_type {
29 BLKG_RWSTAT_READ,
30 BLKG_RWSTAT_WRITE,
31 BLKG_RWSTAT_SYNC,
32 BLKG_RWSTAT_ASYNC,
33
34 BLKG_RWSTAT_NR,
35 BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
36};
37
38struct blkcg_gq;
39
40struct blkcg {
41 struct cgroup_subsys_state css;
42 spinlock_t lock;
43
44 struct radix_tree_root blkg_tree;
45 struct blkcg_gq *blkg_hint;
46 struct hlist_head blkg_list;
47
48 struct blkcg_policy_data *pd[BLKCG_MAX_POLS];
49
50 struct list_head all_blkcgs_node;
51#ifdef CONFIG_CGROUP_WRITEBACK
52 struct list_head cgwb_list;
53#endif
54};
55
56struct blkg_stat {
57 struct u64_stats_sync syncp;
58 uint64_t cnt;
59};
60
61struct blkg_rwstat {
62 struct u64_stats_sync syncp;
63 uint64_t cnt[BLKG_RWSTAT_NR];
64};
65
66/*
67 * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
68 * request_queue (q). This is used by blkcg policies which need to track
69 * information per blkcg - q pair.
70 *
71 * There can be multiple active blkcg policies and each has its private
72 * data on each blkg, the size of which is determined by
73 * blkcg_policy->pd_size. blkcg core allocates and frees such areas
74 * together with blkg and invokes pd_init/exit_fn() methods.
75 *
76 * Such private data must embed struct blkg_policy_data (pd) at the
77 * beginning and pd_size can't be smaller than pd.
78 */
79struct blkg_policy_data {
80 /* the blkg and policy id this per-policy data belongs to */
81 struct blkcg_gq *blkg;
82 int plid;
83
84 /* used during policy activation */
85 struct list_head alloc_node;
86};
87
88/*
89 * Policies that need to keep per-blkcg data which is independent
90 * from any request_queue associated to it must specify its size
91 * with the cpd_size field of the blkcg_policy structure and
92 * embed a blkcg_policy_data in it. cpd_init() is invoked to let
93 * each policy handle per-blkcg data.
94 */
95struct blkcg_policy_data {
96 /* the policy id this per-policy data belongs to */
97 int plid;
98};
99
100/* association between a blk cgroup and a request queue */
101struct blkcg_gq {
102 /* Pointer to the associated request_queue */
103 struct request_queue *q;
104 struct list_head q_node;
105 struct hlist_node blkcg_node;
106 struct blkcg *blkcg;
107
108 /*
109 * Each blkg gets congested separately and the congestion state is
110 * propagated to the matching bdi_writeback_congested.
111 */
112 struct bdi_writeback_congested *wb_congested;
113
114 /* all non-root blkcg_gq's are guaranteed to have access to parent */
115 struct blkcg_gq *parent;
116
117 /* request allocation list for this blkcg-q pair */
118 struct request_list rl;
119
120 /* reference count */
121 atomic_t refcnt;
122
123 /* is this blkg online? protected by both blkcg and q locks */
124 bool online;
125
126 struct blkg_policy_data *pd[BLKCG_MAX_POLS];
127
128 struct rcu_head rcu_head;
129};
130
131typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
132typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
133typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
134typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
135typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
136typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
137
138struct blkcg_policy {
139 int plid;
140 /* policy specific private data size */
141 size_t pd_size;
142 /* policy specific per-blkcg data size */
143 size_t cpd_size;
144 /* cgroup files for the policy */
145 struct cftype *cftypes;
146
147 /* operations */
148 blkcg_pol_init_cpd_fn *cpd_init_fn;
149 blkcg_pol_init_pd_fn *pd_init_fn;
150 blkcg_pol_online_pd_fn *pd_online_fn;
151 blkcg_pol_offline_pd_fn *pd_offline_fn;
152 blkcg_pol_exit_pd_fn *pd_exit_fn;
153 blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
154};
155
156extern struct blkcg blkcg_root;
157extern struct cgroup_subsys_state * const blkcg_root_css;
158
159struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
160struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
161 struct request_queue *q);
162int blkcg_init_queue(struct request_queue *q);
163void blkcg_drain_queue(struct request_queue *q);
164void blkcg_exit_queue(struct request_queue *q);
165
166/* Blkio controller policy registration */
167int blkcg_policy_register(struct blkcg_policy *pol);
168void blkcg_policy_unregister(struct blkcg_policy *pol);
169int blkcg_activate_policy(struct request_queue *q,
170 const struct blkcg_policy *pol);
171void blkcg_deactivate_policy(struct request_queue *q,
172 const struct blkcg_policy *pol);
173
174void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
175 u64 (*prfill)(struct seq_file *,
176 struct blkg_policy_data *, int),
177 const struct blkcg_policy *pol, int data,
178 bool show_total);
179u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
180u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
181 const struct blkg_rwstat *rwstat);
182u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
183u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
184 int off);
185
186u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
187struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
188 int off);
189
190struct blkg_conf_ctx {
191 struct gendisk *disk;
192 struct blkcg_gq *blkg;
193 u64 v;
194};
195
196int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
197 const char *input, struct blkg_conf_ctx *ctx);
198void blkg_conf_finish(struct blkg_conf_ctx *ctx);
199
200
201static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
202{
203 return css ? container_of(css, struct blkcg, css) : NULL;
204}
205
206static inline struct blkcg *task_blkcg(struct task_struct *tsk)
207{
208 return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
209}
210
211static inline struct blkcg *bio_blkcg(struct bio *bio)
212{
213 if (bio && bio->bi_css)
214 return css_to_blkcg(bio->bi_css);
215 return task_blkcg(current);
216}
217
218static inline struct cgroup_subsys_state *
219task_get_blkcg_css(struct task_struct *task)
220{
221 return task_get_css(task, blkio_cgrp_id);
222}
223
224/**
225 * blkcg_parent - get the parent of a blkcg
226 * @blkcg: blkcg of interest
227 *
228 * Return the parent blkcg of @blkcg. Can be called anytime.
229 */
230static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
231{
232 return css_to_blkcg(blkcg->css.parent);
233}
234
235/**
236 * blkg_to_pd - get policy private data
237 * @blkg: blkg of interest
238 * @pol: policy of interest
239 *
240 * Return pointer to private data associated with the @blkg-@pol pair.
241 */
242static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
243 struct blkcg_policy *pol)
244{
245 return blkg ? blkg->pd[pol->plid] : NULL;
246}
247
248static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
249 struct blkcg_policy *pol)
250{
251 return blkcg ? blkcg->pd[pol->plid] : NULL;
252}
253
254/**
255 * pd_to_blkg - get blkg associated with policy private data
256 * @pd: policy private data of interest
257 *
258 * @pd is policy private data. Determine the blkg it's associated with.
259 */
260static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
261{
262 return pd ? pd->blkg : NULL;
263}
264
265/**
266 * blkg_path - format cgroup path of blkg
267 * @blkg: blkg of interest
268 * @buf: target buffer
269 * @buflen: target buffer length
270 *
271 * Format the path of the cgroup of @blkg into @buf.
272 */
273static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
274{
275 char *p;
276
277 p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
278 if (!p) {
279 strncpy(buf, "<unavailable>", buflen);
280 return -ENAMETOOLONG;
281 }
282
283 memmove(buf, p, buf + buflen - p);
284 return 0;
285}
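
The memmove in blkg_path() exists because cgroup_path() builds the path at the tail of the buffer and returns a pointer into its middle. A minimal user-space illustration follows, with a hypothetical right-aligned formatter standing in for cgroup_path().

	#include <stdio.h>
	#include <string.h>

	/* builds the string at the *end* of buf and returns where it starts */
	static char *format_right_aligned(const char *name, char *buf, int buflen)
	{
		int len = strlen(name) + 1;

		if (len > buflen)
			return NULL;
		memcpy(buf + buflen - len, name, len);
		return buf + buflen - len;
	}

	int main(void)
	{
		char buf[32];
		char *p = format_right_aligned("/blkio/group-a", buf, sizeof(buf));

		if (!p)
			return 1;
		/* move the result to the front, as blkg_path() does */
		memmove(buf, p, buf + sizeof(buf) - p);
		printf("%s\n", buf);
		return 0;
	}
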
286
287/**
288 * blkg_get - get a blkg reference
289 * @blkg: blkg to get
290 *
291 * The caller should be holding an existing reference.
292 */
293static inline void blkg_get(struct blkcg_gq *blkg)
294{
295 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
296 atomic_inc(&blkg->refcnt);
297}
298
299void __blkg_release_rcu(struct rcu_head *rcu);
300
301/**
302 * blkg_put - put a blkg reference
303 * @blkg: blkg to put
304 */
305static inline void blkg_put(struct blkcg_gq *blkg)
306{
307 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
308 if (atomic_dec_and_test(&blkg->refcnt))
309 call_rcu(&blkg->rcu_head, __blkg_release_rcu);
310}
311
312struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
313 bool update_hint);
314
315/**
316 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
317 * @d_blkg: loop cursor pointing to the current descendant
318 * @pos_css: used for iteration
319 * @p_blkg: target blkg to walk descendants of
320 *
321 * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
322 * read locked. If called under either blkcg or queue lock, the iteration
323 * is guaranteed to include all and only online blkgs. The caller may
324 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
325 * @p_blkg is included in the iteration and the first node to be visited.
326 */
327#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
328 css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
329 if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
330 (p_blkg)->q, false)))
331
332/**
333 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
334 * @d_blkg: loop cursor pointing to the current descendant
335 * @pos_css: used for iteration
336 * @p_blkg: target blkg to walk descendants of
337 *
338 * Similar to blkg_for_each_descendant_pre() but performs post-order
339 * traversal instead. Synchronization rules are the same. @p_blkg is
340 * included in the iteration and the last node to be visited.
341 */
342#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
343 css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
344 if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
345 (p_blkg)->q, false)))
346
347/**
348 * blk_get_rl - get request_list to use
349 * @q: request_queue of interest
350 * @bio: bio which will be attached to the allocated request (may be %NULL)
351 *
352 * The caller wants to allocate a request from @q to use for @bio. Find
353 * the request_list to use and obtain a reference on it. Should be called
354 * under queue_lock. This function is guaranteed to return non-%NULL
355 * request_list.
356 */
357static inline struct request_list *blk_get_rl(struct request_queue *q,
358 struct bio *bio)
359{
360 struct blkcg *blkcg;
361 struct blkcg_gq *blkg;
362
363 rcu_read_lock();
364
365 blkcg = bio_blkcg(bio);
366
367 /* bypass blkg lookup and use @q->root_rl directly for root */
368 if (blkcg == &blkcg_root)
369 goto root_rl;
370
371 /*
372 * Try to use blkg->rl. blkg lookup may fail under memory pressure
373 * or if either the blkcg or queue is going away. Fall back to
374 * root_rl in such cases.
375 */
376 blkg = blkg_lookup_create(blkcg, q);
377 if (unlikely(IS_ERR(blkg)))
378 goto root_rl;
379
380 blkg_get(blkg);
381 rcu_read_unlock();
382 return &blkg->rl;
383root_rl:
384 rcu_read_unlock();
385 return &q->root_rl;
386}
387
388/**
389 * blk_put_rl - put request_list
390 * @rl: request_list to put
391 *
392 * Put the reference acquired by blk_get_rl(). Should be called under
393 * queue_lock.
394 */
395static inline void blk_put_rl(struct request_list *rl)
396{
397 /* root_rl may not have blkg set */
398 if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
399 blkg_put(rl->blkg);
400}
401
402/**
403 * blk_rq_set_rl - associate a request with a request_list
404 * @rq: request of interest
405 * @rl: target request_list
406 *
407 * Associate @rq with @rl so that accounting and freeing can know the
408 * request_list @rq came from.
409 */
410static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
411{
412 rq->rl = rl;
413}
414
415/**
416 * blk_rq_rl - return the request_list a request came from
417 * @rq: request of interest
418 *
419 * Return the request_list @rq is allocated from.
420 */
421static inline struct request_list *blk_rq_rl(struct request *rq)
422{
423 return rq->rl;
424}
425
426struct request_list *__blk_queue_next_rl(struct request_list *rl,
427 struct request_queue *q);
428/**
429 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
430 *
431 * Should be used under queue_lock.
432 */
433#define blk_queue_for_each_rl(rl, q) \
434 for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
435
436static inline void blkg_stat_init(struct blkg_stat *stat)
437{
438 u64_stats_init(&stat->syncp);
439}
440
441/**
442 * blkg_stat_add - add a value to a blkg_stat
443 * @stat: target blkg_stat
444 * @val: value to add
445 *
446 * Add @val to @stat. The caller is responsible for synchronizing calls to
447 * this function.
448 */
449static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
450{
451 u64_stats_update_begin(&stat->syncp);
452 stat->cnt += val;
453 u64_stats_update_end(&stat->syncp);
454}
455
456/**
457 * blkg_stat_read - read the current value of a blkg_stat
458 * @stat: blkg_stat to read
459 *
460 * Read the current value of @stat. This function can be called without
461 * synchronization and takes care of u64 atomicity.
462 */
463static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
464{
465 unsigned int start;
466 uint64_t v;
467
468 do {
469 start = u64_stats_fetch_begin_irq(&stat->syncp);
470 v = stat->cnt;
471 } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
472
473 return v;
474}
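
blkg_stat_read() relies on the usual sequence-counter retry loop so a 64-bit count can be read without locking. A user-space sketch of that loop with C11 atomics standing in for u64_stats_sync; a single writer is assumed, and on 64-bit kernels the real helpers largely compile away.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static atomic_uint seq;
	static uint64_t cnt;

	static void toy_stat_add(uint64_t val)		/* single writer assumed */
	{
		atomic_fetch_add_explicit(&seq, 1, memory_order_release);
		cnt += val;
		atomic_fetch_add_explicit(&seq, 1, memory_order_release);
	}

	static uint64_t toy_stat_read(void)
	{
		unsigned int start;
		uint64_t v;

		do {
			start = atomic_load_explicit(&seq, memory_order_acquire);
			v = cnt;
		} while ((start & 1) ||		/* writer in progress, or ... */
			 start != atomic_load_explicit(&seq,	/* ... raced with one */
						       memory_order_acquire));
		return v;
	}

	int main(void)
	{
		toy_stat_add(40);
		toy_stat_add(2);
		printf("%llu\n", (unsigned long long)toy_stat_read());
		return 0;
	}
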
475
476/**
477 * blkg_stat_reset - reset a blkg_stat
478 * @stat: blkg_stat to reset
479 */
480static inline void blkg_stat_reset(struct blkg_stat *stat)
481{
482 stat->cnt = 0;
483}
484
485/**
486 * blkg_stat_merge - merge a blkg_stat into another
487 * @to: the destination blkg_stat
488 * @from: the source
489 *
490 * Add @from's count to @to.
491 */
492static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
493{
494 blkg_stat_add(to, blkg_stat_read(from));
495}
496
497static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
498{
499 u64_stats_init(&rwstat->syncp);
500}
501
502/**
503 * blkg_rwstat_add - add a value to a blkg_rwstat
504 * @rwstat: target blkg_rwstat
505 * @rw: mask of REQ_{WRITE|SYNC}
506 * @val: value to add
507 *
508 * Add @val to @rwstat. The counters are chosen according to @rw. The
509 * caller is responsible for synchronizing calls to this function.
510 */
511static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
512 int rw, uint64_t val)
513{
514 u64_stats_update_begin(&rwstat->syncp);
515
516 if (rw & REQ_WRITE)
517 rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
518 else
519 rwstat->cnt[BLKG_RWSTAT_READ] += val;
520 if (rw & REQ_SYNC)
521 rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
522 else
523 rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
524
525 u64_stats_update_end(&rwstat->syncp);
526}
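
blkg_rwstat_add() files one event into two buckets -- READ or WRITE, and SYNC or ASYNC -- so the totals can later be split either way. A tiny user-space sketch with hypothetical flag values in place of REQ_WRITE/REQ_SYNC:

	#include <stdint.h>
	#include <stdio.h>

	enum { RW_READ, RW_WRITE, RW_SYNC, RW_ASYNC, RW_NR };

	#define F_WRITE (1 << 0)
	#define F_SYNC  (1 << 1)

	static uint64_t cnt[RW_NR];

	static void toy_rwstat_add(int rw, uint64_t val)
	{
		cnt[(rw & F_WRITE) ? RW_WRITE : RW_READ] += val;
		cnt[(rw & F_SYNC) ? RW_SYNC : RW_ASYNC] += val;
	}

	int main(void)
	{
		toy_rwstat_add(F_WRITE | F_SYNC, 8);	/* sync write of 8 units */
		toy_rwstat_add(0, 4);			/* async read of 4 units */
		printf("r=%llu w=%llu s=%llu a=%llu total=%llu\n",
		       (unsigned long long)cnt[RW_READ],
		       (unsigned long long)cnt[RW_WRITE],
		       (unsigned long long)cnt[RW_SYNC],
		       (unsigned long long)cnt[RW_ASYNC],
		       (unsigned long long)(cnt[RW_READ] + cnt[RW_WRITE]));
		return 0;
	}
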
527
528/**
529 * blkg_rwstat_read - read the current values of a blkg_rwstat
530 * @rwstat: blkg_rwstat to read
531 *
532 * Read the current snapshot of @rwstat and return it.
533 * This function can be called without synchronization and takes care of
534 * u64 atomicity.
535 */
536static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
537{
538 unsigned int start;
539 struct blkg_rwstat tmp;
540
541 do {
542 start = u64_stats_fetch_begin_irq(&rwstat->syncp);
543 tmp = *rwstat;
544 } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
545
546 return tmp;
547}
548
549/**
550 * blkg_rwstat_total - read the total count of a blkg_rwstat
551 * @rwstat: blkg_rwstat to read
552 *
553 * Return the total count of @rwstat regardless of the IO direction. This
554 * function can be called without synchronization and takes care of u64
555 * atomicity.
556 */
557static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
558{
559 struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
560
561 return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
562}
563
564/**
565 * blkg_rwstat_reset - reset a blkg_rwstat
566 * @rwstat: blkg_rwstat to reset
567 */
568static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
569{
570 memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
571}
572
573/**
574 * blkg_rwstat_merge - merge a blkg_rwstat into another
575 * @to: the destination blkg_rwstat
576 * @from: the source
577 *
578 * Add @from's counts to @to.
579 */
580static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
581 struct blkg_rwstat *from)
582{
583 struct blkg_rwstat v = blkg_rwstat_read(from);
584 int i;
585
586 u64_stats_update_begin(&to->syncp);
587 for (i = 0; i < BLKG_RWSTAT_NR; i++)
588 to->cnt[i] += v.cnt[i];
589 u64_stats_update_end(&to->syncp);
590}
591
592#else /* CONFIG_BLK_CGROUP */
593
594struct blkcg {
595};
596
597struct blkg_policy_data {
598};
599
600struct blkcg_policy_data {
601};
602
603struct blkcg_gq {
604};
605
606struct blkcg_policy {
607};
608
609#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
610
611static inline struct cgroup_subsys_state *
612task_get_blkcg_css(struct task_struct *task)
613{
614 return NULL;
615}
616
617#ifdef CONFIG_BLOCK
618
619static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
620static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
621static inline void blkcg_drain_queue(struct request_queue *q) { }
622static inline void blkcg_exit_queue(struct request_queue *q) { }
623static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
624static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
625static inline int blkcg_activate_policy(struct request_queue *q,
626 const struct blkcg_policy *pol) { return 0; }
627static inline void blkcg_deactivate_policy(struct request_queue *q,
628 const struct blkcg_policy *pol) { }
629
630static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
631
632static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
633 struct blkcg_policy *pol) { return NULL; }
634static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
635static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
636static inline void blkg_get(struct blkcg_gq *blkg) { }
637static inline void blkg_put(struct blkcg_gq *blkg) { }
638
639static inline struct request_list *blk_get_rl(struct request_queue *q,
640 struct bio *bio) { return &q->root_rl; }
641static inline void blk_put_rl(struct request_list *rl) { }
642static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
643static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
644
645#define blk_queue_for_each_rl(rl, q) \
646 for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
647
648#endif /* CONFIG_BLOCK */
649#endif /* CONFIG_BLK_CGROUP */
650#endif /* _BLK_CGROUP_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2056a99b92f8..37d1602c4f7a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -96,6 +96,7 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int,
96 96
97typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, 97typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
98 bool); 98 bool);
99typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
99 100
100struct blk_mq_ops { 101struct blk_mq_ops {
101 /* 102 /*
@@ -182,6 +183,7 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
182struct request *blk_mq_alloc_request(struct request_queue *q, int rw, 183struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
183 gfp_t gfp, bool reserved); 184 gfp_t gfp, bool reserved);
184struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); 185struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
186struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
185 187
186enum { 188enum {
187 BLK_MQ_UNIQUE_TAG_BITS = 16, 189 BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -224,6 +226,8 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async);
224void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 226void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
225void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 227void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
226 void *priv); 228 void *priv);
229void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
230 void *priv);
227void blk_mq_freeze_queue(struct request_queue *q); 231void blk_mq_freeze_queue(struct request_queue *q);
228void blk_mq_unfreeze_queue(struct request_queue *q); 232void blk_mq_unfreeze_queue(struct request_queue *q);
229void blk_mq_freeze_queue_start(struct request_queue *q); 233void blk_mq_freeze_queue_start(struct request_queue *q);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index b7299febc4b4..7303b3405520 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -65,7 +65,7 @@ struct bio {
65 unsigned int bi_seg_front_size; 65 unsigned int bi_seg_front_size;
66 unsigned int bi_seg_back_size; 66 unsigned int bi_seg_back_size;
67 67
68 atomic_t bi_remaining; 68 atomic_t __bi_remaining;
69 69
70 bio_end_io_t *bi_end_io; 70 bio_end_io_t *bi_end_io;
71 71
@@ -92,7 +92,7 @@ struct bio {
92 92
93 unsigned short bi_max_vecs; /* max bvl_vecs we can hold */ 93 unsigned short bi_max_vecs; /* max bvl_vecs we can hold */
94 94
95 atomic_t bi_cnt; /* pin count */ 95 atomic_t __bi_cnt; /* pin count */
96 96
97 struct bio_vec *bi_io_vec; /* the actual vec list */ 97 struct bio_vec *bi_io_vec; /* the actual vec list */
98 98
@@ -112,16 +112,15 @@ struct bio {
112 * bio flags 112 * bio flags
113 */ 113 */
114#define BIO_UPTODATE 0 /* ok after I/O completion */ 114#define BIO_UPTODATE 0 /* ok after I/O completion */
115#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ 115#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
116#define BIO_EOF 2 /* out-of-bounds error */ 116#define BIO_CLONED 2 /* doesn't own data */
117#define BIO_SEG_VALID 3 /* bi_phys_segments valid */ 117#define BIO_BOUNCED 3 /* bio is a bounce bio */
118#define BIO_CLONED 4 /* doesn't own data */ 118#define BIO_USER_MAPPED 4 /* contains user pages */
119#define BIO_BOUNCED 5 /* bio is a bounce bio */ 119#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
120#define BIO_USER_MAPPED 6 /* contains user pages */ 120#define BIO_QUIET 6 /* Make BIO Quiet */
121#define BIO_EOPNOTSUPP 7 /* not supported */ 121#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */
122#define BIO_NULL_MAPPED 8 /* contains invalid user pages */ 122#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */
123#define BIO_QUIET 9 /* Make BIO Quiet */ 123#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
124#define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */
125 124
126/* 125/*
127 * Flags starting here get preserved by bio_reset() - this includes 126 * Flags starting here get preserved by bio_reset() - this includes
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5d93a6645e88..d4068c17d0df 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -12,7 +12,7 @@
12#include <linux/timer.h> 12#include <linux/timer.h>
13#include <linux/workqueue.h> 13#include <linux/workqueue.h>
14#include <linux/pagemap.h> 14#include <linux/pagemap.h>
15#include <linux/backing-dev.h> 15#include <linux/backing-dev-defs.h>
16#include <linux/wait.h> 16#include <linux/wait.h>
17#include <linux/mempool.h> 17#include <linux/mempool.h>
18#include <linux/bio.h> 18#include <linux/bio.h>
@@ -22,15 +22,13 @@
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/percpu-refcount.h> 24#include <linux/percpu-refcount.h>
25 25#include <linux/scatterlist.h>
26#include <asm/scatterlist.h>
27 26
28struct module; 27struct module;
29struct scsi_ioctl_command; 28struct scsi_ioctl_command;
30 29
31struct request_queue; 30struct request_queue;
32struct elevator_queue; 31struct elevator_queue;
33struct request_pm_state;
34struct blk_trace; 32struct blk_trace;
35struct request; 33struct request;
36struct sg_io_hdr; 34struct sg_io_hdr;
@@ -75,18 +73,7 @@ struct request_list {
75enum rq_cmd_type_bits { 73enum rq_cmd_type_bits {
76 REQ_TYPE_FS = 1, /* fs request */ 74 REQ_TYPE_FS = 1, /* fs request */
77 REQ_TYPE_BLOCK_PC, /* scsi command */ 75 REQ_TYPE_BLOCK_PC, /* scsi command */
78 REQ_TYPE_SENSE, /* sense request */ 76 REQ_TYPE_DRV_PRIV, /* driver defined types from here */
79 REQ_TYPE_PM_SUSPEND, /* suspend request */
80 REQ_TYPE_PM_RESUME, /* resume request */
81 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
82 REQ_TYPE_SPECIAL, /* driver defined type */
83 /*
84 * for ATA/ATAPI devices. this really doesn't belong here, ide should
85 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
86 * private REQ_LB opcodes to differentiate what type of request this is
87 */
88 REQ_TYPE_ATA_TASKFILE,
89 REQ_TYPE_ATA_PC,
90}; 77};
91 78
92#define BLK_MAX_CDB 16 79#define BLK_MAX_CDB 16
@@ -108,7 +95,7 @@ struct request {
108 struct blk_mq_ctx *mq_ctx; 95 struct blk_mq_ctx *mq_ctx;
109 96
110 u64 cmd_flags; 97 u64 cmd_flags;
111 enum rq_cmd_type_bits cmd_type; 98 unsigned cmd_type;
112 unsigned long atomic_flags; 99 unsigned long atomic_flags;
113 100
114 int cpu; 101 int cpu;
@@ -216,19 +203,6 @@ static inline unsigned short req_get_ioprio(struct request *req)
216 return req->ioprio; 203 return req->ioprio;
217} 204}
218 205
219/*
220 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
221 * requests. Some step values could eventually be made generic.
222 */
223struct request_pm_state
224{
225 /* PM state machine step value, currently driver specific */
226 int pm_step;
227 /* requested PM state value (S1, S2, S3, S4, ...) */
228 u32 pm_state;
229 void* data; /* for driver use */
230};
231
232#include <linux/elevator.h> 206#include <linux/elevator.h>
233 207
234struct blk_queue_ctx; 208struct blk_queue_ctx;
@@ -469,7 +443,7 @@ struct request_queue {
469 struct mutex sysfs_lock; 443 struct mutex sysfs_lock;
470 444
471 int bypass_depth; 445 int bypass_depth;
472 int mq_freeze_depth; 446 atomic_t mq_freeze_depth;
473 447
474#if defined(CONFIG_BLK_DEV_BSG) 448#if defined(CONFIG_BLK_DEV_BSG)
475 bsg_job_fn *bsg_job_fn; 449 bsg_job_fn *bsg_job_fn;
@@ -610,10 +584,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
610 (((rq)->cmd_flags & REQ_STARTED) && \ 584 (((rq)->cmd_flags & REQ_STARTED) && \
611 ((rq)->cmd_type == REQ_TYPE_FS)) 585 ((rq)->cmd_type == REQ_TYPE_FS))
612 586
613#define blk_pm_request(rq) \
614 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
615 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
616
617#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 587#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
618#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 588#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
619/* rq->queuelist of dequeued request must be list_empty() */ 589/* rq->queuelist of dequeued request must be list_empty() */
@@ -821,30 +791,12 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
821extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 791extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
822 struct scsi_ioctl_command __user *); 792 struct scsi_ioctl_command __user *);
823 793
824/*
825 * A queue has just exitted congestion. Note this in the global counter of
826 * congested queues, and wake up anyone who was waiting for requests to be
827 * put back.
828 */
829static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
830{
831 clear_bdi_congested(&q->backing_dev_info, sync);
832}
833
834/*
835 * A queue has just entered congestion. Flag that in the queue's VM-visible
836 * state flags and increment the global gounter of congested queues.
837 */
838static inline void blk_set_queue_congested(struct request_queue *q, int sync)
839{
840 set_bdi_congested(&q->backing_dev_info, sync);
841}
842
843extern void blk_start_queue(struct request_queue *q); 794extern void blk_start_queue(struct request_queue *q);
844extern void blk_stop_queue(struct request_queue *q); 795extern void blk_stop_queue(struct request_queue *q);
845extern void blk_sync_queue(struct request_queue *q); 796extern void blk_sync_queue(struct request_queue *q);
846extern void __blk_stop_queue(struct request_queue *q); 797extern void __blk_stop_queue(struct request_queue *q);
847extern void __blk_run_queue(struct request_queue *q); 798extern void __blk_run_queue(struct request_queue *q);
799extern void __blk_run_queue_uncond(struct request_queue *q);
848extern void blk_run_queue(struct request_queue *); 800extern void blk_run_queue(struct request_queue *);
849extern void blk_run_queue_async(struct request_queue *q); 801extern void blk_run_queue_async(struct request_queue *q);
850extern int blk_rq_map_user(struct request_queue *, struct request *, 802extern int blk_rq_map_user(struct request_queue *, struct request *,
@@ -933,7 +885,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
933 if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) 885 if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
934 return q->limits.max_hw_sectors; 886 return q->limits.max_hw_sectors;
935 887
936 if (!q->limits.chunk_sectors) 888 if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
937 return blk_queue_get_max_sectors(q, rq->cmd_flags); 889 return blk_queue_get_max_sectors(q, rq->cmd_flags);
938 890
939 return min(blk_max_size_offset(q, blk_rq_pos(rq)), 891 return min(blk_max_size_offset(q, blk_rq_pos(rq)),
@@ -1054,6 +1006,7 @@ bool __must_check blk_get_queue(struct request_queue *);
1054struct request_queue *blk_alloc_queue(gfp_t); 1006struct request_queue *blk_alloc_queue(gfp_t);
1055struct request_queue *blk_alloc_queue_node(gfp_t, int); 1007struct request_queue *blk_alloc_queue_node(gfp_t, int);
1056extern void blk_put_queue(struct request_queue *); 1008extern void blk_put_queue(struct request_queue *);
1009extern void blk_set_queue_dying(struct request_queue *);
1057 1010
1058/* 1011/*
1059 * block layer runtime pm functions 1012 * block layer runtime pm functions
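
With the PM/ATA/SPECIAL command types collapsed into REQ_TYPE_DRV_PRIV, a driver's dispatch path only has the remaining types to distinguish. A rough sketch; the demo_ helpers are hypothetical stand-ins for a driver's real submission routines.

#include <linux/blkdev.h>
#include <linux/errno.h>

static int demo_do_fs_io(struct request *rq)        { return 0; }  /* placeholder */
static int demo_do_private_cmd(struct request *rq)  { return 0; }  /* placeholder */

static int demo_handle_request(struct request *rq)
{
        switch (rq->cmd_type) {
        case REQ_TYPE_FS:
                return demo_do_fs_io(rq);
        case REQ_TYPE_DRV_PRIV:         /* replaces REQ_TYPE_SPECIAL and friends */
                return demo_do_private_cmd(rq);
        default:
                return -EIO;
        }
}
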
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 0995c2de8162..f589222bfa87 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -357,12 +357,12 @@ extern void *alloc_large_system_hash(const char *tablename,
357/* Only NUMA needs hash distribution. 64bit NUMA architectures have 357/* Only NUMA needs hash distribution. 64bit NUMA architectures have
358 * sufficient vmalloc space. 358 * sufficient vmalloc space.
359 */ 359 */
360#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT) 360#ifdef CONFIG_NUMA
361#define HASHDIST_DEFAULT 1 361#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
362extern int hashdist; /* Distribute hashes across NUMA nodes? */
362#else 363#else
363#define HASHDIST_DEFAULT 0 364#define hashdist (0)
364#endif 365#endif
365extern int hashdist; /* Distribute hashes across NUMA nodes? */
366 366
367 367
368#endif /* _LINUX_BOOTMEM_H */ 368#endif /* _LINUX_BOOTMEM_H */
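
Because hashdist is now defined as (0) when CONFIG_NUMA is off, callers can test it unconditionally and let the compiler discard the dead branch; no #ifdef is needed at the call site. A small illustrative sketch (the demo_ name is hypothetical):

#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_alloc_hash_table(size_t bytes)
{
        if (hashdist)                   /* compile-time 0 on !CONFIG_NUMA */
                return vmalloc(bytes);
        return kmalloc(bytes, GFP_KERNEL);
}
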
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d5cda067115a..4383476a0d48 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -105,7 +105,8 @@ struct bpf_verifier_ops {
105 */ 105 */
106 bool (*is_valid_access)(int off, int size, enum bpf_access_type type); 106 bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
107 107
108 u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off, 108 u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
109 int src_reg, int ctx_off,
109 struct bpf_insn *insn); 110 struct bpf_insn *insn);
110}; 111};
111 112
@@ -123,15 +124,41 @@ struct bpf_prog_aux {
123 const struct bpf_verifier_ops *ops; 124 const struct bpf_verifier_ops *ops;
124 struct bpf_map **used_maps; 125 struct bpf_map **used_maps;
125 struct bpf_prog *prog; 126 struct bpf_prog *prog;
126 struct work_struct work; 127 union {
128 struct work_struct work;
129 struct rcu_head rcu;
130 };
127}; 131};
128 132
133struct bpf_array {
134 struct bpf_map map;
135 u32 elem_size;
136 /* 'ownership' of prog_array is claimed by the first program that
137 * is going to use this map or by the first program which FD is stored
138 * in the map to make sure that all callers and callees have the same
139 * prog_type and JITed flag
140 */
141 enum bpf_prog_type owner_prog_type;
142 bool owner_jited;
143 union {
144 char value[0] __aligned(8);
145 struct bpf_prog *prog[0] __aligned(8);
146 };
147};
148#define MAX_TAIL_CALL_CNT 32
149
150u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
151void bpf_prog_array_map_clear(struct bpf_map *map);
152bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
153const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
154
129#ifdef CONFIG_BPF_SYSCALL 155#ifdef CONFIG_BPF_SYSCALL
130void bpf_register_prog_type(struct bpf_prog_type_list *tl); 156void bpf_register_prog_type(struct bpf_prog_type_list *tl);
131void bpf_register_map_type(struct bpf_map_type_list *tl); 157void bpf_register_map_type(struct bpf_map_type_list *tl);
132 158
133struct bpf_prog *bpf_prog_get(u32 ufd); 159struct bpf_prog *bpf_prog_get(u32 ufd);
134void bpf_prog_put(struct bpf_prog *prog); 160void bpf_prog_put(struct bpf_prog *prog);
161void bpf_prog_put_rcu(struct bpf_prog *prog);
135 162
136struct bpf_map *bpf_map_get(struct fd f); 163struct bpf_map *bpf_map_get(struct fd f);
137void bpf_map_put(struct bpf_map *map); 164void bpf_map_put(struct bpf_map *map);
@@ -160,5 +187,10 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
160 187
161extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 188extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
162extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 189extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
190extern const struct bpf_func_proto bpf_tail_call_proto;
191extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
192extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
193extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
194extern const struct bpf_func_proto bpf_get_current_comm_proto;
163 195
164#endif /* _LINUX_BPF_H */ 196#endif /* _LINUX_BPF_H */
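
The new struct bpf_array embeds struct bpf_map as its first member, so kernel code recovers the array with container_of() and can then apply the ownership rule documented above via bpf_prog_array_compatible(). A sketch of that kernel-internal pattern (the demo_ name is hypothetical):

#include <linux/bpf.h>
#include <linux/kernel.h>

static bool demo_may_tail_call(struct bpf_map *map, const struct bpf_prog *fp)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* every program reachable through one prog_array must share
         * prog_type and JITed state with the map's first user */
        return bpf_prog_array_compatible(array, fp);
}
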
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 656da2a12ffe..697ca7795bd9 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
1#ifndef _LINUX_BRCMPHY_H 1#ifndef _LINUX_BRCMPHY_H
2#define _LINUX_BRCMPHY_H 2#define _LINUX_BRCMPHY_H
3 3
4#include <linux/phy.h>
5
6/* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used
7 * to configure the switch internal registers via MDIO accesses.
8 */
9#define BRCM_PSEUDO_PHY_ADDR 30
10
4#define PHY_ID_BCM50610 0x0143bd60 11#define PHY_ID_BCM50610 0x0143bd60
5#define PHY_ID_BCM50610M 0x0143bd70 12#define PHY_ID_BCM50610M 0x0143bd70
6#define PHY_ID_BCM5241 0x0143bc30 13#define PHY_ID_BCM5241 0x0143bc30
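
Given the pseudo-PHY comment above, a switch driver reaches the internal registers through ordinary MDIO accessors; mdiobus_read() from <linux/phy.h> is assumed here and demo_ is a hypothetical name.

#include <linux/phy.h>
#include <linux/brcmphy.h>

static int demo_read_switch_reg(struct mii_bus *bus, u32 regnum)
{
        /* the switch core sits behind the pseudo-PHY at MDIO address 30 */
        return mdiobus_read(bus, BRCM_PSEUDO_PHY_ADDR, regnum);
}
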
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 73b45225a7ca..e6797ded700e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
317 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); 317 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
318} 318}
319 319
320
321static inline struct buffer_head *
322sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
323{
324 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
325}
326
320static inline struct buffer_head * 327static inline struct buffer_head *
321sb_find_get_block(struct super_block *sb, sector_t block) 328sb_find_get_block(struct super_block *sb, sector_t block)
322{ 329{
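
Unlike sb_getblk(), which hard-codes __GFP_MOVABLE, the new sb_getblk_gfp() lets the filesystem choose the allocation context itself. A hedged sketch; GFP_NOFS is just one plausible choice and demo_ is a hypothetical name.

#include <linux/buffer_head.h>
#include <linux/gfp.h>

static struct buffer_head *demo_get_meta_block(struct super_block *sb,
                                               sector_t block)
{
        /* caller still releases the buffer_head with brelse() as usual */
        return sb_getblk_gfp(sb, block, GFP_NOFS);
}
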
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 3daf5ed392c9..2189935075b4 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -19,7 +19,7 @@ enum cache_type {
19/** 19/**
20 * struct cacheinfo - represent a cache leaf node 20 * struct cacheinfo - represent a cache leaf node
21 * @type: type of the cache - data, inst or unified 21 * @type: type of the cache - data, inst or unified
22 * @level: represents the hierarcy in the multi-level cache 22 * @level: represents the hierarchy in the multi-level cache
23 * @coherency_line_size: size of each cache line usually representing 23 * @coherency_line_size: size of each cache line usually representing
24 * the minimum amount of data that gets transferred from memory 24 * the minimum amount of data that gets transferred from memory
25 * @number_of_sets: total number of sets, a set is a collection of cache 25 * @number_of_sets: total number of sets, a set is a collection of cache
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index b6a52a4b457a..51bb6532785c 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -27,10 +27,12 @@
27/** 27/**
28 * struct can_skb_priv - private additional data inside CAN sk_buffs 28 * struct can_skb_priv - private additional data inside CAN sk_buffs
29 * @ifindex: ifindex of the first interface the CAN frame appeared on 29 * @ifindex: ifindex of the first interface the CAN frame appeared on
30 * @skbcnt: atomic counter to have an unique id together with skb pointer
30 * @cf: align to the following CAN frame at skb->data 31 * @cf: align to the following CAN frame at skb->data
31 */ 32 */
32struct can_skb_priv { 33struct can_skb_priv {
33 int ifindex; 34 int ifindex;
35 int skbcnt;
34 struct can_frame cf[0]; 36 struct can_frame cf[0];
35}; 37};
36 38
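
A sketch of how the new skbcnt field would be filled in next to ifindex when a CAN skb is prepared, assuming the header's existing can_skb_prv() accessor; demo_ is a hypothetical name.

#include <linux/skbuff.h>
#include <linux/can/skb.h>

static void demo_init_can_skb_priv(struct sk_buff *skb, int ifindex, int cnt)
{
        can_skb_prv(skb)->ifindex = ifindex;
        can_skb_prv(skb)->skbcnt  = cnt;    /* keeps (skb, skbcnt) unique */
}
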
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 30f92cefaa72..9ebee53d3bf5 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -43,9 +43,9 @@ struct ceph_options {
43 int flags; 43 int flags;
44 struct ceph_fsid fsid; 44 struct ceph_fsid fsid;
45 struct ceph_entity_addr my_addr; 45 struct ceph_entity_addr my_addr;
46 int mount_timeout; 46 unsigned long mount_timeout; /* jiffies */
47 int osd_idle_ttl; 47 unsigned long osd_idle_ttl; /* jiffies */
48 int osd_keepalive_timeout; 48 unsigned long osd_keepalive_timeout; /* jiffies */
49 49
50 /* 50 /*
51 * any type that can't be simply compared or doesn't need need 51 * any type that can't be simply compared or doesn't need need
@@ -63,9 +63,9 @@ struct ceph_options {
63/* 63/*
64 * defaults 64 * defaults
65 */ 65 */
66#define CEPH_MOUNT_TIMEOUT_DEFAULT 60 66#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
67#define CEPH_OSD_KEEPALIVE_DEFAULT 5 67#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
68#define CEPH_OSD_IDLE_TTL_DEFAULT 60 68#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
69 69
70#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) 70#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
71#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) 71#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
@@ -93,13 +93,9 @@ enum {
93 CEPH_MOUNT_SHUTDOWN, 93 CEPH_MOUNT_SHUTDOWN,
94}; 94};
95 95
96/* 96static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
97 * subtract jiffies
98 */
99static inline unsigned long time_sub(unsigned long a, unsigned long b)
100{ 97{
101 BUG_ON(time_after(b, a)); 98 return timeout ?: MAX_SCHEDULE_TIMEOUT;
102 return (long)a - (long)b;
103} 99}
104 100
105struct ceph_mds_client; 101struct ceph_mds_client;
@@ -178,6 +174,7 @@ static inline int calc_pages_for(u64 off, u64 len)
178 174
179extern struct kmem_cache *ceph_inode_cachep; 175extern struct kmem_cache *ceph_inode_cachep;
180extern struct kmem_cache *ceph_cap_cachep; 176extern struct kmem_cache *ceph_cap_cachep;
177extern struct kmem_cache *ceph_cap_flush_cachep;
181extern struct kmem_cache *ceph_dentry_cachep; 178extern struct kmem_cache *ceph_dentry_cachep;
182extern struct kmem_cache *ceph_file_cachep; 179extern struct kmem_cache *ceph_file_cachep;
183 180
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index e15499422fdc..37753278987a 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -8,6 +8,7 @@
8#include <linux/radix-tree.h> 8#include <linux/radix-tree.h>
9#include <linux/uio.h> 9#include <linux/uio.h>
10#include <linux/workqueue.h> 10#include <linux/workqueue.h>
11#include <net/net_namespace.h>
11 12
12#include <linux/ceph/types.h> 13#include <linux/ceph/types.h>
13#include <linux/ceph/buffer.h> 14#include <linux/ceph/buffer.h>
@@ -56,6 +57,7 @@ struct ceph_messenger {
56 struct ceph_entity_addr my_enc_addr; 57 struct ceph_entity_addr my_enc_addr;
57 58
58 atomic_t stopping; 59 atomic_t stopping;
60 possible_net_t net;
59 bool nocrc; 61 bool nocrc;
60 bool tcp_nodelay; 62 bool tcp_nodelay;
61 63
@@ -267,6 +269,7 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr,
267 u64 required_features, 269 u64 required_features,
268 bool nocrc, 270 bool nocrc,
269 bool tcp_nodelay); 271 bool tcp_nodelay);
272extern void ceph_messenger_fini(struct ceph_messenger *msgr);
270 273
271extern void ceph_con_init(struct ceph_connection *con, void *private, 274extern void ceph_con_init(struct ceph_connection *con, void *private,
272 const struct ceph_connection_operations *ops, 275 const struct ceph_connection_operations *ops,
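
Since the messenger now records a network namespace, teardown should go through the new ceph_messenger_fini(); read_pnet() from <net/net_namespace.h> is assumed to be the accessor for the possible_net_t field. A sketch with hypothetical demo_ names:

#include <linux/ceph/messenger.h>
#include <net/net_namespace.h>

static struct net *demo_messenger_net(struct ceph_messenger *msgr)
{
        return read_pnet(&msgr->net);   /* namespace recorded at init time */
}

static void demo_messenger_teardown(struct ceph_messenger *msgr)
{
        ceph_messenger_fini(msgr);      /* counterpart of ceph_messenger_init() */
}
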
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 61b19c46bdb3..7506b485bb6d 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -249,7 +249,7 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
249 struct ceph_msg *msg); 249 struct ceph_msg *msg);
250 250
251extern void osd_req_op_init(struct ceph_osd_request *osd_req, 251extern void osd_req_op_init(struct ceph_osd_request *osd_req,
252 unsigned int which, u16 opcode); 252 unsigned int which, u16 opcode, u32 flags);
253 253
254extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, 254extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
255 unsigned int which, 255 unsigned int which,
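
osd_req_op_init() now takes a flags argument; callers with nothing special to request pass 0. A sketch, using CEPH_OSD_OP_STAT from <linux/ceph/rados.h> purely as an illustrative opcode (demo_ is a hypothetical name):

#include <linux/ceph/osd_client.h>
#include <linux/ceph/rados.h>

static void demo_init_stat_op(struct ceph_osd_request *req)
{
        osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);   /* which = 0, flags = 0 */
}
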
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
new file mode 100644
index 000000000000..93755a629299
--- /dev/null
+++ b/include/linux/cgroup-defs.h
@@ -0,0 +1,501 @@
1/*
2 * linux/cgroup-defs.h - basic definitions for cgroup
3 *
4 * This file provides basic type and interface. Include this file directly
5 * only if necessary to avoid cyclic dependencies.
6 */
7#ifndef _LINUX_CGROUP_DEFS_H
8#define _LINUX_CGROUP_DEFS_H
9
10#include <linux/limits.h>
11#include <linux/list.h>
12#include <linux/idr.h>
13#include <linux/wait.h>
14#include <linux/mutex.h>
15#include <linux/rcupdate.h>
16#include <linux/percpu-refcount.h>
17#include <linux/percpu-rwsem.h>
18#include <linux/workqueue.h>
19
20#ifdef CONFIG_CGROUPS
21
22struct cgroup;
23struct cgroup_root;
24struct cgroup_subsys;
25struct cgroup_taskset;
26struct kernfs_node;
27struct kernfs_ops;
28struct kernfs_open_file;
29struct seq_file;
30
31#define MAX_CGROUP_TYPE_NAMELEN 32
32#define MAX_CGROUP_ROOT_NAMELEN 64
33#define MAX_CFTYPE_NAME 64
34
35/* define the enumeration of all cgroup subsystems */
36#define SUBSYS(_x) _x ## _cgrp_id,
37enum cgroup_subsys_id {
38#include <linux/cgroup_subsys.h>
39 CGROUP_SUBSYS_COUNT,
40};
41#undef SUBSYS
42
43/* bits in struct cgroup_subsys_state flags field */
44enum {
45 CSS_NO_REF = (1 << 0), /* no reference counting for this css */
46 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
47 CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
48};
49
50/* bits in struct cgroup flags field */
51enum {
52 /* Control Group requires release notifications to userspace */
53 CGRP_NOTIFY_ON_RELEASE,
54 /*
55 * Clone the parent's configuration when creating a new child
56 * cpuset cgroup. For historical reasons, this option can be
57 * specified at mount time and thus is implemented here.
58 */
59 CGRP_CPUSET_CLONE_CHILDREN,
60};
61
62/* cgroup_root->flags */
63enum {
64 CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
65 CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
66 CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
67};
68
69/* cftype->flags */
70enum {
71 CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
72 CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
73 CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
74
75 /* internal flags, do not use outside cgroup core proper */
76 __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
77 __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
78};
79
80/*
81 * Per-subsystem/per-cgroup state maintained by the system. This is the
82 * fundamental structural building block that controllers deal with.
83 *
84 * Fields marked with "PI:" are public and immutable and may be accessed
85 * directly without synchronization.
86 */
87struct cgroup_subsys_state {
88 /* PI: the cgroup that this css is attached to */
89 struct cgroup *cgroup;
90
91 /* PI: the cgroup subsystem that this css is attached to */
92 struct cgroup_subsys *ss;
93
94 /* reference count - access via css_[try]get() and css_put() */
95 struct percpu_ref refcnt;
96
97 /* PI: the parent css */
98 struct cgroup_subsys_state *parent;
99
100 /* siblings list anchored at the parent's ->children */
101 struct list_head sibling;
102 struct list_head children;
103
104 /*
105 * PI: Subsys-unique ID. 0 is unused and root is always 1. The
106 * matching css can be looked up using css_from_id().
107 */
108 int id;
109
110 unsigned int flags;
111
112 /*
113 * Monotonically increasing unique serial number which defines a
114 * uniform order among all csses. It's guaranteed that all
115 * ->children lists are in the ascending order of ->serial_nr and
116 * used to allow interrupting and resuming iterations.
117 */
118 u64 serial_nr;
119
120 /* percpu_ref killing and RCU release */
121 struct rcu_head rcu_head;
122 struct work_struct destroy_work;
123};
124
125/*
126 * A css_set is a structure holding pointers to a set of
127 * cgroup_subsys_state objects. This saves space in the task struct
128 * object and speeds up fork()/exit(), since a single inc/dec and a
129 * list_add()/del() can bump the reference count on the entire cgroup
130 * set for a task.
131 */
132struct css_set {
133 /* Reference count */
134 atomic_t refcount;
135
136 /*
137 * List running through all cgroup groups in the same hash
138 * slot. Protected by css_set_lock
139 */
140 struct hlist_node hlist;
141
142 /*
143 * Lists running through all tasks using this cgroup group.
144 * mg_tasks lists tasks which belong to this cset but are in the
145 * process of being migrated out or in. Protected by
146 * css_set_rwsem, but, during migration, once tasks are moved to
147 * mg_tasks, it can be read safely while holding cgroup_mutex.
148 */
149 struct list_head tasks;
150 struct list_head mg_tasks;
151
152 /*
153 * List of cgrp_cset_links pointing at cgroups referenced from this
154 * css_set. Protected by css_set_lock.
155 */
156 struct list_head cgrp_links;
157
158 /* the default cgroup associated with this css_set */
159 struct cgroup *dfl_cgrp;
160
161 /*
162 * Set of subsystem states, one for each subsystem. This array is
163 * immutable after creation apart from the init_css_set during
164 * subsystem registration (at boot time).
165 */
166 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
167
168 /*
169 * List of csets participating in the on-going migration either as
170 * source or destination. Protected by cgroup_mutex.
171 */
172 struct list_head mg_preload_node;
173 struct list_head mg_node;
174
175 /*
176 * If this cset is acting as the source of migration the following
177 * two fields are set. mg_src_cgrp is the source cgroup of the
178 * on-going migration and mg_dst_cset is the destination cset the
179 * target tasks on this cset should be migrated to. Protected by
180 * cgroup_mutex.
181 */
182 struct cgroup *mg_src_cgrp;
183 struct css_set *mg_dst_cset;
184
185 /*
186 * On the default hierarhcy, ->subsys[ssid] may point to a css
187 * attached to an ancestor instead of the cgroup this css_set is
188 * associated with. The following node is anchored at
189 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
190 * iterate through all css's attached to a given cgroup.
191 */
192 struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
193
194 /* For RCU-protected deletion */
195 struct rcu_head rcu_head;
196};
197
198struct cgroup {
199 /* self css with NULL ->ss, points back to this cgroup */
200 struct cgroup_subsys_state self;
201
202 unsigned long flags; /* "unsigned long" so bitops work */
203
204 /*
205 * idr allocated in-hierarchy ID.
206 *
207 * ID 0 is not used, the ID of the root cgroup is always 1, and a
208 * new cgroup will be assigned with a smallest available ID.
209 *
210 * Allocating/Removing ID must be protected by cgroup_mutex.
211 */
212 int id;
213
214 /*
215 * If this cgroup contains any tasks, it contributes one to
216 * populated_cnt. All children with non-zero popuplated_cnt of
217 * their own contribute one. The count is zero iff there's no task
218 * in this cgroup or its subtree.
219 */
220 int populated_cnt;
221
222 struct kernfs_node *kn; /* cgroup kernfs entry */
223 struct kernfs_node *procs_kn; /* kn for "cgroup.procs" */
224 struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
225
226 /*
227 * The bitmask of subsystems enabled on the child cgroups.
228 * ->subtree_control is the one configured through
229 * "cgroup.subtree_control" while ->child_subsys_mask is the
230 * effective one which may have more subsystems enabled.
231 * Controller knobs are made available iff it's enabled in
232 * ->subtree_control.
233 */
234 unsigned int subtree_control;
235 unsigned int child_subsys_mask;
236
237 /* Private pointers for each registered subsystem */
238 struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
239
240 struct cgroup_root *root;
241
242 /*
243 * List of cgrp_cset_links pointing at css_sets with tasks in this
244 * cgroup. Protected by css_set_lock.
245 */
246 struct list_head cset_links;
247
248 /*
249 * On the default hierarchy, a css_set for a cgroup with some
250 * susbsys disabled will point to css's which are associated with
251 * the closest ancestor which has the subsys enabled. The
252 * following lists all css_sets which point to this cgroup's css
253 * for the given subsystem.
254 */
255 struct list_head e_csets[CGROUP_SUBSYS_COUNT];
256
257 /*
258 * list of pidlists, up to two for each namespace (one for procs, one
259 * for tasks); created on demand.
260 */
261 struct list_head pidlists;
262 struct mutex pidlist_mutex;
263
264 /* used to wait for offlining of csses */
265 wait_queue_head_t offline_waitq;
266
267 /* used to schedule release agent */
268 struct work_struct release_agent_work;
269};
270
271/*
272 * A cgroup_root represents the root of a cgroup hierarchy, and may be
273 * associated with a kernfs_root to form an active hierarchy. This is
274 * internal to cgroup core. Don't access directly from controllers.
275 */
276struct cgroup_root {
277 struct kernfs_root *kf_root;
278
279 /* The bitmask of subsystems attached to this hierarchy */
280 unsigned int subsys_mask;
281
282 /* Unique id for this hierarchy. */
283 int hierarchy_id;
284
285 /* The root cgroup. Root is destroyed on its release. */
286 struct cgroup cgrp;
287
288 /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
289 atomic_t nr_cgrps;
290
291 /* A list running through the active hierarchies */
292 struct list_head root_list;
293
294 /* Hierarchy-specific flags */
295 unsigned int flags;
296
297 /* IDs for cgroups in this hierarchy */
298 struct idr cgroup_idr;
299
300 /* The path to use for release notifications. */
301 char release_agent_path[PATH_MAX];
302
303 /* The name for this hierarchy - may be empty */
304 char name[MAX_CGROUP_ROOT_NAMELEN];
305};
306
307/*
308 * struct cftype: handler definitions for cgroup control files
309 *
310 * When reading/writing to a file:
311 * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
312 * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
313 */
314struct cftype {
315 /*
316 * By convention, the name should begin with the name of the
317 * subsystem, followed by a period. Zero length string indicates
318 * end of cftype array.
319 */
320 char name[MAX_CFTYPE_NAME];
321 int private;
322 /*
323 * If not 0, file mode is set to this value, otherwise it will
324 * be figured out automatically
325 */
326 umode_t mode;
327
328 /*
329 * The maximum length of string, excluding trailing nul, that can
330 * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
331 */
332 size_t max_write_len;
333
334 /* CFTYPE_* flags */
335 unsigned int flags;
336
337 /*
338 * Fields used for internal bookkeeping. Initialized automatically
339 * during registration.
340 */
341 struct cgroup_subsys *ss; /* NULL for cgroup core files */
342 struct list_head node; /* anchored at ss->cfts */
343 struct kernfs_ops *kf_ops;
344
345 /*
346 * read_u64() is a shortcut for the common case of returning a
347 * single integer. Use it in place of read()
348 */
349 u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
350 /*
351 * read_s64() is a signed version of read_u64()
352 */
353 s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
354
355 /* generic seq_file read interface */
356 int (*seq_show)(struct seq_file *sf, void *v);
357
358 /* optional ops, implement all or none */
359 void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
360 void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
361 void (*seq_stop)(struct seq_file *sf, void *v);
362
363 /*
364 * write_u64() is a shortcut for the common case of accepting
365 * a single integer (as parsed by simple_strtoull) from
366 * userspace. Use in place of write(); return 0 or error.
367 */
368 int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
369 u64 val);
370 /*
371 * write_s64() is a signed version of write_u64()
372 */
373 int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
374 s64 val);
375
376 /*
377 * write() is the generic write callback which maps directly to
378 * kernfs write operation and overrides all other operations.
379 * Maximum write size is determined by ->max_write_len. Use
380 * of_css/cft() to access the associated css and cft.
381 */
382 ssize_t (*write)(struct kernfs_open_file *of,
383 char *buf, size_t nbytes, loff_t off);
384
385#ifdef CONFIG_DEBUG_LOCK_ALLOC
386 struct lock_class_key lockdep_key;
387#endif
388};
389
390/*
391 * Control Group subsystem type.
392 * See Documentation/cgroups/cgroups.txt for details
393 */
394struct cgroup_subsys {
395 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
396 int (*css_online)(struct cgroup_subsys_state *css);
397 void (*css_offline)(struct cgroup_subsys_state *css);
398 void (*css_released)(struct cgroup_subsys_state *css);
399 void (*css_free)(struct cgroup_subsys_state *css);
400 void (*css_reset)(struct cgroup_subsys_state *css);
401 void (*css_e_css_changed)(struct cgroup_subsys_state *css);
402
403 int (*can_attach)(struct cgroup_subsys_state *css,
404 struct cgroup_taskset *tset);
405 void (*cancel_attach)(struct cgroup_subsys_state *css,
406 struct cgroup_taskset *tset);
407 void (*attach)(struct cgroup_subsys_state *css,
408 struct cgroup_taskset *tset);
409 void (*fork)(struct task_struct *task);
410 void (*exit)(struct cgroup_subsys_state *css,
411 struct cgroup_subsys_state *old_css,
412 struct task_struct *task);
413 void (*bind)(struct cgroup_subsys_state *root_css);
414
415 int disabled;
416 int early_init;
417
418 /*
419 * If %false, this subsystem is properly hierarchical -
420 * configuration, resource accounting and restriction on a parent
421 * cgroup cover those of its children. If %true, hierarchy support
422 * is broken in some ways - some subsystems ignore hierarchy
423 * completely while others are only implemented half-way.
424 *
425 * It's now disallowed to create nested cgroups if the subsystem is
426 * broken and cgroup core will emit a warning message on such
427 * cases. Eventually, all subsystems will be made properly
428 * hierarchical and this will go away.
429 */
430 bool broken_hierarchy;
431 bool warned_broken_hierarchy;
432
433 /* the following two fields are initialized automatically during boot */
434 int id;
435 const char *name;
436
437 /* link to parent, protected by cgroup_lock() */
438 struct cgroup_root *root;
439
440 /* idr for css->id */
441 struct idr css_idr;
442
443 /*
444 * List of cftypes. Each entry is the first entry of an array
445 * terminated by zero length name.
446 */
447 struct list_head cfts;
448
449 /*
450 * Base cftypes which are automatically registered. The two can
451 * point to the same array.
452 */
453 struct cftype *dfl_cftypes; /* for the default hierarchy */
454 struct cftype *legacy_cftypes; /* for the legacy hierarchies */
455
456 /*
457 * A subsystem may depend on other subsystems. When such subsystem
458 * is enabled on a cgroup, the depended-upon subsystems are enabled
459 * together if available. Subsystems enabled due to dependency are
460 * not visible to userland until explicitly enabled. The following
461 * specifies the mask of subsystems that this one depends on.
462 */
463 unsigned int depends_on;
464};
465
466extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
467
468/**
469 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
470 * @tsk: target task
471 *
472 * Called from threadgroup_change_begin() and allows cgroup operations to
473 * synchronize against threadgroup changes using a percpu_rw_semaphore.
474 */
475static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
476{
477 percpu_down_read(&cgroup_threadgroup_rwsem);
478}
479
480/**
481 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
482 * @tsk: target task
483 *
484 * Called from threadgroup_change_end(). Counterpart of
485 * cgroup_threadgroup_change_begin().
486 */
487static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
488{
489 percpu_up_read(&cgroup_threadgroup_rwsem);
490}
491
492#else /* CONFIG_CGROUPS */
493
494#define CGROUP_SUBSYS_COUNT 0
495
496static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
497static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
498
499#endif /* CONFIG_CGROUPS */
500
501#endif /* _LINUX_CGROUP_DEFS_H */
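
Per their kerneldoc, the two inline helpers above are called from threadgroup_change_begin()/end() and take the new percpu rw-semaphore for read, so cgroup migration can exclude concurrent threadgroup changes. A hedged sketch of the bracketing pattern (demo_ is a hypothetical name and the body is a placeholder):

#include <linux/cgroup-defs.h>
#include <linux/sched.h>

static void demo_change_threadgroup(struct task_struct *tsk)
{
        cgroup_threadgroup_change_begin(tsk);
        /* ... update tsk's thread-group state here ... */
        cgroup_threadgroup_change_end(tsk);
}
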
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b9cb94c3102a..a593e299162e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -11,94 +11,200 @@
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/nodemask.h> 13#include <linux/nodemask.h>
14#include <linux/rcupdate.h>
15#include <linux/rculist.h> 14#include <linux/rculist.h>
16#include <linux/cgroupstats.h> 15#include <linux/cgroupstats.h>
17#include <linux/rwsem.h> 16#include <linux/rwsem.h>
18#include <linux/idr.h>
19#include <linux/workqueue.h>
20#include <linux/fs.h> 17#include <linux/fs.h>
21#include <linux/percpu-refcount.h>
22#include <linux/seq_file.h> 18#include <linux/seq_file.h>
23#include <linux/kernfs.h> 19#include <linux/kernfs.h>
24#include <linux/wait.h> 20
21#include <linux/cgroup-defs.h>
25 22
26#ifdef CONFIG_CGROUPS 23#ifdef CONFIG_CGROUPS
27 24
28struct cgroup_root; 25/* a css_task_iter should be treated as an opaque object */
29struct cgroup_subsys; 26struct css_task_iter {
30struct cgroup; 27 struct cgroup_subsys *ss;
31 28
32extern int cgroup_init_early(void); 29 struct list_head *cset_pos;
33extern int cgroup_init(void); 30 struct list_head *cset_head;
34extern void cgroup_fork(struct task_struct *p);
35extern void cgroup_post_fork(struct task_struct *p);
36extern void cgroup_exit(struct task_struct *p);
37extern int cgroupstats_build(struct cgroupstats *stats,
38 struct dentry *dentry);
39 31
40extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, 32 struct list_head *task_pos;
41 struct pid *pid, struct task_struct *tsk); 33 struct list_head *tasks_head;
34 struct list_head *mg_tasks_head;
35};
42 36
43/* define the enumeration of all cgroup subsystems */ 37extern struct cgroup_root cgrp_dfl_root;
44#define SUBSYS(_x) _x ## _cgrp_id, 38extern struct css_set init_css_set;
45enum cgroup_subsys_id { 39
40#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
46#include <linux/cgroup_subsys.h> 41#include <linux/cgroup_subsys.h>
47 CGROUP_SUBSYS_COUNT,
48};
49#undef SUBSYS 42#undef SUBSYS
50 43
44bool css_has_online_children(struct cgroup_subsys_state *css);
45struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
46struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
47 struct cgroup_subsys *ss);
48struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
49 struct cgroup_subsys *ss);
50
51bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
52int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
53int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
54
55int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
56int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
57int cgroup_rm_cftypes(struct cftype *cfts);
58
59char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
60int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
61int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
62 struct pid *pid, struct task_struct *tsk);
63
64void cgroup_fork(struct task_struct *p);
65void cgroup_post_fork(struct task_struct *p);
66void cgroup_exit(struct task_struct *p);
67
68int cgroup_init_early(void);
69int cgroup_init(void);
70
51/* 71/*
52 * Per-subsystem/per-cgroup state maintained by the system. This is the 72 * Iteration helpers and macros.
53 * fundamental structural building block that controllers deal with. 73 */
74
75struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
76 struct cgroup_subsys_state *parent);
77struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
78 struct cgroup_subsys_state *css);
79struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
80struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
81 struct cgroup_subsys_state *css);
82
83struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
84struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
85
86void css_task_iter_start(struct cgroup_subsys_state *css,
87 struct css_task_iter *it);
88struct task_struct *css_task_iter_next(struct css_task_iter *it);
89void css_task_iter_end(struct css_task_iter *it);
90
91/**
92 * css_for_each_child - iterate through children of a css
93 * @pos: the css * to use as the loop cursor
94 * @parent: css whose children to walk
54 * 95 *
55 * Fields marked with "PI:" are public and immutable and may be accessed 96 * Walk @parent's children. Must be called under rcu_read_lock().
56 * directly without synchronization. 97 *
98 * If a subsystem synchronizes ->css_online() and the start of iteration, a
99 * css which finished ->css_online() is guaranteed to be visible in the
100 * future iterations and will stay visible until the last reference is put.
101 * A css which hasn't finished ->css_online() or already finished
102 * ->css_offline() may show up during traversal. It's each subsystem's
103 * responsibility to synchronize against on/offlining.
104 *
105 * It is allowed to temporarily drop RCU read lock during iteration. The
106 * caller is responsible for ensuring that @pos remains accessible until
107 * the start of the next iteration by, for example, bumping the css refcnt.
57 */ 108 */
58struct cgroup_subsys_state { 109#define css_for_each_child(pos, parent) \
59 /* PI: the cgroup that this css is attached to */ 110 for ((pos) = css_next_child(NULL, (parent)); (pos); \
60 struct cgroup *cgroup; 111 (pos) = css_next_child((pos), (parent)))
61
62 /* PI: the cgroup subsystem that this css is attached to */
63 struct cgroup_subsys *ss;
64
65 /* reference count - access via css_[try]get() and css_put() */
66 struct percpu_ref refcnt;
67
68 /* PI: the parent css */
69 struct cgroup_subsys_state *parent;
70
71 /* siblings list anchored at the parent's ->children */
72 struct list_head sibling;
73 struct list_head children;
74
75 /*
76 * PI: Subsys-unique ID. 0 is unused and root is always 1. The
77 * matching css can be looked up using css_from_id().
78 */
79 int id;
80
81 unsigned int flags;
82
83 /*
84 * Monotonically increasing unique serial number which defines a
85 * uniform order among all csses. It's guaranteed that all
86 * ->children lists are in the ascending order of ->serial_nr and
87 * used to allow interrupting and resuming iterations.
88 */
89 u64 serial_nr;
90
91 /* percpu_ref killing and RCU release */
92 struct rcu_head rcu_head;
93 struct work_struct destroy_work;
94};
95 112
96/* bits in struct cgroup_subsys_state flags field */ 113/**
97enum { 114 * css_for_each_descendant_pre - pre-order walk of a css's descendants
98 CSS_NO_REF = (1 << 0), /* no reference counting for this css */ 115 * @pos: the css * to use as the loop cursor
99 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ 116 * @root: css whose descendants to walk
100 CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ 117 *
101}; 118 * Walk @root's descendants. @root is included in the iteration and the
119 * first node to be visited. Must be called under rcu_read_lock().
120 *
121 * If a subsystem synchronizes ->css_online() and the start of iteration, a
122 * css which finished ->css_online() is guaranteed to be visible in the
123 * future iterations and will stay visible until the last reference is put.
124 * A css which hasn't finished ->css_online() or already finished
125 * ->css_offline() may show up during traversal. It's each subsystem's
126 * responsibility to synchronize against on/offlining.
127 *
128 * For example, the following guarantees that a descendant can't escape
129 * state updates of its ancestors.
130 *
131 * my_online(@css)
132 * {
133 * Lock @css's parent and @css;
134 * Inherit state from the parent;
135 * Unlock both.
136 * }
137 *
138 * my_update_state(@css)
139 * {
140 * css_for_each_descendant_pre(@pos, @css) {
141 * Lock @pos;
142 * if (@pos == @css)
143 * Update @css's state;
144 * else
145 * Verify @pos is alive and inherit state from its parent;
146 * Unlock @pos;
147 * }
148 * }
149 *
150 * As long as the inheriting step, including checking the parent state, is
151 * enclosed inside @pos locking, double-locking the parent isn't necessary
152 * while inheriting. The state update to the parent is guaranteed to be
153 * visible by walking order and, as long as inheriting operations to the
154 * same @pos are atomic to each other, multiple updates racing each other
155 * still result in the correct state. It's guaranateed that at least one
156 * inheritance happens for any css after the latest update to its parent.
157 *
158 * If checking parent's state requires locking the parent, each inheriting
159 * iteration should lock and unlock both @pos->parent and @pos.
160 *
161 * Alternatively, a subsystem may choose to use a single global lock to
162 * synchronize ->css_online() and ->css_offline() against tree-walking
163 * operations.
164 *
165 * It is allowed to temporarily drop RCU read lock during iteration. The
166 * caller is responsible for ensuring that @pos remains accessible until
167 * the start of the next iteration by, for example, bumping the css refcnt.
168 */
169#define css_for_each_descendant_pre(pos, css) \
170 for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
171 (pos) = css_next_descendant_pre((pos), (css)))
172
173/**
174 * css_for_each_descendant_post - post-order walk of a css's descendants
175 * @pos: the css * to use as the loop cursor
176 * @css: css whose descendants to walk
177 *
178 * Similar to css_for_each_descendant_pre() but performs post-order
179 * traversal instead. @root is included in the iteration and the last
180 * node to be visited.
181 *
182 * If a subsystem synchronizes ->css_online() and the start of iteration, a
183 * css which finished ->css_online() is guaranteed to be visible in the
184 * future iterations and will stay visible until the last reference is put.
185 * A css which hasn't finished ->css_online() or already finished
186 * ->css_offline() may show up during traversal. It's each subsystem's
187 * responsibility to synchronize against on/offlining.
188 *
189 * Note that the walk visibility guarantee example described in pre-order
190 * walk doesn't apply the same to post-order walks.
191 */
192#define css_for_each_descendant_post(pos, css) \
193 for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
194 (pos) = css_next_descendant_post((pos), (css)))
195
196/**
197 * cgroup_taskset_for_each - iterate cgroup_taskset
198 * @task: the loop cursor
199 * @tset: taskset to iterate
200 */
201#define cgroup_taskset_for_each(task, tset) \
202 for ((task) = cgroup_taskset_first((tset)); (task); \
203 (task) = cgroup_taskset_next((tset)))
204
205/*
206 * Inline functions.
207 */
102 208
103/** 209/**
104 * css_get - obtain a reference on the specified css 210 * css_get - obtain a reference on the specified css
@@ -185,309 +291,112 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
185 percpu_ref_put_many(&css->refcnt, n); 291 percpu_ref_put_many(&css->refcnt, n);
186} 292}
187 293
188/* bits in struct cgroup flags field */ 294/**
189enum { 295 * task_css_set_check - obtain a task's css_set with extra access conditions
190 /* Control Group requires release notifications to userspace */ 296 * @task: the task to obtain css_set for
191 CGRP_NOTIFY_ON_RELEASE, 297 * @__c: extra condition expression to be passed to rcu_dereference_check()
192 /* 298 *
193 * Clone the parent's configuration when creating a new child 299 * A task's css_set is RCU protected, initialized and exited while holding
194 * cpuset cgroup. For historical reasons, this option can be 300 * task_lock(), and can only be modified while holding both cgroup_mutex
195 * specified at mount time and thus is implemented here. 301 * and task_lock() while the task is alive. This macro verifies that the
196 */ 302 * caller is inside proper critical section and returns @task's css_set.
197 CGRP_CPUSET_CLONE_CHILDREN, 303 *
198}; 304 * The caller can also specify additional allowed conditions via @__c, such
199 305 * as locks used during the cgroup_subsys::attach() methods.
200struct cgroup {
201 /* self css with NULL ->ss, points back to this cgroup */
202 struct cgroup_subsys_state self;
203
204 unsigned long flags; /* "unsigned long" so bitops work */
205
206 /*
207 * idr allocated in-hierarchy ID.
208 *
209 * ID 0 is not used, the ID of the root cgroup is always 1, and a
210 * new cgroup will be assigned with a smallest available ID.
211 *
212 * Allocating/Removing ID must be protected by cgroup_mutex.
213 */
214 int id;
215
216 /*
217 * If this cgroup contains any tasks, it contributes one to
218 * populated_cnt. All children with non-zero popuplated_cnt of
219 * their own contribute one. The count is zero iff there's no task
220 * in this cgroup or its subtree.
221 */
222 int populated_cnt;
223
224 struct kernfs_node *kn; /* cgroup kernfs entry */
225 struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
226
227 /*
228 * The bitmask of subsystems enabled on the child cgroups.
229 * ->subtree_control is the one configured through
230 * "cgroup.subtree_control" while ->child_subsys_mask is the
231 * effective one which may have more subsystems enabled.
232 * Controller knobs are made available iff it's enabled in
233 * ->subtree_control.
234 */
235 unsigned int subtree_control;
236 unsigned int child_subsys_mask;
237
238 /* Private pointers for each registered subsystem */
239 struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
240
241 struct cgroup_root *root;
242
243 /*
244 * List of cgrp_cset_links pointing at css_sets with tasks in this
245 * cgroup. Protected by css_set_lock.
246 */
247 struct list_head cset_links;
248
249 /*
250 * On the default hierarchy, a css_set for a cgroup with some
251 * susbsys disabled will point to css's which are associated with
252 * the closest ancestor which has the subsys enabled. The
253 * following lists all css_sets which point to this cgroup's css
254 * for the given subsystem.
255 */
256 struct list_head e_csets[CGROUP_SUBSYS_COUNT];
257
258 /*
259 * list of pidlists, up to two for each namespace (one for procs, one
260 * for tasks); created on demand.
261 */
262 struct list_head pidlists;
263 struct mutex pidlist_mutex;
264
265 /* used to wait for offlining of csses */
266 wait_queue_head_t offline_waitq;
267
268 /* used to schedule release agent */
269 struct work_struct release_agent_work;
270};
271
272#define MAX_CGROUP_ROOT_NAMELEN 64
273
274/* cgroup_root->flags */
275enum {
276 CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
277 CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
278 CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
279};
280
281/*
282 * A cgroup_root represents the root of a cgroup hierarchy, and may be
283 * associated with a kernfs_root to form an active hierarchy. This is
284 * internal to cgroup core. Don't access directly from controllers.
285 */ 306 */
286struct cgroup_root { 307#ifdef CONFIG_PROVE_RCU
287 struct kernfs_root *kf_root; 308extern struct mutex cgroup_mutex;
288 309extern struct rw_semaphore css_set_rwsem;
289 /* The bitmask of subsystems attached to this hierarchy */ 310#define task_css_set_check(task, __c) \
290 unsigned int subsys_mask; 311 rcu_dereference_check((task)->cgroups, \
291 312 lockdep_is_held(&cgroup_mutex) || \
292 /* Unique id for this hierarchy. */ 313 lockdep_is_held(&css_set_rwsem) || \
293 int hierarchy_id; 314 ((task)->flags & PF_EXITING) || (__c))
294 315#else
295 /* The root cgroup. Root is destroyed on its release. */ 316#define task_css_set_check(task, __c) \
296 struct cgroup cgrp; 317 rcu_dereference((task)->cgroups)
297 318#endif
298 /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
299 atomic_t nr_cgrps;
300
301 /* A list running through the active hierarchies */
302 struct list_head root_list;
303
304 /* Hierarchy-specific flags */
305 unsigned int flags;
306
307 /* IDs for cgroups in this hierarchy */
308 struct idr cgroup_idr;
309
310 /* The path to use for release notifications. */
311 char release_agent_path[PATH_MAX];
312
313 /* The name for this hierarchy - may be empty */
314 char name[MAX_CGROUP_ROOT_NAMELEN];
315};
316 319
317/* 320/**
318 * A css_set is a structure holding pointers to a set of 321 * task_css_check - obtain css for (task, subsys) w/ extra access conds
319 * cgroup_subsys_state objects. This saves space in the task struct 322 * @task: the target task
320 * object and speeds up fork()/exit(), since a single inc/dec and a 323 * @subsys_id: the target subsystem ID
321 * list_add()/del() can bump the reference count on the entire cgroup 324 * @__c: extra condition expression to be passed to rcu_dereference_check()
322 * set for a task. 325 *
326 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
327 * synchronization rules are the same as task_css_set_check().
323 */ 328 */
329#define task_css_check(task, subsys_id, __c) \
330 task_css_set_check((task), (__c))->subsys[(subsys_id)]
324 331
325struct css_set { 332/**
326 333 * task_css_set - obtain a task's css_set
327 /* Reference count */ 334 * @task: the task to obtain css_set for
328 atomic_t refcount;
329
330 /*
331 * List running through all cgroup groups in the same hash
332 * slot. Protected by css_set_lock
333 */
334 struct hlist_node hlist;
335
336 /*
337 * Lists running through all tasks using this cgroup group.
338 * mg_tasks lists tasks which belong to this cset but are in the
339 * process of being migrated out or in. Protected by
340 * css_set_rwsem, but, during migration, once tasks are moved to
341 * mg_tasks, it can be read safely while holding cgroup_mutex.
342 */
343 struct list_head tasks;
344 struct list_head mg_tasks;
345
346 /*
347 * List of cgrp_cset_links pointing at cgroups referenced from this
348 * css_set. Protected by css_set_lock.
349 */
350 struct list_head cgrp_links;
351
352 /* the default cgroup associated with this css_set */
353 struct cgroup *dfl_cgrp;
354
355 /*
356 * Set of subsystem states, one for each subsystem. This array is
357 * immutable after creation apart from the init_css_set during
358 * subsystem registration (at boot time).
359 */
360 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
361
362 /*
363 * List of csets participating in the on-going migration either as
364 * source or destination. Protected by cgroup_mutex.
365 */
366 struct list_head mg_preload_node;
367 struct list_head mg_node;
368
369 /*
370 * If this cset is acting as the source of migration the following
371 * two fields are set. mg_src_cgrp is the source cgroup of the
372 * on-going migration and mg_dst_cset is the destination cset the
373 * target tasks on this cset should be migrated to. Protected by
374 * cgroup_mutex.
375 */
376 struct cgroup *mg_src_cgrp;
377 struct css_set *mg_dst_cset;
378
379 /*
380 * On the default hierarhcy, ->subsys[ssid] may point to a css
381 * attached to an ancestor instead of the cgroup this css_set is
382 * associated with. The following node is anchored at
383 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
384 * iterate through all css's attached to a given cgroup.
385 */
386 struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
387
388 /* For RCU-protected deletion */
389 struct rcu_head rcu_head;
390};
391
392/*
393 * struct cftype: handler definitions for cgroup control files
394 * 335 *
395 * When reading/writing to a file: 336 * See task_css_set_check().
396 * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
397 * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
398 */ 337 */
338static inline struct css_set *task_css_set(struct task_struct *task)
339{
340 return task_css_set_check(task, false);
341}
399 342
400/* cftype->flags */ 343/**
401enum { 344 * task_css - obtain css for (task, subsys)
402 CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ 345 * @task: the target task
403 CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ 346 * @subsys_id: the target subsystem ID
404 CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ 347 *
348 * See task_css_check().
349 */
350static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
351 int subsys_id)
352{
353 return task_css_check(task, subsys_id, false);
354}
405 355
406 /* internal flags, do not use outside cgroup core proper */ 356/**
407 __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ 357 * task_get_css - find and get the css for (task, subsys)
408 __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ 358 * @task: the target task
409}; 359 * @subsys_id: the target subsystem ID
360 *
361 * Find the css for the (@task, @subsys_id) combination, increment a
362 * reference on and return it. This function is guaranteed to return a
363 * valid css.
364 */
365static inline struct cgroup_subsys_state *
366task_get_css(struct task_struct *task, int subsys_id)
367{
368 struct cgroup_subsys_state *css;
369
370 rcu_read_lock();
371 while (true) {
372 css = task_css(task, subsys_id);
373 if (likely(css_tryget_online(css)))
374 break;
375 cpu_relax();
376 }
377 rcu_read_unlock();
378 return css;
379}
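
A minimal usage sketch, not part of the patch: the surrounding function and the choice of the cpu controller ID are illustrative, and assume the cpu controller is built in. It shows task_get_css() pinning a css across a potentially sleeping section, paired with css_put():

static void inspect_task_cpu_cgroup(struct task_struct *task)
{
	struct cgroup_subsys_state *css;

	css = task_get_css(task, cpu_cgrp_id);	/* always returns a valid, referenced css */
	/* safe to sleep here; the css cannot be freed while we hold the ref */
	pr_info("task %d: cpu css pinned\n", task->pid);
	css_put(css);				/* drop the reference taken above */
}
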
380
381/**
382 * task_css_is_root - test whether a task belongs to the root css
383 * @task: the target task
384 * @subsys_id: the target subsystem ID
385 *
386 * Test whether @task belongs to the root css on the specified subsystem.
387 * May be invoked in any context.
388 */
389static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
390{
391	return task_css_check(task, subsys_id, true) ==
392		init_css_set.subsys[subsys_id];
393}
410
411#define MAX_CFTYPE_NAME		64
412
413struct cftype {
414	/*
415	 * By convention, the name should begin with the name of the
416	 * subsystem, followed by a period.  Zero length string indicates
417	 * end of cftype array.
418	 */
419	char name[MAX_CFTYPE_NAME];
420	int private;
421	/*
422	 * If not 0, file mode is set to this value, otherwise it will
423	 * be figured out automatically
424 */
425 umode_t mode;
426
427 /*
428 * The maximum length of string, excluding trailing nul, that can
429 * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
430 */
431 size_t max_write_len;
432
433 /* CFTYPE_* flags */
434 unsigned int flags;
435
436 /*
437 * Fields used for internal bookkeeping. Initialized automatically
438 * during registration.
439 */
440 struct cgroup_subsys *ss; /* NULL for cgroup core files */
441 struct list_head node; /* anchored at ss->cfts */
442 struct kernfs_ops *kf_ops;
443
444 /*
445 * read_u64() is a shortcut for the common case of returning a
446 * single integer. Use it in place of read()
447 */
448 u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
449 /*
450 * read_s64() is a signed version of read_u64()
451 */
452 s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
453
454 /* generic seq_file read interface */
455 int (*seq_show)(struct seq_file *sf, void *v);
456
457 /* optional ops, implement all or none */
458 void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
459 void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
460 void (*seq_stop)(struct seq_file *sf, void *v);
461
462 /*
463 * write_u64() is a shortcut for the common case of accepting
464 * a single integer (as parsed by simple_strtoull) from
465 * userspace. Use in place of write(); return 0 or error.
466 */
467 int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
468 u64 val);
469 /*
470 * write_s64() is a signed version of write_u64()
471 */
472 int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
473 s64 val);
474
475 /*
476 * write() is the generic write callback which maps directly to
477 * kernfs write operation and overrides all other operations.
478 * Maximum write size is determined by ->max_write_len. Use
479 * of_css/cft() to access the associated css and cft.
480 */
481 ssize_t (*write)(struct kernfs_open_file *of,
482 char *buf, size_t nbytes, loff_t off);
483
484#ifdef CONFIG_DEBUG_LOCK_ALLOC
485 struct lock_class_key lockdep_key;
486#endif
487};
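
For orientation, a hedged sketch of how a controller might describe its files with this structure; the myss_* names and the stored value are hypothetical, and only the read_u64()/write_u64() shortcuts are exercised:

static u64 myss_weight_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return 100;				/* hypothetical: report the group's weight */
}

static int myss_weight_write(struct cgroup_subsys_state *css,
			     struct cftype *cft, u64 val)
{
	return val ? 0 : -EINVAL;		/* hypothetical: reject a zero weight */
}

static struct cftype myss_files[] = {
	{
		.name		= "weight",	/* file name; cgroup core may add a subsys prefix */
		.read_u64	= myss_weight_read,
		.write_u64	= myss_weight_write,
	},
	{ }	/* zero-length name terminates the array */
};
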
488
489extern struct cgroup_root cgrp_dfl_root;
490extern struct css_set init_css_set;
394
395static inline struct cgroup *task_cgroup(struct task_struct *task,
396					  int subsys_id)
397{
398 return task_css(task, subsys_id)->cgroup;
399}
491 400
492/** 401/**
493 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy 402 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
@@ -604,367 +513,22 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
604 pr_cont_kernfs_path(cgrp->kn); 513 pr_cont_kernfs_path(cgrp->kn);
605} 514}
606 515
607char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
608
609int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
610int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
611int cgroup_rm_cftypes(struct cftype *cfts);
612
613bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
614
615/*
616 * Control Group taskset, used to pass around set of tasks to cgroup_subsys
617 * methods.
618 */
619struct cgroup_taskset;
620struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
621struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
622
623/**
624 * cgroup_taskset_for_each - iterate cgroup_taskset
625 * @task: the loop cursor
626 * @tset: taskset to iterate
627 */
628#define cgroup_taskset_for_each(task, tset) \
629 for ((task) = cgroup_taskset_first((tset)); (task); \
630 (task) = cgroup_taskset_next((tset)))
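
As a hedged illustration (the myss_ prefix and the kernel-thread policy are made up), a ->can_attach() callback would typically walk the taskset like this:

static int myss_can_attach(struct cgroup_subsys_state *css,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset) {
		if (task->flags & PF_KTHREAD)
			return -EINVAL;		/* hypothetical: refuse kernel threads */
	}
	return 0;
}
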
631
632/*
633 * Control Group subsystem type.
634 * See Documentation/cgroups/cgroups.txt for details
635 */
636
637struct cgroup_subsys {
638 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
639 int (*css_online)(struct cgroup_subsys_state *css);
640 void (*css_offline)(struct cgroup_subsys_state *css);
641 void (*css_released)(struct cgroup_subsys_state *css);
642 void (*css_free)(struct cgroup_subsys_state *css);
643 void (*css_reset)(struct cgroup_subsys_state *css);
644 void (*css_e_css_changed)(struct cgroup_subsys_state *css);
645
646 int (*can_attach)(struct cgroup_subsys_state *css,
647 struct cgroup_taskset *tset);
648 void (*cancel_attach)(struct cgroup_subsys_state *css,
649 struct cgroup_taskset *tset);
650 void (*attach)(struct cgroup_subsys_state *css,
651 struct cgroup_taskset *tset);
652 void (*fork)(struct task_struct *task);
653 void (*exit)(struct cgroup_subsys_state *css,
654 struct cgroup_subsys_state *old_css,
655 struct task_struct *task);
656 void (*bind)(struct cgroup_subsys_state *root_css);
657
658 int disabled;
659 int early_init;
660
661 /*
662 * If %false, this subsystem is properly hierarchical -
663 * configuration, resource accounting and restriction on a parent
664 * cgroup cover those of its children. If %true, hierarchy support
665 * is broken in some ways - some subsystems ignore hierarchy
666 * completely while others are only implemented half-way.
667 *
668 * It's now disallowed to create nested cgroups if the subsystem is
669 * broken and cgroup core will emit a warning message on such
670 * cases. Eventually, all subsystems will be made properly
671 * hierarchical and this will go away.
672 */
673 bool broken_hierarchy;
674 bool warned_broken_hierarchy;
675
676	/* the following two fields are initialized automatically during boot */
677 int id;
678#define MAX_CGROUP_TYPE_NAMELEN 32
679 const char *name;
680
681 /* link to parent, protected by cgroup_lock() */
682 struct cgroup_root *root;
683
684 /* idr for css->id */
685 struct idr css_idr;
686
687 /*
688 * List of cftypes. Each entry is the first entry of an array
689 * terminated by zero length name.
690 */
691 struct list_head cfts;
692
693 /*
694 * Base cftypes which are automatically registered. The two can
695 * point to the same array.
696 */
697 struct cftype *dfl_cftypes; /* for the default hierarchy */
698 struct cftype *legacy_cftypes; /* for the legacy hierarchies */
699
700 /*
701 * A subsystem may depend on other subsystems. When such subsystem
702 * is enabled on a cgroup, the depended-upon subsystems are enabled
703 * together if available. Subsystems enabled due to dependency are
704 * not visible to userland until explicitly enabled. The following
705 * specifies the mask of subsystems that this one depends on.
706 */
707 unsigned int depends_on;
708};
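
A sketch, not a complete controller: wiring the callbacks and cftypes together roughly follows the pattern below. The myss_* names are hypothetical, the file array is the one sketched above, the allocation helpers need <linux/slab.h> and <linux/err.h>, and a real subsystem additionally needs an entry in linux/cgroup_subsys.h so the SUBSYS() expansion below picks it up:

static struct cgroup_subsys_state *
myss_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	return css ?: ERR_PTR(-ENOMEM);		/* css_alloc reports failure via ERR_PTR */
}

static void myss_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

struct cgroup_subsys myss_cgrp_subsys = {
	.css_alloc	= myss_css_alloc,
	.css_free	= myss_css_free,
	.dfl_cftypes	= myss_files,		/* hypothetical array from the cftype sketch */
	.legacy_cftypes	= myss_files,		/* same files on legacy hierarchies */
};
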
709
710#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
711#include <linux/cgroup_subsys.h>
712#undef SUBSYS
713
714/**
715 * task_css_set_check - obtain a task's css_set with extra access conditions
716 * @task: the task to obtain css_set for
717 * @__c: extra condition expression to be passed to rcu_dereference_check()
718 *
719 * A task's css_set is RCU protected, initialized and exited while holding
720 * task_lock(), and can only be modified while holding both cgroup_mutex
721 * and task_lock() while the task is alive. This macro verifies that the
722 * caller is inside proper critical section and returns @task's css_set.
723 *
724 * The caller can also specify additional allowed conditions via @__c, such
725 * as locks used during the cgroup_subsys::attach() methods.
726 */
727#ifdef CONFIG_PROVE_RCU
728extern struct mutex cgroup_mutex;
729extern struct rw_semaphore css_set_rwsem;
730#define task_css_set_check(task, __c) \
731 rcu_dereference_check((task)->cgroups, \
732 lockdep_is_held(&cgroup_mutex) || \
733 lockdep_is_held(&css_set_rwsem) || \
734 ((task)->flags & PF_EXITING) || (__c))
735#else
736#define task_css_set_check(task, __c) \
737 rcu_dereference((task)->cgroups)
738#endif
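
For example, a subsystem that pins css_sets under its own lock during ->attach() might (hypothetically, with my_attach_lock standing in for that lock) dereference the pointer as:

static struct css_set *myss_peek_cset(struct task_struct *task)
{
	/* hypothetical: my_attach_lock is held by the caller during ->attach() */
	return task_css_set_check(task, lockdep_is_held(&my_attach_lock));
}
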
739
740/**
741 * task_css_check - obtain css for (task, subsys) w/ extra access conds
742 * @task: the target task
743 * @subsys_id: the target subsystem ID
744 * @__c: extra condition expression to be passed to rcu_dereference_check()
745 *
746 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
747 * synchronization rules are the same as task_css_set_check().
748 */
749#define task_css_check(task, subsys_id, __c) \
750 task_css_set_check((task), (__c))->subsys[(subsys_id)]
751
752/**
753 * task_css_set - obtain a task's css_set
754 * @task: the task to obtain css_set for
755 *
756 * See task_css_set_check().
757 */
758static inline struct css_set *task_css_set(struct task_struct *task)
759{
760 return task_css_set_check(task, false);
761}
762
763/**
764 * task_css - obtain css for (task, subsys)
765 * @task: the target task
766 * @subsys_id: the target subsystem ID
767 *
768 * See task_css_check().
769 */
770static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
771 int subsys_id)
772{
773 return task_css_check(task, subsys_id, false);
774}
775
776/**
777 * task_css_is_root - test whether a task belongs to the root css
778 * @task: the target task
779 * @subsys_id: the target subsystem ID
780 *
781 * Test whether @task belongs to the root css on the specified subsystem.
782 * May be invoked in any context.
783 */
784static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
785{
786 return task_css_check(task, subsys_id, true) ==
787 init_css_set.subsys[subsys_id];
788}
789
790static inline struct cgroup *task_cgroup(struct task_struct *task,
791 int subsys_id)
792{
793 return task_css(task, subsys_id)->cgroup;
794}
795
796struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
797 struct cgroup_subsys_state *parent);
798
799struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
800
801/**
802 * css_for_each_child - iterate through children of a css
803 * @pos: the css * to use as the loop cursor
804 * @parent: css whose children to walk
805 *
806 * Walk @parent's children. Must be called under rcu_read_lock().
807 *
808 * If a subsystem synchronizes ->css_online() and the start of iteration, a
809 * css which finished ->css_online() is guaranteed to be visible in the
810 * future iterations and will stay visible until the last reference is put.
811 * A css which hasn't finished ->css_online() or already finished
812 * ->css_offline() may show up during traversal. It's each subsystem's
813 * responsibility to synchronize against on/offlining.
814 *
815 * It is allowed to temporarily drop RCU read lock during iteration. The
816 * caller is responsible for ensuring that @pos remains accessible until
817 * the start of the next iteration by, for example, bumping the css refcnt.
818 */
819#define css_for_each_child(pos, parent) \
820 for ((pos) = css_next_child(NULL, (parent)); (pos); \
821 (pos) = css_next_child((pos), (parent)))
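
A hedged sketch of the usual pattern, counting a css's children under the RCU read lock (the function name is illustrative):

static int myss_count_children(struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *child;
	int n = 0;

	rcu_read_lock();
	css_for_each_child(child, parent)
		n++;				/* children may come and go; this is a snapshot */
	rcu_read_unlock();
	return n;
}
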
822
823struct cgroup_subsys_state *
824css_next_descendant_pre(struct cgroup_subsys_state *pos,
825 struct cgroup_subsys_state *css);
826
827struct cgroup_subsys_state *
828css_rightmost_descendant(struct cgroup_subsys_state *pos);
829
830/**
831 * css_for_each_descendant_pre - pre-order walk of a css's descendants
832 * @pos: the css * to use as the loop cursor
833 * @root: css whose descendants to walk
834 *
835 * Walk @root's descendants. @root is included in the iteration and the
836 * first node to be visited. Must be called under rcu_read_lock().
837 *
838 * If a subsystem synchronizes ->css_online() and the start of iteration, a
839 * css which finished ->css_online() is guaranteed to be visible in the
840 * future iterations and will stay visible until the last reference is put.
841 * A css which hasn't finished ->css_online() or already finished
842 * ->css_offline() may show up during traversal. It's each subsystem's
843 * responsibility to synchronize against on/offlining.
844 *
845 * For example, the following guarantees that a descendant can't escape
846 * state updates of its ancestors.
847 *
848 * my_online(@css)
849 * {
850 * Lock @css's parent and @css;
851 * Inherit state from the parent;
852 * Unlock both.
853 * }
854 *
855 * my_update_state(@css)
856 * {
857 * css_for_each_descendant_pre(@pos, @css) {
858 * Lock @pos;
859 * if (@pos == @css)
860 * Update @css's state;
861 * else
862 * Verify @pos is alive and inherit state from its parent;
863 * Unlock @pos;
864 * }
865 * }
866 *
867 * As long as the inheriting step, including checking the parent state, is
868 * enclosed inside @pos locking, double-locking the parent isn't necessary
869 * while inheriting. The state update to the parent is guaranteed to be
870 * visible by walking order and, as long as inheriting operations to the
871 * same @pos are atomic to each other, multiple updates racing each other
872 * same @pos are atomic to each other, multiple updates racing each other
873 * still result in the correct state.  It's guaranteed that at least one
873 * inheritance happens for any css after the latest update to its parent.
874 *
875 * If checking parent's state requires locking the parent, each inheriting
876 * iteration should lock and unlock both @pos->parent and @pos.
877 *
878 * Alternatively, a subsystem may choose to use a single global lock to
879 * synchronize ->css_online() and ->css_offline() against tree-walking
880 * operations.
881 *
882 * It is allowed to temporarily drop RCU read lock during iteration. The
883 * caller is responsible for ensuring that @pos remains accessible until
884 * the start of the next iteration by, for example, bumping the css refcnt.
885 */
886#define css_for_each_descendant_pre(pos, css) \
887 for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
888 (pos) = css_next_descendant_pre((pos), (css)))
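
Rendering the my_update_state() pseudo-code above as C, under the assumption of a hypothetical container type with its own ->lock, ->online and ->state fields and a to_myss() cast helper:

static void myss_update_state(struct cgroup_subsys_state *root_css, int new_state)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root_css) {
		struct myss *ms = to_myss(pos);		/* hypothetical container */

		spin_lock(&ms->lock);
		if (pos == root_css)
			ms->state = new_state;
		else if (ms->online)			/* inherit only if @pos is alive */
			ms->state = to_myss(pos->parent)->state;
		spin_unlock(&ms->lock);
	}
	rcu_read_unlock();
}
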
889
890struct cgroup_subsys_state *
891css_next_descendant_post(struct cgroup_subsys_state *pos,
892 struct cgroup_subsys_state *css);
893
894/**
895 * css_for_each_descendant_post - post-order walk of a css's descendants
896 * @pos: the css * to use as the loop cursor
897 * @css: css whose descendants to walk
898 *
899 * Similar to css_for_each_descendant_pre() but performs post-order
900 * traversal instead. @root is included in the iteration and the last
901 * node to be visited.
902 *
903 * If a subsystem synchronizes ->css_online() and the start of iteration, a
904 * css which finished ->css_online() is guaranteed to be visible in the
905 * future iterations and will stay visible until the last reference is put.
906 * A css which hasn't finished ->css_online() or already finished
907 * ->css_offline() may show up during traversal. It's each subsystem's
908 * responsibility to synchronize against on/offlining.
909 *
910 * Note that the walk visibility guarantee example described in pre-order
911 * walk doesn't apply the same to post-order walks.
912 */
913#define css_for_each_descendant_post(pos, css) \
914 for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
915 (pos) = css_next_descendant_post((pos), (css)))
916
917bool css_has_online_children(struct cgroup_subsys_state *css);
918
919/* A css_task_iter should be treated as an opaque object */
920struct css_task_iter {
921 struct cgroup_subsys *ss;
922
923 struct list_head *cset_pos;
924 struct list_head *cset_head;
925
926 struct list_head *task_pos;
927 struct list_head *tasks_head;
928 struct list_head *mg_tasks_head;
929};
930
931void css_task_iter_start(struct cgroup_subsys_state *css,
932 struct css_task_iter *it);
933struct task_struct *css_task_iter_next(struct css_task_iter *it);
934void css_task_iter_end(struct css_task_iter *it);
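
A short usage sketch (illustrative names) of the iterator declared above:

static int myss_count_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;
	int nr = 0;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		nr++;
	css_task_iter_end(&it);		/* must always be paired with _start() */
	return nr;
}
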
935
936int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
937int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
938
939struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
940 struct cgroup_subsys *ss);
941struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
942 struct cgroup_subsys *ss);
943
944#else /* !CONFIG_CGROUPS */ 516#else /* !CONFIG_CGROUPS */
945 517
946struct cgroup_subsys_state; 518struct cgroup_subsys_state;
947 519
948static inline int cgroup_init_early(void) { return 0; }
949static inline int cgroup_init(void) { return 0; }
520static inline void css_put(struct cgroup_subsys_state *css) {}
521static inline int cgroup_attach_task_all(struct task_struct *from,
522 struct task_struct *t) { return 0; }
523static inline int cgroupstats_build(struct cgroupstats *stats,
524 struct dentry *dentry) { return -EINVAL; }
525
950static inline void cgroup_fork(struct task_struct *p) {} 526static inline void cgroup_fork(struct task_struct *p) {}
951static inline void cgroup_post_fork(struct task_struct *p) {} 527static inline void cgroup_post_fork(struct task_struct *p) {}
952static inline void cgroup_exit(struct task_struct *p) {} 528static inline void cgroup_exit(struct task_struct *p) {}
953 529
530static inline int cgroup_init_early(void) { return 0; }
531static inline int cgroup_init(void) { return 0; }
954static inline int cgroupstats_build(struct cgroupstats *stats,
955				    struct dentry *dentry)
956{
957 return -EINVAL;
958}
959
960static inline void css_put(struct cgroup_subsys_state *css) {}
961
962/* No cgroups - nothing to do */
963static inline int cgroup_attach_task_all(struct task_struct *from,
964 struct task_struct *t)
965{
966 return 0;
967}
968 532
969#endif /* !CONFIG_CGROUPS */ 533#endif /* !CONFIG_CGROUPS */
970 534
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index df695313f975..78842f46f152 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -31,6 +31,7 @@
31#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ 31#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
32#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ 32#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
33#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ 33#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
34#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
34 35
35struct clk_hw; 36struct clk_hw;
36struct clk_core; 37struct clk_core;
@@ -209,7 +210,7 @@ struct clk_ops {
209struct clk_init_data { 210struct clk_init_data {
210 const char *name; 211 const char *name;
211 const struct clk_ops *ops; 212 const struct clk_ops *ops;
212 const char **parent_names; 213 const char * const *parent_names;
213 u8 num_parents; 214 u8 num_parents;
214 unsigned long flags; 215 unsigned long flags;
215}; 216};
@@ -426,12 +427,14 @@ extern const struct clk_ops clk_mux_ops;
426extern const struct clk_ops clk_mux_ro_ops; 427extern const struct clk_ops clk_mux_ro_ops;
427 428
428struct clk *clk_register_mux(struct device *dev, const char *name, 429struct clk *clk_register_mux(struct device *dev, const char *name,
429 const char **parent_names, u8 num_parents, unsigned long flags, 430 const char * const *parent_names, u8 num_parents,
431 unsigned long flags,
430 void __iomem *reg, u8 shift, u8 width, 432 void __iomem *reg, u8 shift, u8 width,
431 u8 clk_mux_flags, spinlock_t *lock); 433 u8 clk_mux_flags, spinlock_t *lock);
432 434
433struct clk *clk_register_mux_table(struct device *dev, const char *name, 435struct clk *clk_register_mux_table(struct device *dev, const char *name,
434 const char **parent_names, u8 num_parents, unsigned long flags, 436 const char * const *parent_names, u8 num_parents,
437 unsigned long flags,
435 void __iomem *reg, u8 shift, u32 mask, 438 void __iomem *reg, u8 shift, u32 mask,
436 u8 clk_mux_flags, u32 *table, spinlock_t *lock); 439 u8 clk_mux_flags, u32 *table, spinlock_t *lock);
437 440
@@ -457,7 +460,7 @@ struct clk_fixed_factor {
457 unsigned int div; 460 unsigned int div;
458}; 461};
459 462
460extern struct clk_ops clk_fixed_factor_ops; 463extern const struct clk_ops clk_fixed_factor_ops;
461struct clk *clk_register_fixed_factor(struct device *dev, const char *name, 464struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
462 const char *parent_name, unsigned long flags, 465 const char *parent_name, unsigned long flags,
463 unsigned int mult, unsigned int div); 466 unsigned int mult, unsigned int div);
@@ -518,7 +521,7 @@ struct clk_composite {
518}; 521};
519 522
520struct clk *clk_register_composite(struct device *dev, const char *name, 523struct clk *clk_register_composite(struct device *dev, const char *name,
521 const char **parent_names, int num_parents, 524 const char * const *parent_names, int num_parents,
522 struct clk_hw *mux_hw, const struct clk_ops *mux_ops, 525 struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
523 struct clk_hw *rate_hw, const struct clk_ops *rate_ops, 526 struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
524 struct clk_hw *gate_hw, const struct clk_ops *gate_ops, 527 struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
@@ -589,6 +592,7 @@ long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
589 unsigned long max_rate, 592 unsigned long max_rate,
590 unsigned long *best_parent_rate, 593 unsigned long *best_parent_rate,
591 struct clk_hw **best_parent_p); 594 struct clk_hw **best_parent_p);
595void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
592 596
593static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) 597static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
594{ 598{
@@ -624,6 +628,8 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
624 void *data); 628 void *data);
625struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); 629struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
626int of_clk_get_parent_count(struct device_node *np); 630int of_clk_get_parent_count(struct device_node *np);
631int of_clk_parent_fill(struct device_node *np, const char **parents,
632 unsigned int size);
627const char *of_clk_get_parent_name(struct device_node *np, int index); 633const char *of_clk_get_parent_name(struct device_node *np, int index);
628 634
629void of_clk_init(const struct of_device_id *matches); 635void of_clk_init(const struct of_device_id *matches);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 68c16a6bedb3..0df4a51e1a78 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -306,6 +306,20 @@ void devm_clk_put(struct device *dev, struct clk *clk);
306 * @clk: clock source 306 * @clk: clock source
307 * @rate: desired clock rate in Hz 307 * @rate: desired clock rate in Hz
308 * 308 *
309 * This answers the question "if I were to pass @rate to clk_set_rate(),
310 * what clock rate would I end up with?" without changing the hardware
311 * in any way. In other words:
312 *
313 * rate = clk_round_rate(clk, r);
314 *
315 * and:
316 *
317 * clk_set_rate(clk, r);
318 * rate = clk_get_rate(clk);
319 *
320 * are equivalent except the former does not modify the clock hardware
321 * in any way.
322 *
309 * Returns rounded clock rate in Hz, or negative errno. 323 * Returns rounded clock rate in Hz, or negative errno.
310 */ 324 */
311long clk_round_rate(struct clk *clk, unsigned long rate); 325long clk_round_rate(struct clk *clk, unsigned long rate);
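
A hedged consumer-side sketch of that equivalence: probe the achievable rate first and commit only if it is acceptable. The 48 MHz target and the 1 MHz tolerance are made up:

static int set_roughly_48mhz(struct clk *clk)
{
	long rounded = clk_round_rate(clk, 48000000);

	if (rounded < 0)
		return rounded;			/* propagate the errno */
	if (rounded < 47000000 || rounded > 49000000)
		return -ERANGE;			/* hardware cannot get close enough */
	return clk_set_rate(clk, rounded);	/* sets exactly the rate we just checked */
}
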
@@ -471,19 +485,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
471 clk_unprepare(clk); 485 clk_unprepare(clk);
472} 486}
473 487
474/**
475 * clk_add_alias - add a new clock alias
476 * @alias: name for clock alias
477 * @alias_dev_name: device name
478 * @id: platform specific clock name
479 * @dev: device
480 *
481 * Allows using generic clock names for drivers by adding a new alias.
482 * Assumes clkdev, see clkdev.h for more info.
483 */
484int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
485 struct device *dev);
486
487struct device_node; 488struct device_node;
488struct of_phandle_args; 489struct of_phandle_args;
489 490
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 94bad77eeb4a..08bffcc466de 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -22,6 +22,7 @@ struct clk_lookup {
22 const char *dev_id; 22 const char *dev_id;
23 const char *con_id; 23 const char *con_id;
24 struct clk *clk; 24 struct clk *clk;
25 struct clk_hw *clk_hw;
25}; 26};
26 27
27#define CLKDEV_INIT(d, n, c) \ 28#define CLKDEV_INIT(d, n, c) \
@@ -32,15 +33,19 @@ struct clk_lookup {
32 } 33 }
33 34
34struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, 35struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
35 const char *dev_fmt, ...); 36 const char *dev_fmt, ...) __printf(3, 4);
36 37
37void clkdev_add(struct clk_lookup *cl); 38void clkdev_add(struct clk_lookup *cl);
38void clkdev_drop(struct clk_lookup *cl); 39void clkdev_drop(struct clk_lookup *cl);
39 40
41struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
42 const char *dev_fmt, ...) __printf(3, 4);
43
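
For example (sketch; the helper name is illustrative), registering a lookup keyed only by device name now takes a single call instead of clkdev_alloc() plus clkdev_add():

static int add_clk_lookup(struct device *dev, struct clk *clk)
{
	struct clk_lookup *cl;

	cl = clkdev_create(clk, NULL, "%s", dev_name(dev));
	return cl ? 0 : -ENOMEM;	/* clkdev_create() returns NULL on allocation failure */
}
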
40void clkdev_add_table(struct clk_lookup *, size_t); 44void clkdev_add_table(struct clk_lookup *, size_t);
41int clk_add_alias(const char *, const char *, char *, struct device *); 45int clk_add_alias(const char *, const char *, const char *, struct device *);
42 46
43int clk_register_clkdev(struct clk *, const char *, const char *, ...); 47int clk_register_clkdev(struct clk *, const char *, const char *, ...)
48 __printf(3, 4);
44int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); 49int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
45 50
46#ifdef CONFIG_COMMON_CLK 51#ifdef CONFIG_COMMON_CLK
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ab25814690bc..a76c9172b2eb 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -424,7 +424,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
424 424
425asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); 425asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
426 426
427extern int compat_printk(const char *fmt, ...); 427extern __printf(1, 2) int compat_printk(const char *fmt, ...);
428extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat); 428extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
429extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set); 429extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
430 430
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 371e560d13cf..dfaa7b3e9ae9 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -5,9 +5,9 @@
5/* 5/*
6 * Common definitions for all gcc versions go here. 6 * Common definitions for all gcc versions go here.
7 */ 7 */
8#define GCC_VERSION (__GNUC__ * 10000 \ 8#define GCC_VERSION (__GNUC__ * 10000 \
9 + __GNUC_MINOR__ * 100 \ 9 + __GNUC_MINOR__ * 100 \
10 + __GNUC_PATCHLEVEL__) 10 + __GNUC_PATCHLEVEL__)
11 11
12/* Optimization barrier */ 12/* Optimization barrier */
13 13
@@ -46,55 +46,63 @@
46 * the inline assembly constraint from =g to =r, in this particular 46 * the inline assembly constraint from =g to =r, in this particular
47 * case either is valid. 47 * case either is valid.
48 */ 48 */
49#define RELOC_HIDE(ptr, off) \ 49#define RELOC_HIDE(ptr, off) \
50 ({ unsigned long __ptr; \ 50({ \
51 __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ 51 unsigned long __ptr; \
52 (typeof(ptr)) (__ptr + (off)); }) 52 __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
53 (typeof(ptr)) (__ptr + (off)); \
54})
53 55
54/* Make the optimizer believe the variable can be manipulated arbitrarily. */ 56/* Make the optimizer believe the variable can be manipulated arbitrarily. */
55#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) 57#define OPTIMIZER_HIDE_VAR(var) \
58 __asm__ ("" : "=r" (var) : "0" (var))
56 59
57#ifdef __CHECKER__ 60#ifdef __CHECKER__
58#define __must_be_array(arr) 0 61#define __must_be_array(a) 0
59#else 62#else
60/* &a[0] degrades to a pointer: a different type from an array */ 63/* &a[0] degrades to a pointer: a different type from an array */
61#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) 64#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
62#endif 65#endif
63 66
64/* 67/*
65 * Force always-inline if the user requests it so via the .config, 68 * Force always-inline if the user requests it so via the .config,
66 * or if gcc is too old: 69 * or if gcc is too old:
67 */ 70 */
68#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ 71#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
69 !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) 72 !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
70# define inline inline __attribute__((always_inline)) notrace 73#define inline inline __attribute__((always_inline)) notrace
71# define __inline__ __inline__ __attribute__((always_inline)) notrace 74#define __inline__ __inline__ __attribute__((always_inline)) notrace
72# define __inline __inline __attribute__((always_inline)) notrace 75#define __inline __inline __attribute__((always_inline)) notrace
73#else 76#else
74/* A lot of inline functions can cause havoc with function tracing */ 77/* A lot of inline functions can cause havoc with function tracing */
75# define inline inline notrace 78#define inline inline notrace
76# define __inline__ __inline__ notrace 79#define __inline__ __inline__ notrace
77# define __inline __inline notrace 80#define __inline __inline notrace
78#endif 81#endif
79 82
80#define __deprecated		__attribute__((deprecated))
81#define __packed		__attribute__((packed))
82#define __weak			__attribute__((weak))
83#define __alias(symbol)		__attribute__((alias(#symbol)))
83#define __always_inline		inline __attribute__((always_inline))
84#define noinline		__attribute__((noinline))
85
86#define __deprecated		__attribute__((deprecated))
87#define __packed		__attribute__((packed))
88#define __weak			__attribute__((weak))
89#define __alias(symbol)		__attribute__((alias(#symbol)))
84 90
85/* 91/*
86 * it doesn't make sense on ARM (currently the only user of __naked) to trace 92 * it doesn't make sense on ARM (currently the only user of __naked)
87 * naked functions because then mcount is called without stack and frame pointer 93 * to trace naked functions because then mcount is called without
88 * being set up and there is no chance to restore the lr register to the value 94 * stack and frame pointer being set up and there is no chance to
89 * before mcount was called. 95 * restore the lr register to the value before mcount was called.
96 *
97 * The asm() bodies of naked functions often depend on standard calling
98 * conventions, therefore they must be noinline and noclone.
90 * 99 *
91 * The asm() bodies of naked functions often depend on standard calling conventions, 100 * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
92 * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce 101 * See GCC PR44290.
93 * this, so we must do so ourselves. See GCC PR44290.
94 */ 102 */
95#define __naked __attribute__((naked)) noinline __noclone notrace 103#define __naked __attribute__((naked)) noinline __noclone notrace
96 104
97#define __noreturn __attribute__((noreturn)) 105#define __noreturn __attribute__((noreturn))
98 106
99/* 107/*
100 * From the GCC manual: 108 * From the GCC manual:
@@ -106,19 +114,130 @@
106 * would be. 114 * would be.
107 * [...] 115 * [...]
108 */ 116 */
109#define __pure __attribute__((pure)) 117#define __pure __attribute__((pure))
110#define __aligned(x) __attribute__((aligned(x))) 118#define __aligned(x) __attribute__((aligned(x)))
111#define __printf(a, b) __attribute__((format(printf, a, b))) 119#define __printf(a, b) __attribute__((format(printf, a, b)))
112#define __scanf(a, b) __attribute__((format(scanf, a, b))) 120#define __scanf(a, b) __attribute__((format(scanf, a, b)))
113#define noinline		__attribute__((noinline))
114#define __attribute_const__	__attribute__((__const__))
115#define __maybe_unused		__attribute__((unused))
116#define __always_unused		__attribute__((unused))
117
118#define __gcc_header(x) #x
119#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
120#define gcc_header(x) _gcc_header(x)
121#include gcc_header(__GNUC__)
121#define __attribute_const__	__attribute__((__const__))
122#define __maybe_unused		__attribute__((unused))
123#define __always_unused		__attribute__((unused))
124
125/* gcc version specific checks */
126
127#if GCC_VERSION < 30200
128# error Sorry, your compiler is too old - please upgrade it.
129#endif
130
131#if GCC_VERSION < 30300
132# define __used __attribute__((__unused__))
133#else
134# define __used __attribute__((__used__))
135#endif
136
137#ifdef CONFIG_GCOV_KERNEL
138# if GCC_VERSION < 30400
139# error "GCOV profiling support for gcc versions below 3.4 not included"
140# endif /* __GNUC_MINOR__ */
141#endif /* CONFIG_GCOV_KERNEL */
142
143#if GCC_VERSION >= 30400
144#define __must_check __attribute__((warn_unused_result))
145#endif
146
147#if GCC_VERSION >= 40000
148
149/* GCC 4.1.[01] miscompiles __weak */
150#ifdef __KERNEL__
151# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
152# error Your version of gcc miscompiles the __weak directive
153# endif
154#endif
155
156#define __used __attribute__((__used__))
157#define __compiler_offsetof(a, b) \
158 __builtin_offsetof(a, b)
159
160#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
161# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
162#endif
163
164#if GCC_VERSION >= 40300
165/* Mark functions as cold. gcc will assume any path leading to a call
166 * to them will be unlikely. This means a lot of manual unlikely()s
167 * are unnecessary now for any paths leading to the usual suspects
168 * like BUG(), printk(), panic() etc. [but let's keep them for now for
169 * older compilers]
170 *
171 * Early snapshots of gcc 4.3 don't support this and we can't detect this
172 * in the preprocessor, but we can live with this because they're unreleased.
173 * Maketime probing would be overkill here.
174 *
175 * gcc also has a __attribute__((__hot__)) to move hot functions into
176 * a special section, but I don't see any sense in this right now in
177 * the kernel context
178 */
179#define __cold __attribute__((__cold__))
180
181#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
182
183#ifndef __CHECKER__
184# define __compiletime_warning(message) __attribute__((warning(message)))
185# define __compiletime_error(message) __attribute__((error(message)))
186#endif /* __CHECKER__ */
187#endif /* GCC_VERSION >= 40300 */
188
189#if GCC_VERSION >= 40500
190/*
191 * Mark a position in code as unreachable. This can be used to
192 * suppress control flow warnings after asm blocks that transfer
193 * control elsewhere.
194 *
195 * Early snapshots of gcc 4.5 don't support this and we can't detect
196 * this in the preprocessor, but we can live with this because they're
197 * unreleased. Really, we need to have autoconf for the kernel.
198 */
199#define unreachable() __builtin_unreachable()
200
201/* Mark a function definition as prohibited from being cloned. */
202#define __noclone __attribute__((__noclone__))
203
204#endif /* GCC_VERSION >= 40500 */
205
206#if GCC_VERSION >= 40600
207/*
208 * Tell the optimizer that something else uses this function or variable.
209 */
210#define __visible __attribute__((externally_visible))
211#endif
212
213/*
214 * GCC 'asm goto' miscompiles certain code sequences:
215 *
216 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
217 *
218 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
219 *
220 * (asm goto is automatically volatile - the naming reflects this.)
221 */
222#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
223
224#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
225#if GCC_VERSION >= 40400
226#define __HAVE_BUILTIN_BSWAP32__
227#define __HAVE_BUILTIN_BSWAP64__
228#endif
229#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
230#define __HAVE_BUILTIN_BSWAP16__
231#endif
232#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
233
234#if GCC_VERSION >= 50000
235#define KASAN_ABI_VERSION 4
236#elif GCC_VERSION >= 40902
237#define KASAN_ABI_VERSION 3
238#endif
239
240#endif /* gcc version >= 40000 specific checks */
122 241
123#if !defined(__noclone) 242#if !defined(__noclone)
124#define __noclone /* not needed */ 243#define __noclone /* not needed */
@@ -129,5 +248,3 @@
129 * code 248 * code
130 */ 249 */
131#define uninitialized_var(x) x = x 250#define uninitialized_var(x) x = x
132
133#define __always_inline inline __attribute__((always_inline))
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
deleted file mode 100644
index 7d89febe4d79..000000000000
--- a/include/linux/compiler-gcc3.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef __LINUX_COMPILER_H
2#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
3#endif
4
5#if GCC_VERSION < 30200
6# error Sorry, your compiler is too old - please upgrade it.
7#endif
8
9#if GCC_VERSION >= 30300
10# define __used __attribute__((__used__))
11#else
12# define __used __attribute__((__unused__))
13#endif
14
15#if GCC_VERSION >= 30400
16#define __must_check __attribute__((warn_unused_result))
17#endif
18
19#ifdef CONFIG_GCOV_KERNEL
20# if GCC_VERSION < 30400
21# error "GCOV profiling support for gcc versions below 3.4 not included"
22# endif /* __GNUC_MINOR__ */
23#endif /* CONFIG_GCOV_KERNEL */
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
deleted file mode 100644
index 769e19864632..000000000000
--- a/include/linux/compiler-gcc4.h
+++ /dev/null
@@ -1,91 +0,0 @@
1#ifndef __LINUX_COMPILER_H
2#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
3#endif
4
5/* GCC 4.1.[01] miscompiles __weak */
6#ifdef __KERNEL__
7# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
8# error Your version of gcc miscompiles the __weak directive
9# endif
10#endif
11
12#define __used __attribute__((__used__))
13#define __must_check __attribute__((warn_unused_result))
14#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
15
16#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
17# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
18#endif
19
20#if GCC_VERSION >= 40300
21/* Mark functions as cold. gcc will assume any path leading to a call
22 to them will be unlikely. This means a lot of manual unlikely()s
23 are unnecessary now for any paths leading to the usual suspects
24 like BUG(), printk(), panic() etc. [but let's keep them for now for
25 older compilers]
26
27 Early snapshots of gcc 4.3 don't support this and we can't detect this
28 in the preprocessor, but we can live with this because they're unreleased.
29 Maketime probing would be overkill here.
30
31 gcc also has a __attribute__((__hot__)) to move hot functions into
32 a special section, but I don't see any sense in this right now in
33 the kernel context */
34#define __cold __attribute__((__cold__))
35
36#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
37
38#ifndef __CHECKER__
39# define __compiletime_warning(message) __attribute__((warning(message)))
40# define __compiletime_error(message) __attribute__((error(message)))
41#endif /* __CHECKER__ */
42#endif /* GCC_VERSION >= 40300 */
43
44#if GCC_VERSION >= 40500
45/*
46 * Mark a position in code as unreachable. This can be used to
47 * suppress control flow warnings after asm blocks that transfer
48 * control elsewhere.
49 *
50 * Early snapshots of gcc 4.5 don't support this and we can't detect
51 * this in the preprocessor, but we can live with this because they're
52 * unreleased. Really, we need to have autoconf for the kernel.
53 */
54#define unreachable() __builtin_unreachable()
55
56/* Mark a function definition as prohibited from being cloned. */
57#define __noclone __attribute__((__noclone__))
58
59#endif /* GCC_VERSION >= 40500 */
60
61#if GCC_VERSION >= 40600
62/*
63 * Tell the optimizer that something else uses this function or variable.
64 */
65#define __visible __attribute__((externally_visible))
66#endif
67
68/*
69 * GCC 'asm goto' miscompiles certain code sequences:
70 *
71 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
72 *
73 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
74 *
75 * (asm goto is automatically volatile - the naming reflects this.)
76 */
77#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
78
79#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
80#if GCC_VERSION >= 40400
81#define __HAVE_BUILTIN_BSWAP32__
82#define __HAVE_BUILTIN_BSWAP64__
83#endif
84#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
85#define __HAVE_BUILTIN_BSWAP16__
86#endif
87#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
88
89#if GCC_VERSION >= 40902
90#define KASAN_ABI_VERSION 3
91#endif
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
deleted file mode 100644
index efee493714eb..000000000000
--- a/include/linux/compiler-gcc5.h
+++ /dev/null
@@ -1,67 +0,0 @@
1#ifndef __LINUX_COMPILER_H
2#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
3#endif
4
5#define __used __attribute__((__used__))
6#define __must_check __attribute__((warn_unused_result))
7#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
8
9/* Mark functions as cold. gcc will assume any path leading to a call
10 to them will be unlikely. This means a lot of manual unlikely()s
11 are unnecessary now for any paths leading to the usual suspects
12 like BUG(), printk(), panic() etc. [but let's keep them for now for
13 older compilers]
14
15 Early snapshots of gcc 4.3 don't support this and we can't detect this
16 in the preprocessor, but we can live with this because they're unreleased.
17 Maketime probing would be overkill here.
18
19 gcc also has a __attribute__((__hot__)) to move hot functions into
20 a special section, but I don't see any sense in this right now in
21 the kernel context */
22#define __cold __attribute__((__cold__))
23
24#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
25
26#ifndef __CHECKER__
27# define __compiletime_warning(message) __attribute__((warning(message)))
28# define __compiletime_error(message) __attribute__((error(message)))
29#endif /* __CHECKER__ */
30
31/*
32 * Mark a position in code as unreachable. This can be used to
33 * suppress control flow warnings after asm blocks that transfer
34 * control elsewhere.
35 *
36 * Early snapshots of gcc 4.5 don't support this and we can't detect
37 * this in the preprocessor, but we can live with this because they're
38 * unreleased. Really, we need to have autoconf for the kernel.
39 */
40#define unreachable() __builtin_unreachable()
41
42/* Mark a function definition as prohibited from being cloned. */
43#define __noclone __attribute__((__noclone__))
44
45/*
46 * Tell the optimizer that something else uses this function or variable.
47 */
48#define __visible __attribute__((externally_visible))
49
50/*
51 * GCC 'asm goto' miscompiles certain code sequences:
52 *
53 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
54 *
55 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
56 *
57 * (asm goto is automatically volatile - the naming reflects this.)
58 */
59#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
60
61#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
62#define __HAVE_BUILTIN_BSWAP32__
63#define __HAVE_BUILTIN_BSWAP64__
64#define __HAVE_BUILTIN_BSWAP16__
65#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
66
67#define KASAN_ABI_VERSION 4
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 0c9a2f2c2802..d4c71132d07f 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -13,10 +13,12 @@
13/* Intel ECC compiler doesn't support gcc specific asm stmts. 13/* Intel ECC compiler doesn't support gcc specific asm stmts.
14 * It uses intrinsics to do the equivalent things. 14 * It uses intrinsics to do the equivalent things.
15 */ 15 */
16#undef barrier
16#undef barrier_data 17#undef barrier_data
17#undef RELOC_HIDE 18#undef RELOC_HIDE
18#undef OPTIMIZER_HIDE_VAR 19#undef OPTIMIZER_HIDE_VAR
19 20
21#define barrier() __memory_barrier()
20#define barrier_data(ptr) barrier() 22#define barrier_data(ptr) barrier()
21 23
22#define RELOC_HIDE(ptr, off) \ 24#define RELOC_HIDE(ptr, off) \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 05be2352fef8..e08a6ae7c0a4 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -17,6 +17,7 @@
17# define __release(x) __context__(x,-1) 17# define __release(x) __context__(x,-1)
18# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) 18# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
19# define __percpu __attribute__((noderef, address_space(3))) 19# define __percpu __attribute__((noderef, address_space(3)))
20# define __pmem __attribute__((noderef, address_space(5)))
20#ifdef CONFIG_SPARSE_RCU_POINTER 21#ifdef CONFIG_SPARSE_RCU_POINTER
21# define __rcu __attribute__((noderef, address_space(4))) 22# define __rcu __attribute__((noderef, address_space(4)))
22#else 23#else
@@ -42,6 +43,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
42# define __cond_lock(x,c) (c) 43# define __cond_lock(x,c) (c)
43# define __percpu 44# define __percpu
44# define __rcu 45# define __rcu
46# define __pmem
45#endif 47#endif
46 48
47/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ 49/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
@@ -473,6 +475,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
473 (volatile typeof(x) *)&(x); }) 475 (volatile typeof(x) *)&(x); })
474#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) 476#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
475 477
478/**
479 * lockless_dereference() - safely load a pointer for later dereference
480 * @p: The pointer to load
481 *
482 * Similar to rcu_dereference(), but for situations where the pointed-to
483 * object's lifetime is managed by something other than RCU. That
484 * "something other" might be reference counting or simple immortality.
485 */
486#define lockless_dereference(p) \
487({ \
488 typeof(p) _________p1 = READ_ONCE(p); \
489 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
490 (_________p1); \
491})
492
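
A minimal sketch, assuming a pointer that is published once with smp_store_release() and then never freed, so its lifetime needs no RCU (the foo names are hypothetical):

struct foo { int val; };
static struct foo *foo_ptr;		/* writer side: smp_store_release(&foo_ptr, f); */

static int read_foo_val(void)
{
	struct foo *p = lockless_dereference(foo_ptr);

	return p ? p->val : -1;		/* dependency ordering makes p->val safe to read */
}
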
476/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ 493/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
477#ifdef CONFIG_KPROBES 494#ifdef CONFIG_KPROBES
478# define __kprobes __attribute__((__section__(".kprobes.text"))) 495# define __kprobes __attribute__((__section__(".kprobes.text")))
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 34025df61829..63a36e89d0eb 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -64,14 +64,14 @@ struct config_item {
64 struct dentry *ci_dentry; 64 struct dentry *ci_dentry;
65}; 65};
66 66
67extern int config_item_set_name(struct config_item *, const char *, ...); 67extern __printf(2, 3)
68int config_item_set_name(struct config_item *, const char *, ...);
68 69
69static inline char *config_item_name(struct config_item * item) 70static inline char *config_item_name(struct config_item * item)
70{ 71{
71 return item->ci_name; 72 return item->ci_name;
72} 73}
73 74
74extern void config_item_init(struct config_item *);
75extern void config_item_init_type_name(struct config_item *item, 75extern void config_item_init_type_name(struct config_item *item,
76 const char *name, 76 const char *name,
77 struct config_item_type *type); 77 struct config_item_type *type);
diff --git a/include/linux/console.h b/include/linux/console.h
index 9f50fb413c11..bd194343c346 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -115,6 +115,7 @@ static inline int con_debug_leave(void)
115#define CON_BOOT (8) 115#define CON_BOOT (8)
116#define CON_ANYTIME (16) /* Safe to call when cpu is offline */ 116#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
117#define CON_BRL (32) /* Used for a braille device */ 117#define CON_BRL (32) /* Used for a braille device */
118#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
118 119
119struct console { 120struct console {
120 char name[16]; 121 char name[16];
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index e859c98d1767..e329ee2667e1 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -104,6 +104,7 @@ struct vc_data {
104 unsigned int vc_resize_user; /* resize request from user */ 104 unsigned int vc_resize_user; /* resize request from user */
105 unsigned int vc_bell_pitch; /* Console bell pitch */ 105 unsigned int vc_bell_pitch; /* Console bell pitch */
106 unsigned int vc_bell_duration; /* Console bell duration */ 106 unsigned int vc_bell_duration; /* Console bell duration */
107 unsigned short vc_cur_blink_ms; /* Cursor blink duration */
107 struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ 108 struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
108 struct uni_pagedir *vc_uni_pagedir; 109 struct uni_pagedir *vc_uni_pagedir;
109 struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ 110 struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 76abba4b238e..dcacb1a72e26 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
340 __u64 mm_reg_addr; 340 __u64 mm_reg_addr;
341}; 341};
342 342
343/* Memory Error Section */ 343/* Old Memory Error Section UEFI 2.1, 2.2 */
344struct cper_sec_mem_err_old {
345 __u64 validation_bits;
346 __u64 error_status;
347 __u64 physical_addr;
348 __u64 physical_addr_mask;
349 __u16 node;
350 __u16 card;
351 __u16 module;
352 __u16 bank;
353 __u16 device;
354 __u16 row;
355 __u16 column;
356 __u16 bit_pos;
357 __u64 requestor_id;
358 __u64 responder_id;
359 __u64 target_id;
360 __u8 error_type;
361};
362
363/* Memory Error Section UEFI >= 2.3 */
344struct cper_sec_mem_err { 364struct cper_sec_mem_err {
345 __u64 validation_bits; 365 __u64 validation_bits;
346 __u64 error_status; 366 __u64 error_status;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c0fb6b1b4712..23c30bdcca86 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -40,9 +40,10 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
40extern int cpu_add_dev_attr_group(struct attribute_group *attrs); 40extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
41extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); 41extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
42 42
43extern struct device *cpu_device_create(struct device *parent, void *drvdata, 43extern __printf(4, 5)
44 const struct attribute_group **groups, 44struct device *cpu_device_create(struct device *parent, void *drvdata,
45 const char *fmt, ...); 45 const struct attribute_group **groups,
46 const char *fmt, ...);
46#ifdef CONFIG_HOTPLUG_CPU 47#ifdef CONFIG_HOTPLUG_CPU
47extern void unregister_cpu(struct cpu *cpu); 48extern void unregister_cpu(struct cpu *cpu);
48extern ssize_t arch_cpu_probe(const char *, size_t); 49extern ssize_t arch_cpu_probe(const char *, size_t);
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index bd955270d5aa..c156f5082758 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -28,6 +28,9 @@
28#include <linux/thermal.h> 28#include <linux/thermal.h>
29#include <linux/cpumask.h> 29#include <linux/cpumask.h>
30 30
31typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
32 unsigned long voltage, u32 *power);
33
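
A hedged example of a platform callback matching this typedef, suitable for passing as plat_static_func to the *_power_cooling_register() helpers below; the board name and the zero-leakage assumption are illustrative:

static int board_get_static_power(cpumask_t *cpumask, int interval,
				  unsigned long voltage, u32 *power)
{
	/* hypothetical board: static leakage is negligible at every voltage */
	*power = 0;
	return 0;
}
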
31#ifdef CONFIG_CPU_THERMAL 34#ifdef CONFIG_CPU_THERMAL
32/** 35/**
33 * cpufreq_cooling_register - function to create cpufreq cooling device. 36 * cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -36,6 +39,10 @@
36struct thermal_cooling_device * 39struct thermal_cooling_device *
37cpufreq_cooling_register(const struct cpumask *clip_cpus); 40cpufreq_cooling_register(const struct cpumask *clip_cpus);
38 41
42struct thermal_cooling_device *
43cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
44 u32 capacitance, get_static_t plat_static_func);
45
39/** 46/**
40 * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. 47 * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
41 * @np: a valid struct device_node to the cooling device device tree node. 48 * @np: a valid struct device_node to the cooling device device tree node.
@@ -45,6 +52,12 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus);
45struct thermal_cooling_device * 52struct thermal_cooling_device *
46of_cpufreq_cooling_register(struct device_node *np, 53of_cpufreq_cooling_register(struct device_node *np,
47 const struct cpumask *clip_cpus); 54 const struct cpumask *clip_cpus);
55
56struct thermal_cooling_device *
57of_cpufreq_power_cooling_register(struct device_node *np,
58 const struct cpumask *clip_cpus,
59 u32 capacitance,
60 get_static_t plat_static_func);
48#else 61#else
49static inline struct thermal_cooling_device * 62static inline struct thermal_cooling_device *
50of_cpufreq_cooling_register(struct device_node *np, 63of_cpufreq_cooling_register(struct device_node *np,
@@ -52,6 +65,15 @@ of_cpufreq_cooling_register(struct device_node *np,
52{ 65{
53 return ERR_PTR(-ENOSYS); 66 return ERR_PTR(-ENOSYS);
54} 67}
68
69static inline struct thermal_cooling_device *
70of_cpufreq_power_cooling_register(struct device_node *np,
71 const struct cpumask *clip_cpus,
72 u32 capacitance,
73 get_static_t plat_static_func)
74{
75 return NULL;
76}
55#endif 77#endif
56 78
57/** 79/**
@@ -68,11 +90,28 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus)
68 return ERR_PTR(-ENOSYS); 90 return ERR_PTR(-ENOSYS);
69} 91}
70static inline struct thermal_cooling_device * 92static inline struct thermal_cooling_device *
93cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
94 u32 capacitance, get_static_t plat_static_func)
95{
96 return NULL;
97}
98
99static inline struct thermal_cooling_device *
71of_cpufreq_cooling_register(struct device_node *np, 100of_cpufreq_cooling_register(struct device_node *np,
72 const struct cpumask *clip_cpus) 101 const struct cpumask *clip_cpus)
73{ 102{
74 return ERR_PTR(-ENOSYS); 103 return ERR_PTR(-ENOSYS);
75} 104}
105
106static inline struct thermal_cooling_device *
107of_cpufreq_power_cooling_register(struct device_node *np,
108 const struct cpumask *clip_cpus,
109 u32 capacitance,
110 get_static_t plat_static_func)
111{
112 return NULL;
113}
114
76static inline 115static inline
77void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 116void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
78{ 117{
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 2ee4888c1f47..bde1e567b3a9 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -62,10 +62,13 @@ struct cpufreq_policy {
62 /* CPUs sharing clock, require sw coordination */ 62 /* CPUs sharing clock, require sw coordination */
63 cpumask_var_t cpus; /* Online CPUs only */ 63 cpumask_var_t cpus; /* Online CPUs only */
64 cpumask_var_t related_cpus; /* Online + Offline CPUs */ 64 cpumask_var_t related_cpus; /* Online + Offline CPUs */
65 cpumask_var_t real_cpus; /* Related and present */
65 66
66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs 67 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
67 should set cpufreq */ 68 should set cpufreq */
68 unsigned int cpu; /* cpu nr of CPU managing this policy */ 69 unsigned int cpu; /* cpu managing this policy, must be online */
70 unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */
71
69 struct clk *clk; 72 struct clk *clk;
70 struct cpufreq_cpuinfo cpuinfo;/* see above */ 73 struct cpufreq_cpuinfo cpuinfo;/* see above */
71 74
@@ -80,6 +83,7 @@ struct cpufreq_policy {
80 struct cpufreq_governor *governor; /* see below */ 83 struct cpufreq_governor *governor; /* see below */
81 void *governor_data; 84 void *governor_data;
82 bool governor_enabled; /* governor start/stop flag */ 85 bool governor_enabled; /* governor start/stop flag */
86 char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
83 87
84 struct work_struct update; /* if update_policy() needs to be 88 struct work_struct update; /* if update_policy() needs to be
85 * called, but you're in IRQ context */ 89 * called, but you're in IRQ context */
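
The cpufreq.h hunk above adds a third mask, real_cpus, plus kobj_cpu and last_governor bookkeeping. Purely as an illustration (not taken from the patch), the new mask is walked like any other policy mask:

#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void example_walk_real_cpus(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	/* related *and* present CPUs, whether currently online or not */
	for_each_cpu(cpu, policy->real_cpus)
		pr_debug("cpu%u belongs to policy of cpu%u\n", cpu, policy->cpu);
}
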
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 9c5e89254796..d075d34279df 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -151,10 +151,6 @@ extern void cpuidle_resume(void);
151extern int cpuidle_enable_device(struct cpuidle_device *dev); 151extern int cpuidle_enable_device(struct cpuidle_device *dev);
152extern void cpuidle_disable_device(struct cpuidle_device *dev); 152extern void cpuidle_disable_device(struct cpuidle_device *dev);
153extern int cpuidle_play_dead(void); 153extern int cpuidle_play_dead(void);
154extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
155 struct cpuidle_device *dev);
156extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
157 struct cpuidle_device *dev);
158 154
159extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); 155extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
160#else 156#else
@@ -190,16 +186,28 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
190{return -ENODEV; } 186{return -ENODEV; }
191static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } 187static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
192static inline int cpuidle_play_dead(void) {return -ENODEV; } 188static inline int cpuidle_play_dead(void) {return -ENODEV; }
189static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
190 struct cpuidle_device *dev) {return NULL; }
191#endif
192
193#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
194extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
195 struct cpuidle_device *dev);
196extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
197 struct cpuidle_device *dev);
198#else
193static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, 199static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
194 struct cpuidle_device *dev) 200 struct cpuidle_device *dev)
195{return -ENODEV; } 201{return -ENODEV; }
196static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, 202static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
197 struct cpuidle_device *dev) 203 struct cpuidle_device *dev)
198{return -ENODEV; } 204{return -ENODEV; }
199static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
200 struct cpuidle_device *dev) {return NULL; }
201#endif 205#endif
202 206
207/* kernel/sched/idle.c */
208extern void sched_idle_set_state(struct cpuidle_state *idle_state);
209extern void default_idle_call(void);
210
203#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED 211#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
204void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a); 212void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
205#else 213#else
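
With the cpuidle.h change above, cpuidle_find_deepest_state() and cpuidle_enter_freeze() are only built when both CONFIG_CPU_IDLE and CONFIG_SUSPEND are set, and stub to -ENODEV otherwise. A hedged sketch of a suspend-to-idle path that copes with the stubs; the fallback policy is an assumption, not from the patch:

#include <linux/cpuidle.h>

static void example_freeze_idle(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	/* -ENODEV from the stub (or a runtime failure) falls back to the
	 * architecture's default idle routine, also declared above. */
	if (cpuidle_enter_freeze(drv, dev) < 0)
		default_idle_call();
}
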
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index 84920f3cc83e..a9953c762eee 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Implements the standard CRC ITU-T V.41: 4 * Implements the standard CRC ITU-T V.41:
5 * Width 16 5 * Width 16
6 * Poly 0x0x1021 (x^16 + x^12 + x^15 + 1) 6 * Poly 0x1021 (x^16 + x^12 + x^15 + 1)
7 * Init 0 7 * Init 0
8 * 8 *
9 * This source code is licensed under the GNU General Public License, 9 * This source code is licensed under the GNU General Public License,
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index cf53d0773ce3..d81961e9e37d 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -9,5 +9,6 @@
9extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, 9extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
10 size_t len); 10 size_t len);
11extern __u16 crc_t10dif(unsigned char const *, size_t); 11extern __u16 crc_t10dif(unsigned char const *, size_t);
12extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t);
12 13
13#endif 14#endif
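
crc_t10dif_update() lets callers fold additional buffers into a running CRC instead of concatenating data first. A small illustrative sketch; the buffer names are placeholders:

#include <linux/crc-t10dif.h>
#include <linux/types.h>

static __u16 example_crc_hdr_and_payload(const u8 *hdr, size_t hdr_len,
					 const u8 *payload, size_t payload_len)
{
	__u16 crc = crc_t10dif(hdr, hdr_len);	/* seed with the header */

	return crc_t10dif_update(crc, payload, payload_len);
}
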
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 48a1a7d100f1..48b49305716b 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -1,7 +1,11 @@
1#ifndef CEPH_CRUSH_CRUSH_H 1#ifndef CEPH_CRUSH_CRUSH_H
2#define CEPH_CRUSH_CRUSH_H 2#define CEPH_CRUSH_CRUSH_H
3 3
4#include <linux/types.h> 4#ifdef __KERNEL__
5# include <linux/types.h>
6#else
7# include "crush_compat.h"
8#endif
5 9
6/* 10/*
7 * CRUSH is a pseudo-random data distribution algorithm that 11 * CRUSH is a pseudo-random data distribution algorithm that
@@ -20,7 +24,11 @@
20#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ 24#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */
21 25
22#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ 26#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
27#define CRUSH_MAX_RULESET (1<<8) /* max crush ruleset number */
28#define CRUSH_MAX_RULES CRUSH_MAX_RULESET /* should be the same as max rulesets */
23 29
30#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u)
31#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u)
24 32
25#define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */ 33#define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */
26#define CRUSH_ITEM_NONE 0x7fffffff /* no result */ 34#define CRUSH_ITEM_NONE 0x7fffffff /* no result */
@@ -108,6 +116,15 @@ enum {
108}; 116};
109extern const char *crush_bucket_alg_name(int alg); 117extern const char *crush_bucket_alg_name(int alg);
110 118
119/*
120 * although tree was a legacy algorithm, it has been buggy, so
121 * exclude it.
122 */
123#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \
124 (1 << CRUSH_BUCKET_UNIFORM) | \
125 (1 << CRUSH_BUCKET_LIST) | \
126 (1 << CRUSH_BUCKET_STRAW))
127
111struct crush_bucket { 128struct crush_bucket {
112 __s32 id; /* this'll be negative */ 129 __s32 id; /* this'll be negative */
113 __u16 type; /* non-zero; type=0 is reserved for devices */ 130 __u16 type; /* non-zero; type=0 is reserved for devices */
@@ -174,7 +191,7 @@ struct crush_map {
174 /* choose local attempts using a fallback permutation before 191 /* choose local attempts using a fallback permutation before
175 * re-descent */ 192 * re-descent */
176 __u32 choose_local_fallback_tries; 193 __u32 choose_local_fallback_tries;
177 /* choose attempts before giving up */ 194 /* choose attempts before giving up */
178 __u32 choose_total_tries; 195 __u32 choose_total_tries;
179 /* attempt chooseleaf inner descent once for firstn mode; on 196 /* attempt chooseleaf inner descent once for firstn mode; on
180 * reject retry outer descent. Note that this does *not* 197 * reject retry outer descent. Note that this does *not*
@@ -187,6 +204,25 @@ struct crush_map {
187 * that want to limit reshuffling, a value of 3 or 4 will make the 204 * that want to limit reshuffling, a value of 3 or 4 will make the
188 * mappings line up a bit better with previous mappings. */ 205 * mappings line up a bit better with previous mappings. */
189 __u8 chooseleaf_vary_r; 206 __u8 chooseleaf_vary_r;
207
208#ifndef __KERNEL__
209 /*
210 * version 0 (original) of straw_calc has various flaws. version 1
211 * fixes a few of them.
212 */
213 __u8 straw_calc_version;
214
215 /*
216 * allowed bucket algs is a bitmask, here the bit positions
217 * are CRUSH_BUCKET_*. note that these are *bits* and
218 * CRUSH_BUCKET_* values are not, so we need to or together (1
219 * << CRUSH_BUCKET_WHATEVER). The 0th bit is not used to
220 * minimize confusion (bucket type values start at 1).
221 */
222 __u32 allowed_bucket_algs;
223
224 __u32 *choose_tries;
225#endif
190}; 226};
191 227
192 228
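
The crush.h hunk introduces CRUSH_LEGACY_ALLOWED_BUCKET_ALGS, a bitmask indexed by (1 << CRUSH_BUCKET_*). An illustrative check, not taken from the patch:

#include <linux/crush/crush.h>
#include <linux/types.h>

static bool example_bucket_alg_is_legacy_safe(int alg)
{
	/* tree buckets are deliberately absent from the legacy mask */
	return (CRUSH_LEGACY_ALLOWED_BUCKET_ALGS & (1 << alg)) != 0;
}
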
diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h
index 91e884230d5d..d1d90258242e 100644
--- a/include/linux/crush/hash.h
+++ b/include/linux/crush/hash.h
@@ -1,6 +1,12 @@
1#ifndef CEPH_CRUSH_HASH_H 1#ifndef CEPH_CRUSH_HASH_H
2#define CEPH_CRUSH_HASH_H 2#define CEPH_CRUSH_HASH_H
3 3
4#ifdef __KERNEL__
5# include <linux/types.h>
6#else
7# include "crush_compat.h"
8#endif
9
4#define CRUSH_HASH_RJENKINS1 0 10#define CRUSH_HASH_RJENKINS1 0
5 11
6#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 12#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h
index eab367446eea..5dfd5b1125d2 100644
--- a/include/linux/crush/mapper.h
+++ b/include/linux/crush/mapper.h
@@ -8,7 +8,7 @@
8 * LGPL2 8 * LGPL2
9 */ 9 */
10 10
11#include <linux/crush/crush.h> 11#include "crush.h"
12 12
13extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size); 13extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
14extern int crush_do_rule(const struct crush_map *map, 14extern int crush_do_rule(const struct crush_map *map,
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index df334cbacc6d..d67ae119cf4e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -160,6 +160,7 @@ struct dentry_operations {
160 char *(*d_dname)(struct dentry *, char *, int); 160 char *(*d_dname)(struct dentry *, char *, int);
161 struct vfsmount *(*d_automount)(struct path *); 161 struct vfsmount *(*d_automount)(struct path *);
162 int (*d_manage)(struct dentry *, bool); 162 int (*d_manage)(struct dentry *, bool);
163 struct inode *(*d_select_inode)(struct dentry *, unsigned);
163} ____cacheline_aligned; 164} ____cacheline_aligned;
164 165
165/* 166/*
@@ -225,6 +226,7 @@ struct dentry_operations {
225 226
226#define DCACHE_MAY_FREE 0x00800000 227#define DCACHE_MAY_FREE 0x00800000
227#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ 228#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
229#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
228 230
229extern seqlock_t rename_lock; 231extern seqlock_t rename_lock;
230 232
@@ -325,7 +327,8 @@ static inline unsigned d_count(const struct dentry *dentry)
325/* 327/*
326 * helper function for dentry_operations.d_dname() members 328 * helper function for dentry_operations.d_dname() members
327 */ 329 */
328extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 330extern __printf(4, 5)
331char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
329extern char *simple_dname(struct dentry *, char *, int); 332extern char *simple_dname(struct dentry *, char *, int);
330 333
331extern char *__d_path(const struct path *, const struct path *, char *, int); 334extern char *__d_path(const struct path *, const struct path *, char *, int);
@@ -505,6 +508,11 @@ static inline bool d_really_is_positive(const struct dentry *dentry)
505 return dentry->d_inode != NULL; 508 return dentry->d_inode != NULL;
506} 509}
507 510
511static inline int simple_positive(struct dentry *dentry)
512{
513 return d_really_is_positive(dentry) && !d_unhashed(dentry);
514}
515
508extern void d_set_fallthru(struct dentry *dentry); 516extern void d_set_fallthru(struct dentry *dentry);
509 517
510static inline bool d_is_fallthru(const struct dentry *dentry) 518static inline bool d_is_fallthru(const struct dentry *dentry)
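
One of the dcache.h changes is easy to show in isolation: dynamic_dname() now carries a __printf(4, 5) annotation, so format-string mistakes in d_dname implementations are caught at compile time. A hypothetical callback (the "example:[%lu]" format is made up):

#include <linux/dcache.h>
#include <linux/fs.h>

static char *example_d_dname(struct dentry *dentry, char *buffer, int buflen)
{
	/* i_ino is unsigned long, so %lu is what the annotation now enforces */
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
			     d_inode(dentry)->i_ino);
}
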
diff --git a/include/linux/device.h b/include/linux/device.h
index 6558af90c8fe..a2b4ea70a946 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -196,12 +196,41 @@ extern struct kset *bus_get_kset(struct bus_type *bus);
196extern struct klist *bus_get_device_klist(struct bus_type *bus); 196extern struct klist *bus_get_device_klist(struct bus_type *bus);
197 197
198/** 198/**
199 * enum probe_type - device driver probe type to try
200 * Device drivers may opt in for special handling of their
201 * respective probe routines. This tells the core what to
202 * expect and prefer.
203 *
204 * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
205 * whether probed synchronously or asynchronously.
 206 * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose
 207 * probing order is not essential for booting the system may
208 * opt into executing their probes asynchronously.
209 * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
210 * their probe routines to run synchronously with driver and
211 * device registration (with the exception of -EPROBE_DEFER
212 * handling - re-probing always ends up being done asynchronously).
213 *
214 * Note that the end goal is to switch the kernel to use asynchronous
215 * probing by default, so annotating drivers with
216 * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
217 * to speed up boot process while we are validating the rest of the
218 * drivers.
219 */
220enum probe_type {
221 PROBE_DEFAULT_STRATEGY,
222 PROBE_PREFER_ASYNCHRONOUS,
223 PROBE_FORCE_SYNCHRONOUS,
224};
225
226/**
199 * struct device_driver - The basic device driver structure 227 * struct device_driver - The basic device driver structure
200 * @name: Name of the device driver. 228 * @name: Name of the device driver.
201 * @bus: The bus which the device of this driver belongs to. 229 * @bus: The bus which the device of this driver belongs to.
202 * @owner: The module owner. 230 * @owner: The module owner.
203 * @mod_name: Used for built-in modules. 231 * @mod_name: Used for built-in modules.
204 * @suppress_bind_attrs: Disables bind/unbind via sysfs. 232 * @suppress_bind_attrs: Disables bind/unbind via sysfs.
233 * @probe_type: Type of the probe (synchronous or asynchronous) to use.
205 * @of_match_table: The open firmware table. 234 * @of_match_table: The open firmware table.
206 * @acpi_match_table: The ACPI match table. 235 * @acpi_match_table: The ACPI match table.
207 * @probe: Called to query the existence of a specific device, 236 * @probe: Called to query the existence of a specific device,
@@ -235,6 +264,7 @@ struct device_driver {
235 const char *mod_name; /* used for built-in modules */ 264 const char *mod_name; /* used for built-in modules */
236 265
237 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ 266 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
267 enum probe_type probe_type;
238 268
239 const struct of_device_id *of_match_table; 269 const struct of_device_id *of_match_table;
240 const struct acpi_device_id *acpi_match_table; 270 const struct acpi_device_id *acpi_match_table;
@@ -607,8 +637,9 @@ extern int devres_release_group(struct device *dev, void *id);
607 637
608/* managed devm_k.alloc/kfree for device drivers */ 638/* managed devm_k.alloc/kfree for device drivers */
609extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); 639extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
610extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, 640extern __printf(3, 0)
611 va_list ap); 641char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
642 va_list ap);
612extern __printf(3, 4) 643extern __printf(3, 4)
613char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); 644char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
614static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) 645static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
@@ -975,17 +1006,16 @@ extern int __must_check device_bind_driver(struct device *dev);
975extern void device_release_driver(struct device *dev); 1006extern void device_release_driver(struct device *dev);
976extern int __must_check device_attach(struct device *dev); 1007extern int __must_check device_attach(struct device *dev);
977extern int __must_check driver_attach(struct device_driver *drv); 1008extern int __must_check driver_attach(struct device_driver *drv);
1009extern void device_initial_probe(struct device *dev);
978extern int __must_check device_reprobe(struct device *dev); 1010extern int __must_check device_reprobe(struct device *dev);
979 1011
980/* 1012/*
981 * Easy functions for dynamically creating devices on the fly 1013 * Easy functions for dynamically creating devices on the fly
982 */ 1014 */
983extern struct device *device_create_vargs(struct class *cls, 1015extern __printf(5, 0)
984 struct device *parent, 1016struct device *device_create_vargs(struct class *cls, struct device *parent,
985 dev_t devt, 1017 dev_t devt, void *drvdata,
986 void *drvdata, 1018 const char *fmt, va_list vargs);
987 const char *fmt,
988 va_list vargs);
989extern __printf(5, 6) 1019extern __printf(5, 6)
990struct device *device_create(struct class *cls, struct device *parent, 1020struct device *device_create(struct class *cls, struct device *parent,
991 dev_t devt, void *drvdata, 1021 dev_t devt, void *drvdata,
@@ -1269,4 +1299,26 @@ static void __exit __driver##_exit(void) \
1269} \ 1299} \
1270module_exit(__driver##_exit); 1300module_exit(__driver##_exit);
1271 1301
1302/**
1303 * builtin_driver() - Helper macro for drivers that don't do anything
1304 * special in init and have no exit. This eliminates some boilerplate.
1305 * Each driver may only use this macro once, and calling it replaces
1306 * device_initcall (or in some cases, the legacy __initcall). This is
1307 * meant to be a direct parallel of module_driver() above but without
1308 * the __exit stuff that is not used for builtin cases.
1309 *
1310 * @__driver: driver name
1311 * @__register: register function for this driver type
1312 * @...: Additional arguments to be passed to __register
1313 *
1314 * Use this macro to construct bus specific macros for registering
1315 * drivers, and do not use it on its own.
1316 */
1317#define builtin_driver(__driver, __register, ...) \
1318static int __init __driver##_init(void) \
1319{ \
1320 return __register(&(__driver) , ##__VA_ARGS__); \
1321} \
1322device_initcall(__driver##_init);
1323
1272#endif /* _DEVICE_H_ */ 1324#endif /* _DEVICE_H_ */
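
To illustrate the driver-core additions above (probe_type and builtin_driver()), here is a sketch of a hypothetical built-in platform driver. The kernel-doc asks that builtin_driver() be wrapped in a bus-specific macro rather than used directly, so the wrapper below is part of the sketch; the driver name and the empty probe routine are placeholders.

#include <linux/device.h>
#include <linux/init.h>
#include <linux/platform_device.h>

/* bus-specific wrapper, as the builtin_driver() kernel-doc recommends */
#define example_builtin_platform_driver(__drv) \
	builtin_driver(__drv, platform_driver_register)

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* nothing to do in this sketch */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name		= "example",
		/* slow device: probe may run in parallel with the rest of boot */
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};
example_builtin_platform_driver(example_driver);
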
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 2f0b431b73e0..f98bd7068d55 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -115,6 +115,8 @@ struct dma_buf_ops {
115 * @attachments: list of dma_buf_attachment that denotes all devices attached. 115 * @attachments: list of dma_buf_attachment that denotes all devices attached.
116 * @ops: dma_buf_ops associated with this buffer object. 116 * @ops: dma_buf_ops associated with this buffer object.
117 * @exp_name: name of the exporter; useful for debugging. 117 * @exp_name: name of the exporter; useful for debugging.
118 * @owner: pointer to exporter module; used for refcounting when exporter is a
119 * kernel module.
118 * @list_node: node for dma_buf accounting and debugging. 120 * @list_node: node for dma_buf accounting and debugging.
119 * @priv: exporter specific private data for this buffer object. 121 * @priv: exporter specific private data for this buffer object.
120 * @resv: reservation object linked to this dma-buf 122 * @resv: reservation object linked to this dma-buf
@@ -129,6 +131,7 @@ struct dma_buf {
129 unsigned vmapping_counter; 131 unsigned vmapping_counter;
130 void *vmap_ptr; 132 void *vmap_ptr;
131 const char *exp_name; 133 const char *exp_name;
134 struct module *owner;
132 struct list_head list_node; 135 struct list_head list_node;
133 void *priv; 136 void *priv;
134 struct reservation_object *resv; 137 struct reservation_object *resv;
@@ -164,7 +167,8 @@ struct dma_buf_attachment {
164 167
165/** 168/**
166 * struct dma_buf_export_info - holds information needed to export a dma_buf 169 * struct dma_buf_export_info - holds information needed to export a dma_buf
167 * @exp_name: name of the exporting module - useful for debugging. 170 * @exp_name: name of the exporter - useful for debugging.
171 * @owner: pointer to exporter module - used for refcounting kernel module
168 * @ops: Attach allocator-defined dma buf ops to the new buffer 172 * @ops: Attach allocator-defined dma buf ops to the new buffer
169 * @size: Size of the buffer 173 * @size: Size of the buffer
170 * @flags: mode flags for the file 174 * @flags: mode flags for the file
@@ -176,6 +180,7 @@ struct dma_buf_attachment {
176 */ 180 */
177struct dma_buf_export_info { 181struct dma_buf_export_info {
178 const char *exp_name; 182 const char *exp_name;
183 struct module *owner;
179 const struct dma_buf_ops *ops; 184 const struct dma_buf_ops *ops;
180 size_t size; 185 size_t size;
181 int flags; 186 int flags;
@@ -187,7 +192,8 @@ struct dma_buf_export_info {
187 * helper macro for exporters; zeros and fills in most common values 192 * helper macro for exporters; zeros and fills in most common values
188 */ 193 */
189#define DEFINE_DMA_BUF_EXPORT_INFO(a) \ 194#define DEFINE_DMA_BUF_EXPORT_INFO(a) \
190 struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME } 195 struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \
196 .owner = THIS_MODULE }
191 197
192/** 198/**
193 * get_dma_buf - convenience wrapper for get_file. 199 * get_dma_buf - convenience wrapper for get_file.
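
The new dma-buf owner field is normally filled in automatically: DEFINE_DMA_BUF_EXPORT_INFO() now seeds both .exp_name and .owner. A rough exporter fragment follows; the ops, size, flags and priv values are assumptions and error handling is left to the caller:

#include <linux/dma-buf.h>
#include <linux/fcntl.h>

static struct dma_buf *example_export(const struct dma_buf_ops *ops,
				      size_t size, void *priv)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);	/* .exp_name + .owner preset */

	exp_info.ops   = ops;
	exp_info.size  = size;
	exp_info.flags = O_RDWR;
	exp_info.priv  = priv;

	return dma_buf_export(&exp_info);
}
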
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
new file mode 100644
index 000000000000..3edc99294bf6
--- /dev/null
+++ b/include/linux/dma/pxa-dma.h
@@ -0,0 +1,27 @@
1#ifndef _PXA_DMA_H_
2#define _PXA_DMA_H_
3
4enum pxad_chan_prio {
5 PXAD_PRIO_HIGHEST = 0,
6 PXAD_PRIO_NORMAL,
7 PXAD_PRIO_LOW,
8 PXAD_PRIO_LOWEST,
9};
10
11struct pxad_param {
12 unsigned int drcmr;
13 enum pxad_chan_prio prio;
14};
15
16struct dma_chan;
17
18#ifdef CONFIG_PXA_DMA
19bool pxad_filter_fn(struct dma_chan *chan, void *param);
20#else
21static inline bool pxad_filter_fn(struct dma_chan *chan, void *param)
22{
23 return false;
24}
25#endif
26
27#endif /* _PXA_DMA_H_ */
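
The new pxa-dma.h header exposes pxad_filter_fn() and struct pxad_param for clients of the reworked PXA dmaengine driver. A sketch of requesting a channel with it; the DRCMR request-line number is invented:

#include <linux/dma/pxa-dma.h>
#include <linux/dmaengine.h>

static struct dma_chan *example_request_pxa_channel(void)
{
	struct pxad_param param = {
		.drcmr	= 24,			/* hypothetical request line */
		.prio	= PXAD_PRIO_NORMAL,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, pxad_filter_fn, &param);
}
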
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index ad419757241f..e2f5eb419976 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -65,6 +65,7 @@ enum dma_transaction_type {
65 DMA_PQ, 65 DMA_PQ,
66 DMA_XOR_VAL, 66 DMA_XOR_VAL,
67 DMA_PQ_VAL, 67 DMA_PQ_VAL,
68 DMA_MEMSET,
68 DMA_INTERRUPT, 69 DMA_INTERRUPT,
69 DMA_SG, 70 DMA_SG,
70 DMA_PRIVATE, 71 DMA_PRIVATE,
@@ -122,10 +123,18 @@ enum dma_transfer_direction {
122 * chunk and before first src/dst address for next chunk. 123 * chunk and before first src/dst address for next chunk.
123 * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false. 124 * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
124 * Ignored for src(assumed 0), if src_inc is true and src_sgl is false. 125 * Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
126 * @dst_icg: Number of bytes to jump after last dst address of this
127 * chunk and before the first dst address for next chunk.
128 * Ignored if dst_inc is true and dst_sgl is false.
129 * @src_icg: Number of bytes to jump after last src address of this
130 * chunk and before the first src address for next chunk.
131 * Ignored if src_inc is true and src_sgl is false.
125 */ 132 */
126struct data_chunk { 133struct data_chunk {
127 size_t size; 134 size_t size;
128 size_t icg; 135 size_t icg;
136 size_t dst_icg;
137 size_t src_icg;
129}; 138};
130 139
131/** 140/**
@@ -222,6 +231,16 @@ struct dma_chan_percpu {
222}; 231};
223 232
224/** 233/**
234 * struct dma_router - DMA router structure
235 * @dev: pointer to the DMA router device
236 * @route_free: function to be called when the route can be disconnected
237 */
238struct dma_router {
239 struct device *dev;
240 void (*route_free)(struct device *dev, void *route_data);
241};
242
243/**
225 * struct dma_chan - devices supply DMA channels, clients use them 244 * struct dma_chan - devices supply DMA channels, clients use them
226 * @device: ptr to the dma device who supplies this channel, always !%NULL 245 * @device: ptr to the dma device who supplies this channel, always !%NULL
227 * @cookie: last cookie value returned to client 246 * @cookie: last cookie value returned to client
@@ -232,6 +251,8 @@ struct dma_chan_percpu {
232 * @local: per-cpu pointer to a struct dma_chan_percpu 251 * @local: per-cpu pointer to a struct dma_chan_percpu
233 * @client_count: how many clients are using this channel 252 * @client_count: how many clients are using this channel
234 * @table_count: number of appearances in the mem-to-mem allocation table 253 * @table_count: number of appearances in the mem-to-mem allocation table
254 * @router: pointer to the DMA router structure
255 * @route_data: channel specific data for the router
235 * @private: private data for certain client-channel associations 256 * @private: private data for certain client-channel associations
236 */ 257 */
237struct dma_chan { 258struct dma_chan {
@@ -247,6 +268,11 @@ struct dma_chan {
247 struct dma_chan_percpu __percpu *local; 268 struct dma_chan_percpu __percpu *local;
248 int client_count; 269 int client_count;
249 int table_count; 270 int table_count;
271
272 /* DMA router */
273 struct dma_router *router;
274 void *route_data;
275
250 void *private; 276 void *private;
251}; 277};
252 278
@@ -570,6 +596,7 @@ struct dma_tx_state {
570 * @copy_align: alignment shift for memcpy operations 596 * @copy_align: alignment shift for memcpy operations
571 * @xor_align: alignment shift for xor operations 597 * @xor_align: alignment shift for xor operations
572 * @pq_align: alignment shift for pq operations 598 * @pq_align: alignment shift for pq operations
599 * @fill_align: alignment shift for memset operations
573 * @dev_id: unique device ID 600 * @dev_id: unique device ID
574 * @dev: struct device reference for dma mapping api 601 * @dev: struct device reference for dma mapping api
575 * @src_addr_widths: bit mask of src addr widths the device supports 602 * @src_addr_widths: bit mask of src addr widths the device supports
@@ -588,6 +615,7 @@ struct dma_tx_state {
588 * @device_prep_dma_xor_val: prepares a xor validation operation 615 * @device_prep_dma_xor_val: prepares a xor validation operation
589 * @device_prep_dma_pq: prepares a pq operation 616 * @device_prep_dma_pq: prepares a pq operation
590 * @device_prep_dma_pq_val: prepares a pqzero_sum operation 617 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
618 * @device_prep_dma_memset: prepares a memset operation
591 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 619 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
592 * @device_prep_slave_sg: prepares a slave dma operation 620 * @device_prep_slave_sg: prepares a slave dma operation
593 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. 621 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -620,6 +648,7 @@ struct dma_device {
620 u8 copy_align; 648 u8 copy_align;
621 u8 xor_align; 649 u8 xor_align;
622 u8 pq_align; 650 u8 pq_align;
651 u8 fill_align;
623 #define DMA_HAS_PQ_CONTINUE (1 << 15) 652 #define DMA_HAS_PQ_CONTINUE (1 << 15)
624 653
625 int dev_id; 654 int dev_id;
@@ -650,6 +679,9 @@ struct dma_device {
650 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, 679 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
651 unsigned int src_cnt, const unsigned char *scf, size_t len, 680 unsigned int src_cnt, const unsigned char *scf, size_t len,
652 enum sum_check_flags *pqres, unsigned long flags); 681 enum sum_check_flags *pqres, unsigned long flags);
682 struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
683 struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
684 unsigned long flags);
653 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( 685 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
654 struct dma_chan *chan, unsigned long flags); 686 struct dma_chan *chan, unsigned long flags);
655 struct dma_async_tx_descriptor *(*device_prep_dma_sg)( 687 struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -745,6 +777,17 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
745 return chan->device->device_prep_interleaved_dma(chan, xt, flags); 777 return chan->device->device_prep_interleaved_dma(chan, xt, flags);
746} 778}
747 779
780static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
781 struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
782 unsigned long flags)
783{
784 if (!chan || !chan->device)
785 return NULL;
786
787 return chan->device->device_prep_dma_memset(chan, dest, value,
788 len, flags);
789}
790
748static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( 791static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
749 struct dma_chan *chan, 792 struct dma_chan *chan,
750 struct scatterlist *dst_sg, unsigned int dst_nents, 793 struct scatterlist *dst_sg, unsigned int dst_nents,
@@ -820,6 +863,12 @@ static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
820 return dmaengine_check_align(dev->pq_align, off1, off2, len); 863 return dmaengine_check_align(dev->pq_align, off1, off2, len);
821} 864}
822 865
866static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
867 size_t off2, size_t len)
868{
869 return dmaengine_check_align(dev->fill_align, off1, off2, len);
870}
871
823static inline void 872static inline void
824dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) 873dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
825{ 874{
@@ -874,6 +923,33 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
874 BUG(); 923 BUG();
875} 924}
876 925
926static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
927 size_t dir_icg)
928{
929 if (inc) {
930 if (dir_icg)
931 return dir_icg;
932 else if (sgl)
933 return icg;
934 }
935
936 return 0;
937}
938
939static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
940 struct data_chunk *chunk)
941{
942 return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
943 chunk->icg, chunk->dst_icg);
944}
945
946static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
947 struct data_chunk *chunk)
948{
949 return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
950 chunk->icg, chunk->src_icg);
951}
952
877/* --- public DMA engine API --- */ 953/* --- public DMA engine API --- */
878 954
879#ifdef CONFIG_DMA_ENGINE 955#ifdef CONFIG_DMA_ENGINE
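
Putting the new DMA_MEMSET pieces above together, a client prepares and submits a memset descriptor much like a memcpy one. Sketch only; completion handling and the placement of the alignment check are simplifying assumptions:

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_dma_memset(struct dma_chan *chan, dma_addr_t dst,
			      int value, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	if (!is_dma_fill_aligned(chan->device, dst, 0, len))
		return -EINVAL;

	tx = dmaengine_prep_dma_memset(chan, dst, value, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return dma_submit_error(cookie);
}
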
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 52456aa566a0..e1043f79122f 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,8 +11,8 @@
11#ifndef LINUX_DMAPOOL_H 11#ifndef LINUX_DMAPOOL_H
12#define LINUX_DMAPOOL_H 12#define LINUX_DMAPOOL_H
13 13
14#include <linux/scatterlist.h>
14#include <asm/io.h> 15#include <asm/io.h>
15#include <asm/scatterlist.h>
16 16
17struct device; 17struct device;
18 18
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index f820f0a336c9..5055ac34142d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -2,6 +2,7 @@
2#define __DMI_H__ 2#define __DMI_H__
3 3
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/kobject.h>
5#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
6 7
7/* enum dmi_field is in mod_devicetable.h */ 8/* enum dmi_field is in mod_devicetable.h */
@@ -74,7 +75,7 @@ struct dmi_header {
74 u8 type; 75 u8 type;
75 u8 length; 76 u8 length;
76 u16 handle; 77 u16 handle;
77}; 78} __packed;
78 79
79struct dmi_device { 80struct dmi_device {
80 struct list_head list; 81 struct list_head list;
@@ -93,6 +94,7 @@ struct dmi_dev_onboard {
93 int devfn; 94 int devfn;
94}; 95};
95 96
97extern struct kobject *dmi_kobj;
96extern int dmi_check_system(const struct dmi_system_id *list); 98extern int dmi_check_system(const struct dmi_system_id *list);
97const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); 99const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
98extern const char * dmi_get_system_info(int field); 100extern const char * dmi_get_system_info(int field);
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 2092965afca3..85ef051ac6fb 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -85,7 +85,8 @@ typedef struct {
85#define EFI_MEMORY_MAPPED_IO 11 85#define EFI_MEMORY_MAPPED_IO 11
86#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 86#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
87#define EFI_PAL_CODE 13 87#define EFI_PAL_CODE 13
88#define EFI_MAX_MEMORY_TYPE 14 88#define EFI_PERSISTENT_MEMORY 14
89#define EFI_MAX_MEMORY_TYPE 15
89 90
90/* Attribute values: */ 91/* Attribute values: */
91#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */ 92#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
@@ -96,6 +97,8 @@ typedef struct {
96#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ 97#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
97#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ 98#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
98#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ 99#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
100#define EFI_MEMORY_MORE_RELIABLE \
101 ((u64)0x0000000000010000ULL) /* higher reliability */
99#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ 102#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
100#define EFI_MEMORY_DESCRIPTOR_VERSION 1 103#define EFI_MEMORY_DESCRIPTOR_VERSION 1
101 104
@@ -868,6 +871,7 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
868extern void efi_late_init(void); 871extern void efi_late_init(void);
869extern void efi_free_boot_services(void); 872extern void efi_free_boot_services(void);
870extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size); 873extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
874extern void efi_find_mirror(void);
871#else 875#else
872static inline void efi_late_init(void) {} 876static inline void efi_late_init(void) {}
873static inline void efi_free_boot_services(void) {} 877static inline void efi_free_boot_services(void) {}
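
EFI_MEMORY_MORE_RELIABLE marks mirrored ("more reliable") ranges in the memory map, which efi_find_mirror() scans at boot. A trivial illustrative test on a memory descriptor; the md->attribute field name comes from the same header but is not shown in this hunk:

#include <linux/efi.h>
#include <linux/types.h>

static bool example_md_is_mirrored(const efi_memory_desc_t *md)
{
	return !!(md->attribute & EFI_MEMORY_MORE_RELIABLE);
}
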
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 45a91474487d..638b324f0291 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -39,6 +39,7 @@ typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct reques
39typedef int (elevator_init_fn) (struct request_queue *, 39typedef int (elevator_init_fn) (struct request_queue *,
40 struct elevator_type *e); 40 struct elevator_type *e);
41typedef void (elevator_exit_fn) (struct elevator_queue *); 41typedef void (elevator_exit_fn) (struct elevator_queue *);
42typedef void (elevator_registered_fn) (struct request_queue *);
42 43
43struct elevator_ops 44struct elevator_ops
44{ 45{
@@ -68,6 +69,7 @@ struct elevator_ops
68 69
69 elevator_init_fn *elevator_init_fn; 70 elevator_init_fn *elevator_init_fn;
70 elevator_exit_fn *elevator_exit_fn; 71 elevator_exit_fn *elevator_exit_fn;
72 elevator_registered_fn *elevator_registered_fn;
71}; 73};
72 74
73#define ELV_NAME_MAX (16) 75#define ELV_NAME_MAX (16)
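
elevator_registered_fn gives an I/O scheduler a hook that runs once its elevator has been registered on a queue. A minimal, hypothetical wiring of the hook into an ops table:

#include <linux/elevator.h>

static void example_elv_registered(struct request_queue *q)
{
	/* the elevator is now live on this queue; late setup could go here */
}

static struct elevator_ops example_elv_ops = {
	.elevator_registered_fn	= example_elv_registered,
};
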
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 606563ef8a72..9012f8775208 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -110,7 +110,29 @@ static inline bool is_zero_ether_addr(const u8 *addr)
110 */ 110 */
111static inline bool is_multicast_ether_addr(const u8 *addr) 111static inline bool is_multicast_ether_addr(const u8 *addr)
112{ 112{
113 return 0x01 & addr[0]; 113#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
114 u32 a = *(const u32 *)addr;
115#else
116 u16 a = *(const u16 *)addr;
117#endif
118#ifdef __BIG_ENDIAN
119 return 0x01 & (a >> ((sizeof(a) * 8) - 8));
120#else
121 return 0x01 & a;
122#endif
123}
124
125static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
126{
127#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
128#ifdef __BIG_ENDIAN
129 return 0x01 & ((*(const u64 *)addr) >> 56);
130#else
131 return 0x01 & (*(const u64 *)addr);
132#endif
133#else
134 return is_multicast_ether_addr(addr);
135#endif
114} 136}
115 137
116/** 138/**
@@ -169,6 +191,24 @@ static inline bool is_valid_ether_addr(const u8 *addr)
169} 191}
170 192
171/** 193/**
194 * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
195 * @proto: Ethertype/length value to be tested
196 *
197 * Check that the value from the Ethertype/length field is a valid Ethertype.
198 *
 199 * Return true if the value is an 802.3 supported Ethertype.
200 */
201static inline bool eth_proto_is_802_3(__be16 proto)
202{
203#ifndef __BIG_ENDIAN
204 /* if CPU is little endian mask off bits representing LSB */
205 proto &= htons(0xFF00);
206#endif
207 /* cast both to u16 and compare since LSB can be ignored */
208 return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
209}
210
211/**
172 * eth_random_addr - Generate software assigned random Ethernet address 212 * eth_random_addr - Generate software assigned random Ethernet address
173 * @addr: Pointer to a six-byte array containing the Ethernet address 213 * @addr: Pointer to a six-byte array containing the Ethernet address
174 * 214 *
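
eth_proto_is_802_3() replaces open-coded comparisons against ETH_P_802_3_MIN. For illustration only (the function name is a placeholder), classifying a received header looks like this:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/types.h>

static bool example_has_real_ethertype(const struct ethhdr *eth)
{
	/* true for an Ethertype, false for an 802.3 length field */
	return eth_proto_is_802_3(eth->h_proto);
}
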
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 36f49c405dfb..b16d929fa75f 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,6 +1,9 @@
1/* 1/*
2 * External connector (extcon) class driver 2 * External connector (extcon) class driver
3 * 3 *
4 * Copyright (C) 2015 Samsung Electronics
5 * Author: Chanwoo Choi <cw00.choi@samsung.com>
6 *
4 * Copyright (C) 2012 Samsung Electronics 7 * Copyright (C) 2012 Samsung Electronics
5 * Author: Donggeun Kim <dg77.kim@samsung.com> 8 * Author: Donggeun Kim <dg77.kim@samsung.com>
6 * Author: MyungJoo Ham <myungjoo.ham@samsung.com> 9 * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -27,50 +30,35 @@
27#include <linux/notifier.h> 30#include <linux/notifier.h>
28#include <linux/sysfs.h> 31#include <linux/sysfs.h>
29 32
30#define SUPPORTED_CABLE_MAX 32
31#define CABLE_NAME_MAX 30
32
33/* 33/*
34 * The standard cable name is to help support general notifier 34 * Define the unique id of supported external connectors
35 * and notifiee device drivers to share the common names.
36 * Please use standard cable names unless your notifier device has
37 * a very unique and abnormal cable or
38 * the cable type is supposed to be used with only one unique
39 * pair of notifier/notifiee devices.
40 *
41 * Please add any other "standard" cables used with extcon dev.
42 *
43 * You may add a dot and number to specify version or specification
44 * of the specific cable if it is required. (e.g., "Fast-charger.18"
45 * and "Fast-charger.10" for 1.8A and 1.0A chargers)
46 * However, the notifiee and notifier should be able to handle such
47 * string and if the notifiee can negotiate the protocol or identify,
48 * you don't need such convention. This convention is helpful when
49 * notifier can distinguish but notifiee cannot.
50 */ 35 */
51enum extcon_cable_name { 36#define EXTCON_NONE 0
52 EXTCON_USB = 0, 37
53 EXTCON_USB_HOST, 38#define EXTCON_USB 1 /* USB connector */
54 EXTCON_TA, /* Travel Adaptor */ 39#define EXTCON_USB_HOST 2
55 EXTCON_FAST_CHARGER, 40
56 EXTCON_SLOW_CHARGER, 41#define EXTCON_TA 3 /* Charger connector */
57 EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */ 42#define EXTCON_FAST_CHARGER 4
58 EXTCON_HDMI, 43#define EXTCON_SLOW_CHARGER 5
59 EXTCON_MHL, 44#define EXTCON_CHARGE_DOWNSTREAM 6
60 EXTCON_DVI, 45
61 EXTCON_VGA, 46#define EXTCON_LINE_IN 7 /* Audio/Video connector */
62 EXTCON_DOCK, 47#define EXTCON_LINE_OUT 8
63 EXTCON_LINE_IN, 48#define EXTCON_MICROPHONE 9
64 EXTCON_LINE_OUT, 49#define EXTCON_HEADPHONE 10
65 EXTCON_MIC_IN, 50#define EXTCON_HDMI 11
66 EXTCON_HEADPHONE_OUT, 51#define EXTCON_MHL 12
67 EXTCON_SPDIF_IN, 52#define EXTCON_DVI 13
68 EXTCON_SPDIF_OUT, 53#define EXTCON_VGA 14
69 EXTCON_VIDEO_IN, 54#define EXTCON_SPDIF_IN 15
70 EXTCON_VIDEO_OUT, 55#define EXTCON_SPDIF_OUT 16
71 EXTCON_MECHANICAL, 56#define EXTCON_VIDEO_IN 17
72}; 57#define EXTCON_VIDEO_OUT 18
73extern const char extcon_cable_name[][CABLE_NAME_MAX + 1]; 58
59#define EXTCON_DOCK 19 /* Misc connector */
60#define EXTCON_JIG 20
61#define EXTCON_MECHANICAL 21
74 62
75struct extcon_cable; 63struct extcon_cable;
76 64
@@ -78,7 +66,7 @@ struct extcon_cable;
78 * struct extcon_dev - An extcon device represents one external connector. 66 * struct extcon_dev - An extcon device represents one external connector.
79 * @name: The name of this extcon device. Parent device name is 67 * @name: The name of this extcon device. Parent device name is
80 * used if NULL. 68 * used if NULL.
81 * @supported_cable: Array of supported cable names ending with NULL. 69 * @supported_cable: Array of supported cable names ending with EXTCON_NONE.
82 * If supported_cable is NULL, cable name related APIs 70 * If supported_cable is NULL, cable name related APIs
83 * are disabled. 71 * are disabled.
84 * @mutually_exclusive: Array of mutually exclusive set of cables that cannot 72 * @mutually_exclusive: Array of mutually exclusive set of cables that cannot
@@ -89,16 +77,14 @@ struct extcon_cable;
89 * be attached simulataneously. {0x7, 0} is equivalent to 77 * be attached simulataneously. {0x7, 0} is equivalent to
90 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there 78 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
91 * can be no simultaneous connections. 79 * can be no simultaneous connections.
92 * @print_name: An optional callback to override the method to print the
93 * name of the extcon device.
94 * @print_state: An optional callback to override the method to print the 80 * @print_state: An optional callback to override the method to print the
95 * status of the extcon device. 81 * status of the extcon device.
96 * @dev: Device of this extcon. 82 * @dev: Device of this extcon.
97 * @state: Attach/detach state of this extcon. Do not provide at 83 * @state: Attach/detach state of this extcon. Do not provide at
98 * register-time. 84 * register-time.
99 * @nh: Notifier for the state change events from this extcon 85 * @nh: Notifier for the state change events from this extcon
100 * @entry: To support list of extcon devices so that users can search 86 * @entry: To support list of extcon devices so that users can
101 * for extcon devices based on the extcon name. 87 * search for extcon devices based on the extcon name.
102 * @lock: 88 * @lock:
103 * @max_supported: Internal value to store the number of cables. 89 * @max_supported: Internal value to store the number of cables.
104 * @extcon_dev_type: Device_type struct to provide attribute_groups 90 * @extcon_dev_type: Device_type struct to provide attribute_groups
@@ -113,16 +99,15 @@ struct extcon_cable;
113struct extcon_dev { 99struct extcon_dev {
114 /* Optional user initializing data */ 100 /* Optional user initializing data */
115 const char *name; 101 const char *name;
116 const char **supported_cable; 102 const unsigned int *supported_cable;
117 const u32 *mutually_exclusive; 103 const u32 *mutually_exclusive;
118 104
119 /* Optional callbacks to override class functions */ 105 /* Optional callbacks to override class functions */
120 ssize_t (*print_name)(struct extcon_dev *edev, char *buf);
121 ssize_t (*print_state)(struct extcon_dev *edev, char *buf); 106 ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
122 107
123 /* Internal data. Please do not set. */ 108 /* Internal data. Please do not set. */
124 struct device dev; 109 struct device dev;
125 struct raw_notifier_head nh; 110 struct raw_notifier_head *nh;
126 struct list_head entry; 111 struct list_head entry;
127 int max_supported; 112 int max_supported;
128 spinlock_t lock; /* could be called by irq handler */ 113 spinlock_t lock; /* could be called by irq handler */
@@ -161,8 +146,6 @@ struct extcon_cable {
161/** 146/**
162 * struct extcon_specific_cable_nb - An internal data for 147 * struct extcon_specific_cable_nb - An internal data for
163 * extcon_register_interest(). 148 * extcon_register_interest().
164 * @internal_nb: A notifier block bridging extcon notifier
165 * and cable notifier.
166 * @user_nb: user provided notifier block for events from 149 * @user_nb: user provided notifier block for events from
167 * a specific cable. 150 * a specific cable.
168 * @cable_index: the target cable. 151 * @cable_index: the target cable.
@@ -170,7 +153,6 @@ struct extcon_cable {
170 * @previous_value: the saved previous event value. 153 * @previous_value: the saved previous event value.
171 */ 154 */
172struct extcon_specific_cable_nb { 155struct extcon_specific_cable_nb {
173 struct notifier_block internal_nb;
174 struct notifier_block *user_nb; 156 struct notifier_block *user_nb;
175 int cable_index; 157 int cable_index;
176 struct extcon_dev *edev; 158 struct extcon_dev *edev;
@@ -194,10 +176,10 @@ extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
194/* 176/*
195 * Following APIs control the memory of extcon device. 177 * Following APIs control the memory of extcon device.
196 */ 178 */
197extern struct extcon_dev *extcon_dev_allocate(const char **cables); 179extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
198extern void extcon_dev_free(struct extcon_dev *edev); 180extern void extcon_dev_free(struct extcon_dev *edev);
199extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, 181extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
200 const char **cables); 182 const unsigned int *cable);
201extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); 183extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
202 184
203/* 185/*
@@ -216,13 +198,10 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
216 198
217/* 199/*
218 * get/set_cable_state access each bit of the 32b encoded state value. 200 * get/set_cable_state access each bit of the 32b encoded state value.
219 * They are used to access the status of each cable based on the cable_name 201 * They are used to access the status of each cable based on the cable_name.
220 * or cable_index, which is retrieved by extcon_find_cable_index
221 */ 202 */
222extern int extcon_find_cable_index(struct extcon_dev *sdev, 203extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
223 const char *cable_name); 204extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
224extern int extcon_get_cable_state_(struct extcon_dev *edev, int cable_index);
225extern int extcon_set_cable_state_(struct extcon_dev *edev, int cable_index,
226 bool cable_state); 205 bool cable_state);
227 206
228extern int extcon_get_cable_state(struct extcon_dev *edev, 207extern int extcon_get_cable_state(struct extcon_dev *edev,
@@ -249,16 +228,21 @@ extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
249 * we do not recommend to use this for normal 'notifiee' device drivers who 228 * we do not recommend to use this for normal 'notifiee' device drivers who
250 * want to be notified by a specific external port of the notifier. 229 * want to be notified by a specific external port of the notifier.
251 */ 230 */
252extern int extcon_register_notifier(struct extcon_dev *edev, 231extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
232 struct notifier_block *nb);
233extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
253 struct notifier_block *nb); 234 struct notifier_block *nb);
254extern int extcon_unregister_notifier(struct extcon_dev *edev,
255 struct notifier_block *nb);
256 235
257/* 236/*
258 * Following API get the extcon device from devicetree. 237 * Following API get the extcon device from devicetree.
259 * This function use phandle of devicetree to get extcon device directly. 238 * This function use phandle of devicetree to get extcon device directly.
260 */ 239 */
261extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index); 240extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
241 int index);
242
243/* Following API to get information of extcon device */
244extern const char *extcon_get_edev_name(struct extcon_dev *edev);
245
262#else /* CONFIG_EXTCON */ 246#else /* CONFIG_EXTCON */
263static inline int extcon_dev_register(struct extcon_dev *edev) 247static inline int extcon_dev_register(struct extcon_dev *edev)
264{ 248{
@@ -276,7 +260,7 @@ static inline int devm_extcon_dev_register(struct device *dev,
276static inline void devm_extcon_dev_unregister(struct device *dev, 260static inline void devm_extcon_dev_unregister(struct device *dev,
277 struct extcon_dev *edev) { } 261 struct extcon_dev *edev) { }
278 262
279static inline struct extcon_dev *extcon_dev_allocate(const char **cables) 263static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
280{ 264{
281 return ERR_PTR(-ENOSYS); 265 return ERR_PTR(-ENOSYS);
282} 266}
@@ -284,7 +268,7 @@ static inline struct extcon_dev *extcon_dev_allocate(const char **cables)
284static inline void extcon_dev_free(struct extcon_dev *edev) { } 268static inline void extcon_dev_free(struct extcon_dev *edev) { }
285 269
286static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, 270static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
287 const char **cables) 271 const unsigned int *cable)
288{ 272{
289 return ERR_PTR(-ENOSYS); 273 return ERR_PTR(-ENOSYS);
290} 274}
@@ -307,20 +291,14 @@ static inline int extcon_update_state(struct extcon_dev *edev, u32 mask,
307 return 0; 291 return 0;
308} 292}
309 293
310static inline int extcon_find_cable_index(struct extcon_dev *edev,
311 const char *cable_name)
312{
313 return 0;
314}
315
316static inline int extcon_get_cable_state_(struct extcon_dev *edev, 294static inline int extcon_get_cable_state_(struct extcon_dev *edev,
317 int cable_index) 295 unsigned int id)
318{ 296{
319 return 0; 297 return 0;
320} 298}
321 299
322static inline int extcon_set_cable_state_(struct extcon_dev *edev, 300static inline int extcon_set_cable_state_(struct extcon_dev *edev,
323 int cable_index, bool cable_state) 301 unsigned int id, bool cable_state)
324{ 302{
325 return 0; 303 return 0;
326} 304}
@@ -343,13 +321,15 @@ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
343} 321}
344 322
345static inline int extcon_register_notifier(struct extcon_dev *edev, 323static inline int extcon_register_notifier(struct extcon_dev *edev,
346 struct notifier_block *nb) 324 unsigned int id,
325 struct notifier_block *nb)
347{ 326{
348 return 0; 327 return 0;
349} 328}
350 329
351static inline int extcon_unregister_notifier(struct extcon_dev *edev, 330static inline int extcon_unregister_notifier(struct extcon_dev *edev,
352 struct notifier_block *nb) 331 unsigned int id,
332 struct notifier_block *nb)
353{ 333{
354 return 0; 334 return 0;
355} 335}
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 9ca958c4e94c..53c60806bcfb 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -44,7 +44,7 @@ struct adc_jack_cond {
44 * @consumer_channel: Unique name to identify the channel on the consumer 44 * @consumer_channel: Unique name to identify the channel on the consumer
45 * side. This typically describes the channels used within 45 * side. This typically describes the channels used within
46 * the consumer. E.g. 'battery_voltage' 46 * the consumer. E.g. 'battery_voltage'
47 * @cable_names: array of cable names ending with null. 47 * @cable_names: array of extcon id for supported cables.
48 * @adc_contitions: array of struct adc_jack_cond conditions ending 48 * @adc_contitions: array of struct adc_jack_cond conditions ending
49 * with .state = 0 entry. This describes how to decode 49 * with .state = 0 entry. This describes how to decode
50 * adc values into extcon state. 50 * adc values into extcon state.
@@ -58,8 +58,7 @@ struct adc_jack_pdata {
58 const char *name; 58 const char *name;
59 const char *consumer_channel; 59 const char *consumer_channel;
60 60
61 /* The last entry should be NULL */ 61 const enum extcon *cable_names;
62 const char **cable_names;
63 62
64 /* The last entry's state should be 0 */ 63 /* The last entry's state should be 0 */
65 struct adc_jack_cond *adc_conditions; 64 struct adc_jack_cond *adc_conditions;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 591f8c3ef410..920408a21ffd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -50,6 +50,8 @@
50#define MAX_ACTIVE_NODE_LOGS 8 50#define MAX_ACTIVE_NODE_LOGS 8
51#define MAX_ACTIVE_DATA_LOGS 8 51#define MAX_ACTIVE_DATA_LOGS 8
52 52
53#define VERSION_LEN 256
54
53/* 55/*
54 * For superblock 56 * For superblock
55 */ 57 */
@@ -86,6 +88,12 @@ struct f2fs_super_block {
86 __le32 extension_count; /* # of extensions below */ 88 __le32 extension_count; /* # of extensions below */
87 __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ 89 __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
88 __le32 cp_payload; 90 __le32 cp_payload;
91 __u8 version[VERSION_LEN]; /* the kernel version */
92 __u8 init_version[VERSION_LEN]; /* the initial kernel version */
93 __le32 feature; /* defined features */
94 __u8 encryption_level; /* versioning level for encryption */
95 __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
96 __u8 reserved[871]; /* valid reserved region */
89} __packed; 97} __packed;
90 98
91/* 99/*
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 230f87bdf5ad..fbb88740634a 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -47,6 +47,9 @@ struct files_struct {
47 * read mostly part 47 * read mostly part
48 */ 48 */
49 atomic_t count; 49 atomic_t count;
50 bool resize_in_progress;
51 wait_queue_head_t resize_wait;
52
50 struct fdtable __rcu *fdt; 53 struct fdtable __rcu *fdt;
51 struct fdtable fdtab; 54 struct fdtable fdtab;
52 /* 55 /*
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fa11b3a367be..17724f6ea983 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -207,6 +207,16 @@ struct bpf_prog_aux;
 		.off   = OFF,					\
 		.imm   = 0 })
 
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
+	((struct bpf_insn) {					\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
@@ -267,6 +277,14 @@ struct bpf_prog_aux;
 		.off   = 0,					\
 		.imm   = 0 })
 
+/* Internal classic blocks for direct assignment */
+
+#define __BPF_STMT(CODE, K) \
+	((struct sock_filter) BPF_STMT(CODE, K))
+
+#define __BPF_JUMP(CODE, K, JT, JF) \
+	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
+
 #define bytes_to_bpf_size(bytes)				\
 ({								\
 	int bpf_size = -EINVAL;					\
@@ -360,12 +378,9 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-void bpf_prog_select_runtime(struct bpf_prog *fp);
+int bpf_prog_select_runtime(struct bpf_prog *fp);
 void bpf_prog_free(struct bpf_prog *fp);
 
-int bpf_convert_filter(struct sock_filter *prog, int len,
-		       struct bpf_insn *new_prog, int *new_len);
-
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 				  gfp_t gfp_extra_flags);
@@ -377,14 +392,17 @@ static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
 	__bpf_prog_free(fp);
 }
 
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
+				       unsigned int flen);
+
 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+			      bpf_aux_classic_check_t trans);
 void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_attach_bpf(u32 ufd, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
-
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
 		  unsigned int len);
 
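The new BPF_STX_XADD() macro builds a BPF_STX | BPF_XADD instruction, i.e. an atomic add of src_reg into memory at dst_reg + off. A hedged sketch of a kernel-internal user follows; the register and stack-slot choices are illustrative only and are not taken from this patch:

	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),			/* r0 = 1 */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),	/* *(u32 *)(fp - 4) = r0 */
		BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -4),	/* atomically: *(u32 *)(fp - 4) += r0 */
		BPF_EXIT_INSN(),
	};

Note also that bpf_prog_select_runtime() now returns an int, so call sites can propagate setup failures instead of ignoring them.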
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 8293262401de..e65ef959546c 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -6,16 +6,16 @@
 #include <linux/bitops.h>
 
 struct frontswap_ops {
-	void (*init)(unsigned);
-	int (*store)(unsigned, pgoff_t, struct page *);
-	int (*load)(unsigned, pgoff_t, struct page *);
-	void (*invalidate_page)(unsigned, pgoff_t);
-	void (*invalidate_area)(unsigned);
+	void (*init)(unsigned); /* this swap type was just swapon'ed */
+	int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
+	int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
+	void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
+	void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
+	struct frontswap_ops *next; /* private pointer to next ops */
 };
 
 extern bool frontswap_enabled;
-extern struct frontswap_ops *
-	frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_register_ops(struct frontswap_ops *ops);
 extern void frontswap_shrink(unsigned long);
 extern unsigned long frontswap_curr_pages(void);
 extern void frontswap_writethrough(bool);
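With frontswap_register_ops() now returning void and backends chained through the new ->next pointer, a backend registers itself unconditionally. A hedged sketch of what a caller might look like; the my_* callbacks are placeholders, not part of this patch:

	static struct frontswap_ops my_frontswap_ops = {
		.init		 = my_init,
		.store		 = my_store,
		.load		 = my_load,
		.invalidate_page = my_invalidate_page,
		.invalidate_area = my_invalidate_area,
	};

	static int __init my_backend_init(void)
	{
		/* no previous-ops return value to stash any more */
		frontswap_register_ops(&my_frontswap_ops);
		return 0;
	}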
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b577e801b4af..84b783f277f7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -35,6 +35,7 @@
 #include <uapi/linux/fs.h>
 
 struct backing_dev_info;
+struct bdi_writeback;
 struct export_operations;
 struct hd_geometry;
 struct iovec;
@@ -54,7 +55,8 @@ struct vm_fault;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
-extern void __init files_init(unsigned long);
+extern void __init files_init(void);
+extern void __init files_maxfiles_init(void);
 
 extern struct files_stat_struct files_stat;
 extern unsigned long get_max_files(void);
@@ -69,6 +71,7 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create);
 typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 			ssize_t bytes, void *private);
+typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
 
 #define MAY_EXEC		0x00000001
 #define MAY_WRITE		0x00000002
@@ -634,6 +637,14 @@ struct inode {
 
 	struct hlist_node	i_hash;
 	struct list_head	i_wb_list;	/* backing dev IO list */
+#ifdef CONFIG_CGROUP_WRITEBACK
+	struct bdi_writeback	*i_wb;		/* the associated cgroup wb */
+
+	/* foreign inode detection, see wbc_detach_inode() */
+	int			i_wb_frn_winner;
+	u16			i_wb_frn_avg_time;
+	u16			i_wb_frn_history;
+#endif
 	struct list_head	i_lru;		/* inode LRU list */
 	struct list_head	i_sb_list;
 	union {
@@ -1036,12 +1047,12 @@ extern void locks_remove_file(struct file *);
 extern void locks_release_private(struct file_lock *);
 extern void posix_test_lock(struct file *, struct file_lock *);
 extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_file_wait(struct file *, struct file_lock *);
+extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
 extern int posix_unblock_lock(struct file_lock *);
 extern int vfs_test_lock(struct file *, struct file_lock *);
 extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
 extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
 extern void lease_get_mtime(struct inode *, struct timespec *time);
 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
@@ -1127,7 +1138,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
 	return -ENOLCK;
 }
 
-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+static inline int posix_lock_inode_wait(struct inode *inode,
+					struct file_lock *fl)
 {
 	return -ENOLCK;
 }
@@ -1153,8 +1165,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
 	return 0;
 }
 
-static inline int flock_lock_file_wait(struct file *filp,
-				       struct file_lock *request)
+static inline int flock_lock_inode_wait(struct inode *inode,
+					struct file_lock *request)
 {
 	return -ENOLCK;
 }
@@ -1192,6 +1204,20 @@ static inline void show_fd_locks(struct seq_file *f,
 			struct file *filp, struct files_struct *files) {}
 #endif /* !CONFIG_FILE_LOCKING */
 
+static inline struct inode *file_inode(const struct file *f)
+{
+	return f->f_inode;
+}
+
+static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+	return posix_lock_inode_wait(file_inode(filp), fl);
+}
+
+static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+	return flock_lock_inode_wait(file_inode(filp), fl);
+}
 
 struct fasync_struct {
 	spinlock_t		fa_lock;
@@ -1232,6 +1258,8 @@ struct mm_struct;
 #define UMOUNT_NOFOLLOW	0x00000008	/* Don't follow symlink on umount */
 #define UMOUNT_UNUSED	0x80000000	/* Flag guaranteed to be unused */
 
+/* sb->s_iflags */
+#define SB_I_CGROUPWB	0x00000001	/* cgroup-aware writeback enabled */
 
 /* Possible states of 'frozen' field */
 enum {
@@ -1270,6 +1298,7 @@ struct super_block {
 	const struct quotactl_ops	*s_qcop;
 	const struct export_operations *s_export_op;
 	unsigned long		s_flags;
+	unsigned long		s_iflags;	/* internal SB_I_* flags */
 	unsigned long		s_magic;
 	struct dentry		*s_root;
 	struct rw_semaphore	s_umount;
@@ -1641,7 +1670,6 @@ struct inode_operations {
 	int (*set_acl)(struct inode *, struct posix_acl *, int);
 
 	/* WARNING: probably going away soon, do not use! */
-	int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
 } ____cacheline_aligned;
 
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1806,6 +1834,11 @@ struct super_operations {
  *
  * I_DIO_WAKEUP		Never set.  Only used as a key for wait_on_bit().
  *
+ * I_WB_SWITCH		Cgroup bdi_writeback switching in progress.  Used to
+ *			synchronize competing switching instances and to tell
+ *			wb stat updates to grab mapping->tree_lock.  See
+ *			inode_switch_wb_work_fn() for details.
+ *
  * Q: What is the difference between I_WILL_FREE and I_FREEING?
  */
 #define I_DIRTY_SYNC		(1 << 0)
@@ -1825,6 +1858,7 @@ struct super_operations {
 #define I_DIRTY_TIME		(1 << 11)
 #define __I_DIRTY_TIME_EXPIRED	12
 #define I_DIRTY_TIME_EXPIRED	(1 << __I_DIRTY_TIME_EXPIRED)
+#define I_WB_SWITCH		(1 << 13)
 
 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
 #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
@@ -1898,6 +1932,7 @@ struct file_system_type {
 #define FS_HAS_SUBTYPE		4
 #define FS_USERNS_MOUNT		8	/* Can be mounted by userns root */
 #define FS_USERNS_DEV_MOUNT	16 /* A userns mount does not imply MNT_NODEV */
+#define FS_USERNS_VISIBLE	32	/* FS must already be visible */
 #define FS_RENAME_DOES_D_MOVE	32768	/* FS will handle d_move() during rename() internally. */
 	struct dentry *(*mount) (struct file_system_type *, int,
 		       const char *, void *);
@@ -1985,7 +2020,6 @@ extern int vfs_ustat(dev_t, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
 extern bool our_mnt(struct vfsmount *mnt);
-extern bool fs_fully_visible(struct file_system_type *);
 
 extern int current_umask(void);
 
@@ -1993,11 +2027,6 @@ extern void ihold(struct inode * inode);
 extern void iput(struct inode *);
 extern int generic_update_time(struct inode *, struct timespec *, int);
 
-static inline struct inode *file_inode(const struct file *f)
-{
-	return f->f_inode;
-}
-
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
@@ -2194,7 +2223,6 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
 extern struct file *filp_open(const char *, int, umode_t);
 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
 				   const char *, int);
-extern int vfs_open(const struct path *, struct file *, const struct cred *);
 extern struct file * dentry_open(const struct path *, int, const struct cred *);
 extern int filp_close(struct file *, fl_owner_t id);
 
@@ -2218,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
 
 /* fs/dcache.c */
 extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
+extern void __init vfs_caches_init(void);
 
 extern struct kmem_cache *names_cachep;
 
@@ -2241,7 +2269,13 @@ extern struct super_block *freeze_bdev(struct block_device *);
 extern void emergency_thaw_all(void);
 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
 extern int fsync_bdev(struct block_device *);
-extern int sb_is_blkdev_sb(struct super_block *sb);
+
+extern struct super_block *blockdev_superblock;
+
+static inline bool sb_is_blkdev_sb(struct super_block *sb)
+{
+	return sb == blockdev_superblock;
+}
 #else
 static inline void bd_forget(struct inode *inode) {}
 static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -2280,6 +2314,9 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
 					      void *holder);
 extern void blkdev_put(struct block_device *bdev, fmode_t mode);
+extern int __blkdev_reread_part(struct block_device *bdev);
+extern int blkdev_reread_part(struct block_device *bdev);
+
 #ifdef CONFIG_SYSFS
 extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
 extern void bd_unlink_disk_holder(struct block_device *bdev,
@@ -2502,6 +2539,8 @@ extern struct file * open_exec(const char *);
 extern int is_subdir(struct dentry *, struct dentry *);
 extern int path_is_under(struct path *, struct path *);
 
+extern char *file_path(struct file *, char *, int);
+
 #include <linux/err.h>
 
 /* needed for stackable file system support */
@@ -2553,7 +2592,12 @@ extern struct inode *new_inode_pseudo(struct super_block *sb);
 extern struct inode *new_inode(struct super_block *sb);
 extern void free_inode_nonrcu(struct inode *inode);
 extern int should_remove_suid(struct dentry *);
-extern int file_remove_suid(struct file *);
+extern int file_remove_privs(struct file *);
+extern int dentry_needs_remove_privs(struct dentry *dentry);
+static inline int file_needs_remove_privs(struct file *file)
+{
+	return dentry_needs_remove_privs(file->f_path.dentry);
+}
 
 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
 static inline void insert_inode_hash(struct inode *inode)
@@ -2628,9 +2672,13 @@ ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
 int dax_clear_blocks(struct inode *, sector_t block, long size);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+		dax_iodone_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+		dax_iodone_t);
 int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb)	dax_fault(vma, vmf, gb)
+#define dax_mkwrite(vma, vmf, gb, iod)	dax_fault(vma, vmf, gb, iod)
+#define __dax_mkwrite(vma, vmf, gb, iod)	__dax_fault(vma, vmf, gb, iod)
 
 #ifdef CONFIG_BLOCK
 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
@@ -2784,6 +2832,8 @@ extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned in
 extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
 extern const struct file_operations simple_dir_operations;
 extern const struct inode_operations simple_dir_inode_operations;
+extern void make_empty_dir_inode(struct inode *inode);
+extern bool is_empty_dir_inode(struct inode *inode);
 struct tree_descr { char *name; const struct file_operations *ops; int mode; };
 struct dentry *d_alloc_name(struct dentry *, const char *);
 extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
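The new SB_I_CGROUPWB bit in s_iflags is how a filesystem opts in to the cgroup-aware writeback machinery added alongside it. A hedged sketch of how a fill_super implementation might set it; the myfs_ name is illustrative and the surrounding setup is elided:

	static int myfs_fill_super(struct super_block *sb, void *data, int silent)
	{
		/* allow writeback to be split per memcg/blkcg for this sb */
		sb->s_iflags |= SB_I_CGROUPWB;
		return 0;
	}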
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 771484993ca7..604e1526cd00 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -74,6 +74,7 @@ extern wait_queue_head_t fscache_cache_cleared_wq;
  */
 typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
 typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
 
 enum fscache_operation_state {
 	FSCACHE_OP_ST_BLANK,		/* Op is not yet submitted */
@@ -109,6 +110,9 @@ struct fscache_operation {
 					 * the op in a non-pool thread */
 	fscache_operation_processor_t processor;
 
+	/* Operation cancellation cleanup (optional) */
+	fscache_operation_cancel_t cancel;
+
 	/* operation releaser */
 	fscache_operation_release_t release;
 };
@@ -119,33 +123,17 @@ extern void fscache_op_work_func(struct work_struct *work);
 extern void fscache_enqueue_operation(struct fscache_operation *);
 extern void fscache_op_complete(struct fscache_operation *, bool);
 extern void fscache_put_operation(struct fscache_operation *);
-
-/**
- * fscache_operation_init - Do basic initialisation of an operation
- * @op: The operation to initialise
- * @release: The release function to assign
- *
- * Do basic initialisation of an operation.  The caller must still set flags,
- * object and processor if needed.
- */
-static inline void fscache_operation_init(struct fscache_operation *op,
-					fscache_operation_processor_t processor,
-					fscache_operation_release_t release)
-{
-	INIT_WORK(&op->work, fscache_op_work_func);
-	atomic_set(&op->usage, 1);
-	op->state = FSCACHE_OP_ST_INITIALISED;
-	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
-	op->processor = processor;
-	op->release = release;
-	INIT_LIST_HEAD(&op->pend_link);
-}
+extern void fscache_operation_init(struct fscache_operation *,
+				   fscache_operation_processor_t,
+				   fscache_operation_cancel_t,
+				   fscache_operation_release_t);
 
 /*
  * data read operation
  */
 struct fscache_retrieval {
 	struct fscache_operation op;
+	struct fscache_cookie	*cookie;	/* The netfs cookie */
 	struct address_space	*mapping;	/* netfs pages */
 	fscache_rw_complete_t	end_io_func;	/* function to call on I/O completion */
 	void			*context;	/* netfs read context (pinned) */
@@ -371,6 +359,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_LOOKED_UP	4	/* T if object has been looked up */
 #define FSCACHE_OBJECT_IS_AVAILABLE	5	/* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED		6	/* T if object was retired on relinquishment */
+#define FSCACHE_OBJECT_KILLED_BY_CACHE	7	/* T if object was killed by the cache */
 
 	struct list_head	cache_link;	/* link in cache->object_list */
 	struct hlist_node	cookie_link;	/* link in cookie->backing_objects */
@@ -410,17 +399,16 @@ static inline bool fscache_object_is_available(struct fscache_object *object)
 	return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
 }
 
-static inline bool fscache_object_is_active(struct fscache_object *object)
+static inline bool fscache_cache_is_broken(struct fscache_object *object)
 {
-	return fscache_object_is_available(object) &&
-	       fscache_object_is_live(object) &&
-	       !test_bit(FSCACHE_IOERROR, &object->cache->flags);
+	return test_bit(FSCACHE_IOERROR, &object->cache->flags);
 }
 
-static inline bool fscache_object_is_dead(struct fscache_object *object)
+static inline bool fscache_object_is_active(struct fscache_object *object)
 {
-	return fscache_object_is_dying(object) &&
-	       test_bit(FSCACHE_IOERROR, &object->cache->flags);
+	return fscache_object_is_available(object) &&
+	       fscache_object_is_live(object) &&
+	       !fscache_cache_is_broken(object);
 }
 
 /**
@@ -551,4 +539,15 @@ extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
 					       const void *data,
 					       uint16_t datalen);
 
+extern void fscache_object_retrying_stale(struct fscache_object *object);
+
+enum fscache_why_object_killed {
+	FSCACHE_OBJECT_IS_STALE,
+	FSCACHE_OBJECT_NO_SPACE,
+	FSCACHE_OBJECT_WAS_RETIRED,
+	FSCACHE_OBJECT_WAS_CULLED,
+};
+extern void fscache_object_mark_killed(struct fscache_object *object,
+				       enum fscache_why_object_killed why);
+
 #endif /* _LINUX_FSCACHE_CACHE_H */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index a82296af413f..2a2f56b292c1 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -24,6 +24,7 @@
 #define FSL_USB_VER_1_6		1
 #define FSL_USB_VER_2_2		2
 #define FSL_USB_VER_2_4		3
+#define FSL_USB_VER_2_5		4
 
 #include <linux/types.h>
 
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0f313f93c586..65a517dd32f7 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -84,8 +84,6 @@ struct fsnotify_fname;
  * Each group much define these ops.  The fsnotify infrastructure will call
  * these operations for each relevant group.
  *
- * should_send_event - given a group, inode, and mask this function determines
- *		if the group is interested in this event.
  * handle_event - main call for a group to handle an fs event
  * free_group_priv - called when a group refcnt hits 0 to clean up the private union
  * freeing_mark - called when a mark is being destroyed for some reason.  The group
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1da602982cf9..6cd8c0ee4b6f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  *            SAVE_REGS. If another ops with this flag set is already registered
  *            for any of the functions that this ops will be registered for, then
  *            this ops will fail to register or set_filter_ip.
+ * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
@@ -132,6 +133,7 @@ enum {
 	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
 	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
 	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
+	FTRACE_OPS_FL_PID			= 1 << 14,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
 	struct ftrace_ops		*next;
 	unsigned long			flags;
 	void				*private;
+	ftrace_func_t			saved_func;
 	int __percpu			*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	int				nr_trampolines;
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 1ccaab44abcc..5383bb1394a1 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -119,16 +119,16 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
 
 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
 		int min_alloc_order, int nid);
-extern struct gen_pool *dev_get_gen_pool(struct device *dev);
+extern struct gen_pool *gen_pool_get(struct device *dev);
 
 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
 			size_t size);
 
 #ifdef CONFIG_OF
-extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+extern struct gen_pool *of_gen_pool_get(struct device_node *np,
 	const char *propname, int index);
 #else
-static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
 	const char *propname, int index)
 {
 	return NULL;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 15928f0647e4..ad35f300b9a4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -368,6 +368,11 @@ extern void free_pages(unsigned long addr, unsigned int order);
 extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
+struct page_frag_cache;
+extern void *__alloc_page_frag(struct page_frag_cache *nc,
+			       unsigned int fragsz, gfp_t gfp_mask);
+extern void __free_page_frag(void *addr);
+
 extern void __free_kmem_pages(struct page *page, unsigned int order);
 extern void free_kmem_pages(unsigned long addr, unsigned int order);
 
@@ -379,6 +384,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(struct zone *zone);
 void drain_local_pages(struct zone *zone);
 
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+void page_alloc_init_late(void);
+#else
+static inline void page_alloc_init_late(void)
+{
+}
+#endif
+
 /*
  * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
  * GFP flags are used before interrupts are enabled. Once interrupts are
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
index 569236e6b2bc..93e080b39cf6 100644
--- a/include/linux/goldfish.h
+++ b/include/linux/goldfish.h
@@ -3,13 +3,24 @@
 
 /* Helpers for Goldfish virtual platform */
 
-static inline void gf_write64(unsigned long data,
-		void __iomem *portl, void __iomem *porth)
+static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
+				void __iomem *porth)
 {
-	writel((u32)data, portl);
+	writel((u32)(unsigned long)ptr, portl);
 #ifdef CONFIG_64BIT
-	writel(data>>32, porth);
+	writel((unsigned long)ptr >> 32, porth);
 #endif
 }
 
+static inline void gf_write_dma_addr(const dma_addr_t addr,
+				     void __iomem *portl,
+				     void __iomem *porth)
+{
+	writel((u32)addr, portl);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	writel(addr >> 32, porth);
+#endif
+}
+
+
 #endif /* __LINUX_GOLDFISH_H */
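The old gf_write64() is split into a kernel-pointer variant and a dma_addr_t variant, keyed to CONFIG_64BIT and CONFIG_ARCH_DMA_ADDR_T_64BIT respectively. A hedged usage sketch; the mydev_ name and the register offsets are invented for illustration:

	static void mydev_post_buffers(void __iomem *base, const void *cmd,
				       dma_addr_t dma_handle)
	{
		/* hand the device a kernel pointer as a low/high register pair */
		gf_write_ptr(cmd, base + 0x00, base + 0x04);
		/* hand the device a DMA address as a low/high register pair */
		gf_write_dma_addr(dma_handle, base + 0x08, base + 0x0c);
	}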
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index fd098169fe87..adac255aee86 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -407,6 +407,21 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
 	return -EINVAL;
 }
 
+/* Child properties interface */
+struct fwnode_handle;
+
+static inline struct gpio_desc *fwnode_get_named_gpiod(
+	struct fwnode_handle *fwnode, const char *propname)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_desc *devm_get_gpiod_from_child(
+	struct device *dev, const char *con_id, struct fwnode_handle *child)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
 #endif /* CONFIG_GPIOLIB */
 
 /*
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index cc7ec129b329..c8393cd4d44f 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -45,7 +45,7 @@ struct seq_file;
  * @base: identifies the first GPIO number handled by this chip;
  *	or, if negative during registration, requests dynamic ID allocation.
  *	DEPRECATION: providing anything non-negative and nailing the base
- *	base offset of GPIO chips is deprecated. Please pass -1 as base to
+ *	offset of GPIO chips is deprecated. Please pass -1 as base to
  *	let gpiolib select the chip base in all possible cases. We want to
  *	get rid of the static GPIO number space in the long run.
  * @ngpio: the number of GPIOs handled by this controller; the last GPIO
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 0042bf330b99..c02b5ce6c5cd 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -230,6 +230,7 @@ struct hid_sensor_common {
 	struct platform_device *pdev;
 	unsigned usage_id;
 	atomic_t data_ready;
+	atomic_t user_requested_state;
 	struct iio_trigger *trigger;
 	struct hid_sensor_hub_attribute_info poll;
 	struct hid_sensor_hub_attribute_info report_state;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 205026175c42..d891f949466a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -460,15 +460,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 	return &mm->page_table_lock;
 }
 
-static inline bool hugepages_supported(void)
-{
-	/*
-	 * Some platform decide whether they support huge pages at boot
-	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
-	 * there is no such support
-	 */
-	return HPAGE_SHIFT != 0;
-}
+#ifndef hugepages_supported
+/*
+ * Some platform decide whether they support huge pages at boot
+ * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
+ * when there is no such support
+ */
+#define hugepages_supported() (HPAGE_SHIFT != 0)
+#endif
 
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
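Wrapping the generic definition in #ifndef turns hugepages_supported() into an overridable hook: an architecture can supply its own predicate from its asm/hugetlb.h and the HPAGE_SHIFT test only applies as a fallback. A hedged sketch of such an override; the MACHINE_HAS_EDAT1 condition is only an example of an arch-specific capability check:

	/* in an architecture's asm/hugetlb.h, seen before linux/hugetlb.h */
	#define hugepages_supported()	(MACHINE_HAS_EDAT1)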
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 3343298e40e8..859d673d98c8 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -26,6 +26,7 @@
 #define HWLOCK_IRQ	0x02	/* Disable interrupts, don't save state */
 
 struct device;
+struct device_node;
 struct hwspinlock;
 struct hwspinlock_device;
 struct hwspinlock_ops;
@@ -66,6 +67,7 @@ int hwspin_lock_unregister(struct hwspinlock_device *bank);
 struct hwspinlock *hwspin_lock_request(void);
 struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
 int hwspin_lock_free(struct hwspinlock *hwlock);
+int of_hwspin_lock_get_id(struct device_node *np, int index);
 int hwspin_lock_get_id(struct hwspinlock *hwlock);
 int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
 							unsigned long *);
@@ -120,6 +122,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 {
 }
 
+static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+{
+	return 0;
+}
+
 static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
 {
 	return 0;
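of_hwspin_lock_get_id() lets a device-tree consumer translate an hwlock phandle into a global lock id before requesting it. A hedged sketch of a caller; mydev_get_hwlock() and the zero index are illustrative only:

	static struct hwspinlock *mydev_get_hwlock(struct device_node *np)
	{
		int id = of_hwspin_lock_get_id(np, 0);

		if (id < 0)
			return NULL;
		return hwspin_lock_request_specific(id);
	}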
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 902c37aef67e..30d3a1f79450 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -160,16 +160,18 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
  * 1 . 1  (Windows 7)
  * 2 . 4  (Windows 8)
  * 3 . 0  (Windows 8 R2)
+ * 4 . 0  (Windows 10)
  */
 
 #define VERSION_WS2008  ((0 << 16) | (13))
 #define VERSION_WIN7    ((1 << 16) | (1))
 #define VERSION_WIN8    ((2 << 16) | (4))
 #define VERSION_WIN8_1    ((3 << 16) | (0))
+#define VERSION_WIN10	((4 << 16) | (0))
 
 #define VERSION_INVAL -1
 
-#define VERSION_CURRENT VERSION_WIN8_1
+#define VERSION_CURRENT VERSION_WIN10
 
 /* Make maximum size of pipe payload of 16K */
 #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -389,10 +391,7 @@ enum vmbus_channel_message_type {
 	CHANNELMSG_INITIATE_CONTACT	= 14,
 	CHANNELMSG_VERSION_RESPONSE	= 15,
 	CHANNELMSG_UNLOAD		= 16,
-#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
-	CHANNELMSG_VIEWRANGE_ADD	= 17,
-	CHANNELMSG_VIEWRANGE_REMOVE	= 18,
-#endif
+	CHANNELMSG_UNLOAD_RESPONSE	= 17,
 	CHANNELMSG_COUNT
 };
 
@@ -549,21 +548,6 @@ struct vmbus_channel_gpadl_torndown {
 	u32 gpadl;
 } __packed;
 
-#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
-struct vmbus_channel_view_range_add {
-	struct vmbus_channel_message_header header;
-	PHYSICAL_ADDRESS viewrange_base;
-	u64 viewrange_length;
-	u32 child_relid;
-} __packed;
-
-struct vmbus_channel_view_range_remove {
-	struct vmbus_channel_message_header header;
-	PHYSICAL_ADDRESS viewrange_base;
-	u32 child_relid;
-} __packed;
-#endif
-
 struct vmbus_channel_relid_released {
 	struct vmbus_channel_message_header header;
 	u32 child_relid;
@@ -713,6 +697,11 @@ struct vmbus_channel {
 	/* The corresponding CPUID in the guest */
 	u32 target_cpu;
 	/*
+	 * State to manage the CPU affiliation of channels.
+	 */
+	struct cpumask alloced_cpus_in_node;
+	int numa_node;
+	/*
 	 * Support for sub-channels. For high performance devices,
 	 * it will be useful to have multiple sub-channels to support
 	 * a scalable communication infrastructure with the host.
@@ -745,6 +734,15 @@ struct vmbus_channel {
 	 */
 	struct list_head sc_list;
 	/*
+	 * Current number of sub-channels.
+	 */
+	int num_sc;
+	/*
+	 * Number of a sub-channel (position within sc_list) which is supposed
+	 * to be used as the next outgoing channel.
+	 */
+	int next_oc;
+	/*
 	 * The primary channel this sub-channel belongs to.
 	 * This will be NULL for the primary channel.
 	 */
@@ -758,9 +756,6 @@ struct vmbus_channel {
 	 * link up channels based on their CPU affinity.
 	 */
 	struct list_head percpu_list;
-
-	int num_sc;
-	int next_oc;
 };
 
 static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
@@ -1236,13 +1231,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
 				      struct icmsg_negotiate *, u8 *, int,
 				      int);
 
-int hv_kvp_init(struct hv_util_service *);
-void hv_kvp_deinit(void);
-void hv_kvp_onchannelcallback(void *);
-
-int hv_vss_init(struct hv_util_service *);
-void hv_vss_deinit(void);
-void hv_vss_onchannelcallback(void *);
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
 
 extern struct resource hyperv_mmio;
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 0bc03f100d04..9ad7828d9d34 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -675,6 +675,7 @@ struct twl4030_power_data {
 	struct twl4030_resconfig *board_config;
 #define TWL4030_RESCONFIG_UNDEF	((u8)-1)
 	bool use_poweroff;	/* Board is wired for TWL poweroff */
+	bool ac_charger_quirk;	/* Disable AC charger on board */
 };
 
 extern int twl4030_remove_script(u8 flags);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 93b5ca754b5b..a633898f36ac 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -39,6 +39,19 @@
 
 struct device;
 
+/* IDE-specific values for req->cmd_type */
+enum ata_cmd_type_bits {
+	REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1,
+	REQ_TYPE_ATA_PC,
+	REQ_TYPE_ATA_SENSE,	/* sense request */
+	REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */
+	REQ_TYPE_ATA_PM_RESUME,	/* resume request */
+};
+
+#define ata_pm_request(rq)	\
+	((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \
+	 (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME)
+
 /* Error codes returned in rq->errors to the higher part of the driver. */
 enum {
 	IDE_DRV_ERROR_GENERAL	= 101,
@@ -1314,6 +1327,19 @@ struct ide_port_info {
 	u8			udma_mask;
 };
 
+/*
+ * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
+ * requests.
+ */
+struct ide_pm_state {
+	/* PM state machine step value, currently driver specific */
+	int	pm_step;
+	/* requested PM state value (S1, S2, S3, S4, ...) */
+	u32	pm_state;
+	void*	data;		/* for driver use */
+};
+
+
 int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
 int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
 		     const struct ide_port_info *, void *);
@@ -1551,4 +1577,5 @@ static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
 #define ide_host_for_each_port(i, port, host) \
 	for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
 
+
 #endif /* _IDE_H */
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 8872ca103d06..1dc1f4ed4001 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -225,15 +225,13 @@ static inline bool ieee802154_is_valid_psdu_len(const u8 len)
  * ieee802154_is_valid_psdu_len - check if extended addr is valid
  * @addr: extended addr to check
  */
-static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
+static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
 {
-	/* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
-	 * is used internally as extended to short address broadcast mapping.
-	 * This is currently a workaround because neighbor discovery can't
-	 * deal with short addresses types right now.
+	/* Bail out if the address is all zero, or if the group
+	 * address bit is set.
 	 */
 	return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
-		(addr != cpu_to_le64(0xffffffffffffffffULL)));
+		!(addr & cpu_to_le64(0x0100000000000000ULL)));
 }
 
 /**
@@ -244,9 +242,9 @@ static inline void ieee802154_random_extended_addr(__le64 *addr)
 {
 	get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
 
-	/* toggle some bit if we hit an invalid extended addr */
-	if (!ieee802154_is_valid_extended_addr(*addr))
-		((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
+	/* clear the group bit, and set the locally administered bit */
+	((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01;
+	((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02;
 }
 
 #endif /* LINUX_IEEE802154_H */
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index da4929927f69..ae5d0d22955d 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -5,6 +5,15 @@
 
 
 /* We don't want this structure exposed to user space */
+struct ifla_vf_stats {
+	__u64 rx_packets;
+	__u64 tx_packets;
+	__u64 rx_bytes;
+	__u64 tx_bytes;
+	__u64 broadcast;
+	__u64 multicast;
+};
+
 struct ifla_vf_info {
 	__u32 vf;
 	__u8 mac[32];
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 6f6929ea8a0c..a4ccc3122f93 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -29,7 +29,7 @@ struct macvtap_queue;
  * Maximum times a macvtap device can be opened. This can be used to
  * configure the number of receive queue, e.g. for multiqueue virtio.
  */
-#define MAX_MACVTAP_QUEUES	16
+#define MAX_MACVTAP_QUEUES	256
 
 #define MACVLAN_MC_FILTER_BITS	8
 #define MACVLAN_MC_FILTER_SZ	(1 << MACVLAN_MC_FILTER_BITS)
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 66a7d7600f43..b49cf923becc 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -74,7 +74,7 @@ static inline struct sock *sk_pppox(struct pppox_sock *po)
 struct module;
 
 struct pppox_proto {
-	int		(*create)(struct net *net, struct socket *sock);
+	int		(*create)(struct net *net, struct socket *sock, int kern);
 	int		(*ioctl)(struct socket *sock, unsigned int cmd,
 				 unsigned long arg);
 	struct module	*owner;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 920e4457ce6e..67ce5bd3b56a 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
 /**
  * __vlan_get_tag - get the VLAN ID that is part of the payload
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if the skb is not of VLAN type
  */
@@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 /**
  * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if @skb->vlan_tci is not set correctly
  */
@@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
 /**
  * vlan_get_tag - get the VLAN ID from the skb
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if the skb is not VLAN tagged
  */
@@ -539,7 +539,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
 	 */
 
 	proto = vhdr->h_vlan_encapsulated_proto;
-	if (ntohs(proto) >= ETH_P_802_3_MIN) {
+	if (eth_proto_is_802_3(proto)) {
 		skb->protocol = proto;
 		return;
 	}
@@ -628,4 +628,24 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
 	return features;
 }
 
+/**
+ * compare_vlan_header - Compare two vlan headers
+ * @h1: Pointer to vlan header
+ * @h2: Pointer to vlan header
+ *
+ * Compare two vlan headers, returns 0 if equal.
+ *
+ * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
+ */
+static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
+						const struct vlan_hdr *h2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	return *(u32 *)h1 ^ *(u32 *)h2;
+#else
+	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
+	       ((__force u32)h1->h_vlan_encapsulated_proto ^
+		(__force u32)h2->h_vlan_encapsulated_proto);
+#endif
+}
 #endif /* !(_LINUX_IF_VLAN_H_) */
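compare_vlan_header() folds the TCI and encapsulated-protocol comparison into a single operation that is safe for 16-bit-aligned headers. A hedged sketch of the intended calling pattern; same_outer_vlan() is an illustrative wrapper, not part of the header:

	static bool same_outer_vlan(const struct vlan_hdr *a,
				    const struct vlan_hdr *b)
	{
		/* zero means both the TCI and the encapsulated proto match */
		return compare_vlan_header(a, b) == 0;
	}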
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 2c677afeea47..193ad488d3e2 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -130,5 +130,6 @@ extern void ip_mc_unmap(struct in_device *);
 extern void ip_mc_remap(struct in_device *);
 extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
 extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
 
 #endif
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index eb8622b78ec9..1600c55828e0 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -29,6 +29,7 @@ struct iio_buffer;
  * @set_length:		set number of datums in buffer
  * @release:		called when the last reference to the buffer is dropped,
  *			should free all resources allocated by the buffer.
+ * @modes:		Supported operating modes by this buffer type
  *
  * The purpose of this structure is to make the buffer element
  * modular as event for a given driver, different usecases may require
@@ -51,6 +52,8 @@ struct iio_buffer_access_funcs {
 	int (*set_length)(struct iio_buffer *buffer, int length);
 
 	void (*release)(struct iio_buffer *buffer);
+
+	unsigned int modes;
 };
 
 /**
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index d86b753e9b30..f79148261d16 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -32,6 +32,7 @@ enum iio_chan_info_enum {
32 IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, 32 IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
33 IIO_CHAN_INFO_AVERAGE_RAW, 33 IIO_CHAN_INFO_AVERAGE_RAW,
34 IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, 34 IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
35 IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
35 IIO_CHAN_INFO_SAMP_FREQ, 36 IIO_CHAN_INFO_SAMP_FREQ,
36 IIO_CHAN_INFO_FREQUENCY, 37 IIO_CHAN_INFO_FREQUENCY,
37 IIO_CHAN_INFO_PHASE, 38 IIO_CHAN_INFO_PHASE,
@@ -43,6 +44,8 @@ enum iio_chan_info_enum {
43 IIO_CHAN_INFO_CALIBWEIGHT, 44 IIO_CHAN_INFO_CALIBWEIGHT,
44 IIO_CHAN_INFO_DEBOUNCE_COUNT, 45 IIO_CHAN_INFO_DEBOUNCE_COUNT,
45 IIO_CHAN_INFO_DEBOUNCE_TIME, 46 IIO_CHAN_INFO_DEBOUNCE_TIME,
47 IIO_CHAN_INFO_CALIBEMISSIVITY,
48 IIO_CHAN_INFO_OVERSAMPLING_RATIO,
46}; 49};
47 50
48enum iio_shared_by { 51enum iio_shared_by {
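
Drivers that want to expose the new channel info elements follow the usual IIO pattern: advertise the element in the channel spec, then handle it in read_raw()/write_raw(). A minimal sketch for a hypothetical driver, showing only IIO_CHAN_INFO_OVERSAMPLING_RATIO:

#include <linux/iio/iio.h>

static const struct iio_chan_spec demo_channels[] = {
	{
		.type = IIO_PRESSURE,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		/* exposes an oversampling_ratio sysfs attribute */
		.info_mask_shared_by_all =
			BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
	},
};

static int demo_read_raw(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int *val, int *val2, long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
		*val = 16;	/* hypothetical fixed ratio */
		return IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}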
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 942b6de68e2f..32b579525004 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -17,6 +17,8 @@ enum iio_event_info {
17 IIO_EV_INFO_VALUE, 17 IIO_EV_INFO_VALUE,
18 IIO_EV_INFO_HYSTERESIS, 18 IIO_EV_INFO_HYSTERESIS,
19 IIO_EV_INFO_PERIOD, 19 IIO_EV_INFO_PERIOD,
20 IIO_EV_INFO_HIGH_PASS_FILTER_3DB,
21 IIO_EV_INFO_LOW_PASS_FILTER_3DB,
20}; 22};
21 23
22#define IIO_VAL_INT 1 24#define IIO_VAL_INT 1
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index ac48b10c9395..0e707f0c1a3e 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -24,6 +24,7 @@ struct inet_diag_handler {
24 struct inet_diag_msg *r, 24 struct inet_diag_msg *r,
25 void *info); 25 void *info);
26 __u16 idiag_type; 26 __u16 idiag_type;
27 __u16 idiag_info_size;
27}; 28};
28 29
29struct inet_connection_sock; 30struct inet_connection_sock;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0a21fbefdfbe..a4328cea376a 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -120,6 +120,9 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
120 || (!IN_DEV_FORWARD(in_dev) && \ 120 || (!IN_DEV_FORWARD(in_dev) && \
121 IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) 121 IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
122 122
123#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
124 IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
125
123#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) 126#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
124#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) 127#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
125#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) 128#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
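
The new macro follows the existing IN_DEV_CONF_GET() pattern. A hedged sketch of how route-selection code might consult it (the actual fib changes live outside this header, and the helper name here is invented):

#include <linux/inetdevice.h>
#include <linux/netdevice.h>

/* Illustrative helper, caller holds rcu_read_lock(): skip a nexthop
 * whose device has no carrier when the sysctl asks for it. */
static bool demo_skip_linkdown_route(struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);

	return in_dev && !netif_carrier_ok(dev) &&
	       IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev);
}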
diff --git a/include/linux/init.h b/include/linux/init.h
index 21b6d768edd7..b449f378f995 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -91,14 +91,6 @@
91 91
92#define __exit __section(.exit.text) __exitused __cold notrace 92#define __exit __section(.exit.text) __exitused __cold notrace
93 93
94/* temporary, until all users are removed */
95#define __cpuinit
96#define __cpuinitdata
97#define __cpuinitconst
98#define __cpuexit
99#define __cpuexitdata
100#define __cpuexitconst
101
102/* Used for MEMORY_HOTPLUG */ 94/* Used for MEMORY_HOTPLUG */
103#define __meminit __section(.meminit.text) __cold notrace 95#define __meminit __section(.meminit.text) __cold notrace
104#define __meminitdata __section(.meminit.data) 96#define __meminitdata __section(.meminit.data)
@@ -116,9 +108,6 @@
116#define __INITRODATA .section ".init.rodata","a",%progbits 108#define __INITRODATA .section ".init.rodata","a",%progbits
117#define __FINITDATA .previous 109#define __FINITDATA .previous
118 110
119/* temporary, until all users are removed */
120#define __CPUINIT
121
122#define __MEMINIT .section ".meminit.text", "ax" 111#define __MEMINIT .section ".meminit.text", "ax"
123#define __MEMINITDATA .section ".meminit.data", "aw" 112#define __MEMINITDATA .section ".meminit.data", "aw"
124#define __MEMINITRODATA .section ".meminit.rodata", "a" 113#define __MEMINITRODATA .section ".meminit.rodata", "a"
@@ -293,68 +282,8 @@ void __init parse_early_param(void);
293void __init parse_early_options(char *cmdline); 282void __init parse_early_options(char *cmdline);
294#endif /* __ASSEMBLY__ */ 283#endif /* __ASSEMBLY__ */
295 284
296/**
297 * module_init() - driver initialization entry point
298 * @x: function to be run at kernel boot time or module insertion
299 *
300 * module_init() will either be called during do_initcalls() (if
301 * builtin) or at module insertion time (if a module). There can only
302 * be one per module.
303 */
304#define module_init(x) __initcall(x);
305
306/**
307 * module_exit() - driver exit entry point
308 * @x: function to be run when driver is removed
309 *
310 * module_exit() will wrap the driver clean-up code
311 * with cleanup_module() when used with rmmod when
312 * the driver is a module. If the driver is statically
313 * compiled into the kernel, module_exit() has no effect.
314 * There can only be one per module.
315 */
316#define module_exit(x) __exitcall(x);
317
318#else /* MODULE */ 285#else /* MODULE */
319 286
320/*
321 * In most cases loadable modules do not need custom
322 * initcall levels. There are still some valid cases where
323 * a driver may be needed early if built in, and does not
324 * matter when built as a loadable module. Like bus
325 * snooping debug drivers.
326 */
327#define early_initcall(fn) module_init(fn)
328#define core_initcall(fn) module_init(fn)
329#define core_initcall_sync(fn) module_init(fn)
330#define postcore_initcall(fn) module_init(fn)
331#define postcore_initcall_sync(fn) module_init(fn)
332#define arch_initcall(fn) module_init(fn)
333#define subsys_initcall(fn) module_init(fn)
334#define subsys_initcall_sync(fn) module_init(fn)
335#define fs_initcall(fn) module_init(fn)
336#define fs_initcall_sync(fn) module_init(fn)
337#define rootfs_initcall(fn) module_init(fn)
338#define device_initcall(fn) module_init(fn)
339#define device_initcall_sync(fn) module_init(fn)
340#define late_initcall(fn) module_init(fn)
341#define late_initcall_sync(fn) module_init(fn)
342
343#define console_initcall(fn) module_init(fn)
344#define security_initcall(fn) module_init(fn)
345
346/* Each module must use one module_init(). */
347#define module_init(initfn) \
348 static inline initcall_t __inittest(void) \
349 { return initfn; } \
350 int init_module(void) __attribute__((alias(#initfn)));
351
352/* This is only required if you want to be unloadable. */
353#define module_exit(exitfn) \
354 static inline exitcall_t __exittest(void) \
355 { return exitfn; } \
356 void cleanup_module(void) __attribute__((alias(#exitfn)));
357
358#define __setup_param(str, unique_id, fn) /* nothing */ 287#define __setup_param(str, unique_id, fn) /* nothing */
359#define __setup(str, func) /* nothing */ 288#define __setup(str, func) /* nothing */
360#endif 289#endif
@@ -362,24 +291,6 @@ void __init parse_early_options(char *cmdline);
362/* Data marked not to be saved by software suspend */ 291/* Data marked not to be saved by software suspend */
363#define __nosavedata __section(.data..nosave) 292#define __nosavedata __section(.data..nosave)
364 293
365/* This means "can be init if no module support, otherwise module load
366 may call it." */
367#ifdef CONFIG_MODULES
368#define __init_or_module
369#define __initdata_or_module
370#define __initconst_or_module
371#define __INIT_OR_MODULE .text
372#define __INITDATA_OR_MODULE .data
373#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
374#else
375#define __init_or_module __init
376#define __initdata_or_module __initdata
377#define __initconst_or_module __initconst
378#define __INIT_OR_MODULE __INIT
379#define __INITDATA_OR_MODULE __INITDATA
380#define __INITRODATA_OR_MODULE __INITRODATA
381#endif /*CONFIG_MODULES*/
382
383#ifdef MODULE 294#ifdef MODULE
384#define __exit_p(x) x 295#define __exit_p(x) x
385#else 296#else
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index bb9b075f0eb0..e8493fee8160 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,13 +25,6 @@
25extern struct files_struct init_files; 25extern struct files_struct init_files;
26extern struct fs_struct init_fs; 26extern struct fs_struct init_fs;
27 27
28#ifdef CONFIG_CGROUPS
29#define INIT_GROUP_RWSEM(sig) \
30 .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
31#else
32#define INIT_GROUP_RWSEM(sig)
33#endif
34
35#ifdef CONFIG_CPUSETS 28#ifdef CONFIG_CPUSETS
36#define INIT_CPUSET_SEQ(tsk) \ 29#define INIT_CPUSET_SEQ(tsk) \
37 .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), 30 .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -55,7 +48,6 @@ extern struct fs_struct init_fs;
55 }, \ 48 }, \
56 .cred_guard_mutex = \ 49 .cred_guard_mutex = \
57 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 50 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
58 INIT_GROUP_RWSEM(sig) \
59} 51}
60 52
61extern struct nsproxy init_nsproxy; 53extern struct nsproxy init_nsproxy;
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index 08a5ef6e8f25..eecc9ea6cd58 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -12,9 +12,10 @@
12#include <linux/input.h> 12#include <linux/input.h>
13 13
14#ifdef CONFIG_OF 14#ifdef CONFIG_OF
15void touchscreen_parse_of_params(struct input_dev *dev); 15void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
16#else 16#else
17static inline void touchscreen_parse_of_params(struct input_dev *dev) 17static inline void touchscreen_parse_of_params(struct input_dev *dev,
18 bool multitouch)
18{ 19{
19} 20}
20#endif 21#endif
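
Callers now tell the helper whether the device reports multitouch axes, so it knows which axis set to clamp from DT properties. A minimal probe-time sketch with hypothetical driver names:

#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>

static int demo_ts_setup(struct input_dev *input)
{
	int error;

	/* declare the multitouch slots first (5 slots, hypothetical) ... */
	error = input_mt_init_slots(input, 5, INPUT_MT_DIRECT);
	if (error)
		return error;

	/* ... then let the DT properties clamp the MT axes */
	touchscreen_parse_of_params(input, true);
	return 0;
}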
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 3665cb331ca1..d9a366d24e3b 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -297,6 +297,7 @@ struct q_inval {
297/* 1MB - maximum possible interrupt remapping table size */ 297/* 1MB - maximum possible interrupt remapping table size */
298#define INTR_REMAP_PAGE_ORDER 8 298#define INTR_REMAP_PAGE_ORDER 8
299#define INTR_REMAP_TABLE_REG_SIZE 0xf 299#define INTR_REMAP_TABLE_REG_SIZE 0xf
300#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
300 301
301#define INTR_REMAP_TABLE_ENTRIES 65536 302#define INTR_REMAP_TABLE_ENTRIES 65536
302 303
@@ -323,6 +324,9 @@ enum {
323 MAX_SR_DMAR_REGS 324 MAX_SR_DMAR_REGS
324}; 325};
325 326
327#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
328#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
329
326struct intel_iommu { 330struct intel_iommu {
327 void __iomem *reg; /* Pointer to hardware regs, virtual addr */ 331 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
328 u64 reg_phys; /* physical address of hw register set */ 332 u64 reg_phys; /* physical address of hw register set */
@@ -356,6 +360,7 @@ struct intel_iommu {
356#endif 360#endif
357 struct device *iommu_dev; /* IOMMU-sysfs device */ 361 struct device *iommu_dev; /* IOMMU-sysfs device */
358 int node; 362 int node;
363 u32 flags; /* Software defined flags */
359}; 364};
360 365
361static inline void __iommu_flush_cache( 366static inline void __iommu_flush_cache(
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0546b8710ce3..f9c1b6d0f2e4 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -114,6 +114,20 @@ enum iommu_attr {
114 DOMAIN_ATTR_MAX, 114 DOMAIN_ATTR_MAX,
115}; 115};
116 116
117/**
118 * struct iommu_dm_region - descriptor for a direct mapped memory region
119 * @list: Linked list pointers
120 * @start: System physical start address of the region
121 * @length: Length of the region in bytes
122 * @prot: IOMMU Protection flags (READ/WRITE/...)
123 */
124struct iommu_dm_region {
125 struct list_head list;
126 phys_addr_t start;
127 size_t length;
128 int prot;
129};
130
117#ifdef CONFIG_IOMMU_API 131#ifdef CONFIG_IOMMU_API
118 132
119/** 133/**
@@ -159,6 +173,10 @@ struct iommu_ops {
159 int (*domain_set_attr)(struct iommu_domain *domain, 173 int (*domain_set_attr)(struct iommu_domain *domain,
160 enum iommu_attr attr, void *data); 174 enum iommu_attr attr, void *data);
161 175
176 /* Request/Free a list of direct mapping requirements for a device */
177 void (*get_dm_regions)(struct device *dev, struct list_head *list);
178 void (*put_dm_regions)(struct device *dev, struct list_head *list);
179
162 /* Window handling functions */ 180 /* Window handling functions */
163 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, 181 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
164 phys_addr_t paddr, u64 size, int prot); 182 phys_addr_t paddr, u64 size, int prot);
@@ -193,6 +211,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
193 struct device *dev); 211 struct device *dev);
194extern void iommu_detach_device(struct iommu_domain *domain, 212extern void iommu_detach_device(struct iommu_domain *domain,
195 struct device *dev); 213 struct device *dev);
214extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
196extern int iommu_map(struct iommu_domain *domain, unsigned long iova, 215extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
197 phys_addr_t paddr, size_t size, int prot); 216 phys_addr_t paddr, size_t size, int prot);
198extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, 217extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -204,6 +223,10 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
204extern void iommu_set_fault_handler(struct iommu_domain *domain, 223extern void iommu_set_fault_handler(struct iommu_domain *domain,
205 iommu_fault_handler_t handler, void *token); 224 iommu_fault_handler_t handler, void *token);
206 225
226extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
227extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
228extern int iommu_request_dm_for_dev(struct device *dev);
229
207extern int iommu_attach_group(struct iommu_domain *domain, 230extern int iommu_attach_group(struct iommu_domain *domain,
208 struct iommu_group *group); 231 struct iommu_group *group);
209extern void iommu_detach_group(struct iommu_domain *domain, 232extern void iommu_detach_group(struct iommu_domain *domain,
@@ -227,6 +250,7 @@ extern int iommu_group_unregister_notifier(struct iommu_group *group,
227 struct notifier_block *nb); 250 struct notifier_block *nb);
228extern int iommu_group_id(struct iommu_group *group); 251extern int iommu_group_id(struct iommu_group *group);
229extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); 252extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
253extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
230 254
231extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, 255extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
232 void *data); 256 void *data);
@@ -234,7 +258,7 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
234 void *data); 258 void *data);
235struct device *iommu_device_create(struct device *parent, void *drvdata, 259struct device *iommu_device_create(struct device *parent, void *drvdata,
236 const struct attribute_group **groups, 260 const struct attribute_group **groups,
237 const char *fmt, ...); 261 const char *fmt, ...) __printf(4, 5);
238void iommu_device_destroy(struct device *dev); 262void iommu_device_destroy(struct device *dev);
239int iommu_device_link(struct device *dev, struct device *link); 263int iommu_device_link(struct device *dev, struct device *link);
240void iommu_device_unlink(struct device *dev, struct device *link); 264void iommu_device_unlink(struct device *dev, struct device *link);
@@ -332,6 +356,11 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
332{ 356{
333} 357}
334 358
359static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
360{
361 return NULL;
362}
363
335static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, 364static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
336 phys_addr_t paddr, int gfp_order, int prot) 365 phys_addr_t paddr, int gfp_order, int prot)
337{ 366{
@@ -373,6 +402,21 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
373{ 402{
374} 403}
375 404
405static inline void iommu_get_dm_regions(struct device *dev,
406 struct list_head *list)
407{
408}
409
410static inline void iommu_put_dm_regions(struct device *dev,
411 struct list_head *list)
412{
413}
414
415static inline int iommu_request_dm_for_dev(struct device *dev)
416{
417 return -ENODEV;
418}
419
376static inline int iommu_attach_group(struct iommu_domain *domain, 420static inline int iommu_attach_group(struct iommu_domain *domain,
377 struct iommu_group *group) 421 struct iommu_group *group)
378{ 422{
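
The direct-mapped (dm) region interface lets callers ask an IOMMU driver which physical ranges a device needs identity-mapped. A hedged consumer sketch; error handling and the actual mapping step are elided:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>

/* Illustrative: walk the regions a device requires to stay
 * direct-mapped (e.g. firmware-reserved ranges). */
static void demo_dump_dm_regions(struct device *dev)
{
	struct iommu_dm_region *region;
	LIST_HEAD(regions);

	iommu_get_dm_regions(dev, &regions);
	list_for_each_entry(region, &regions, list)
		dev_info(dev, "direct map %pa + %zu (prot 0x%x)\n",
			 &region->start, region->length, region->prot);
	iommu_put_dm_regions(dev, &regions);
}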
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 812149160d3b..92188b0225bb 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -407,7 +407,6 @@ enum {
407 IRQCHIP_EOI_THREADED = (1 << 6), 407 IRQCHIP_EOI_THREADED = (1 << 6),
408}; 408};
409 409
410/* This include will go away once we isolated irq_desc usage to core code */
411#include <linux/irqdesc.h> 410#include <linux/irqdesc.h>
412 411
413/* 412/*
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 14d79131f53d..638887376e58 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -11,6 +11,20 @@
11#ifndef _LINUX_IRQCHIP_H 11#ifndef _LINUX_IRQCHIP_H
12#define _LINUX_IRQCHIP_H 12#define _LINUX_IRQCHIP_H
13 13
14#include <linux/of.h>
15
16/*
17 * This macro must be used by the different irqchip drivers to declare
18 * the association between their DT compatible string and their
19 * initialization function.
20 *
 21 * @name: name that must be unique across all IRQCHIP_DECLARE of the
 22 * same file.
 23 * @compat: compatible string of the irqchip driver
24 * @fn: initialization function
25 */
26#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
27
14#ifdef CONFIG_IRQCHIP 28#ifdef CONFIG_IRQCHIP
15void irqchip_init(void); 29void irqchip_init(void);
16#else 30#else
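
With the macro now available from <linux/irqchip.h>, a driver ties its DT compatible string to its init function like this (all names below are placeholders):

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>

static int __init demo_intc_of_init(struct device_node *node,
				    struct device_node *parent)
{
	/* map registers, set up the irq domain, etc. */
	return 0;
}
IRQCHIP_DECLARE(demo_intc, "vendor,demo-intc", demo_intc_of_init);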
diff --git a/include/linux/irqchip/ingenic.h b/include/linux/irqchip/ingenic.h
new file mode 100644
index 000000000000..0ee319a4029d
--- /dev/null
+++ b/include/linux/irqchip/ingenic.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * You should have received a copy of the GNU General Public License along
10 * with this program; if not, write to the Free Software Foundation, Inc.,
11 * 675 Mass Ave, Cambridge, MA 02139, USA.
12 *
13 */
14
15#ifndef __LINUX_IRQCHIP_INGENIC_H__
16#define __LINUX_IRQCHIP_INGENIC_H__
17
18#include <linux/irq.h>
19
20extern void ingenic_intc_irq_suspend(struct irq_data *data);
21extern void ingenic_intc_irq_resume(struct irq_data *data);
22
23#endif
diff --git a/include/linux/irqchip/irq-sa11x0.h b/include/linux/irqchip/irq-sa11x0.h
new file mode 100644
index 000000000000..15db6829c1e4
--- /dev/null
+++ b/include/linux/irqchip/irq-sa11x0.h
@@ -0,0 +1,17 @@
1/*
2 * Generic IRQ handling for the SA11x0.
3 *
4 * Copyright (C) 2015 Dmitry Eremin-Solenikov
5 * Copyright (C) 1999-2001 Nicolas Pitre
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H
13#define __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H
14
15void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start);
16
17#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index c52d1480f272..fcea4e48e21f 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -3,9 +3,6 @@
3 3
4/* 4/*
5 * Core internal functions to deal with irq descriptors 5 * Core internal functions to deal with irq descriptors
6 *
7 * This include will move to kernel/irq once we cleaned up the tree.
8 * For now it's included from <linux/irq.h>
9 */ 6 */
10 7
11struct irq_affinity_notify; 8struct irq_affinity_notify;
@@ -90,7 +87,12 @@ struct irq_desc {
90 const char *name; 87 const char *name;
91} ____cacheline_internodealigned_in_smp; 88} ____cacheline_internodealigned_in_smp;
92 89
93#ifndef CONFIG_SPARSE_IRQ 90#ifdef CONFIG_SPARSE_IRQ
91extern void irq_lock_sparse(void);
92extern void irq_unlock_sparse(void);
93#else
94static inline void irq_lock_sparse(void) { }
95static inline void irq_unlock_sparse(void) { }
94extern struct irq_desc irq_desc[NR_IRQS]; 96extern struct irq_desc irq_desc[NR_IRQS];
95#endif 97#endif
96 98
@@ -103,6 +105,11 @@ static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
103#endif 105#endif
104} 106}
105 107
108static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
109{
110 return desc->irq_data.irq;
111}
112
106static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) 113static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
107{ 114{
108 return &desc->irq_data; 115 return &desc->irq_data;
@@ -188,6 +195,47 @@ __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
188 desc->name = name; 195 desc->name = name;
189} 196}
190 197
198/**
199 * irq_set_handler_locked - Set irq handler from a locked region
200 * @data: Pointer to the irq_data structure which identifies the irq
201 * @handler: Flow control handler function for this interrupt
202 *
 203 * Sets the handler in the irq descriptor associated with @data.
204 *
205 * Must be called with irq_desc locked and valid parameters. Typical
206 * call site is the irq_set_type() callback.
207 */
208static inline void irq_set_handler_locked(struct irq_data *data,
209 irq_flow_handler_t handler)
210{
211 struct irq_desc *desc = irq_data_to_desc(data);
212
213 desc->handle_irq = handler;
214}
215
216/**
217 * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
218 * @data: Pointer to the irq_data structure for which the chip is set
219 * @chip: Pointer to the new irq chip
220 * @handler: Flow control handler function for this interrupt
221 * @name: Name of the interrupt
222 *
 223 * Replaces the irq chip at the proper hierarchy level in @data and
 224 * sets the handler and name in the associated irq descriptor.
225 *
226 * Must be called with irq_desc locked and valid parameters.
227 */
228static inline void
229irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
230 irq_flow_handler_t handler, const char *name)
231{
232 struct irq_desc *desc = irq_data_to_desc(data);
233
234 desc->handle_irq = handler;
235 desc->name = name;
236 data->chip = chip;
237}
238
191static inline int irq_balancing_disabled(unsigned int irq) 239static inline int irq_balancing_disabled(unsigned int irq)
192{ 240{
193 struct irq_desc *desc; 241 struct irq_desc *desc;
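
As the added kernel-doc notes, the typical call site for the new locked helpers is an irq_chip's irq_set_type() hook, where the descriptor lock is already held. A hedged sketch of that pattern:

#include <linux/irq.h>

/* Illustrative irq_set_type() implementation: switch the flow handler
 * to match the requested trigger while the descriptor is locked. */
static int demo_irq_set_type(struct irq_data *d, unsigned int type)
{
	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	/* program the hardware trigger here ... */
	return 0;
}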
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index fdd5cc16c9c4..9669bf9d4f48 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -23,12 +23,6 @@ unsigned int irq_get_next_irq(unsigned int offset);
23 ; \ 23 ; \
24 else 24 else
25 25
26#ifdef CONFIG_SMP
27#define irq_node(irq) (irq_get_irq_data(irq)->node)
28#else
29#define irq_node(irq) 0
30#endif
31
32# define for_each_active_irq(irq) \ 26# define for_each_active_irq(irq) \
33 for (irq = irq_get_next_irq(0); irq < nr_irqs; \ 27 for (irq = irq_get_next_irq(0); irq < nr_irqs; \
34 irq = irq_get_next_irq(irq + 1)) 28 irq = irq_get_next_irq(irq + 1))
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 20e7f78041c8..edb640ae9a94 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
1035int jbd2_journal_next_log_block(journal_t *, unsigned long long *); 1035int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
1036int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, 1036int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
1037 unsigned long *block); 1037 unsigned long *block);
1038void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); 1038int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
1039void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); 1039void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
1040 1040
1041/* Commit management */ 1041/* Commit management */
@@ -1157,7 +1157,7 @@ extern int jbd2_journal_recover (journal_t *journal);
1157extern int jbd2_journal_wipe (journal_t *, int); 1157extern int jbd2_journal_wipe (journal_t *, int);
1158extern int jbd2_journal_skip_recovery (journal_t *); 1158extern int jbd2_journal_skip_recovery (journal_t *);
1159extern void jbd2_journal_update_sb_errno(journal_t *); 1159extern void jbd2_journal_update_sb_errno(journal_t *);
1160extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t, 1160extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
1161 unsigned long, int); 1161 unsigned long, int);
1162extern void __jbd2_journal_abort_hard (journal_t *); 1162extern void __jbd2_journal_abort_hard (journal_t *);
1163extern void jbd2_journal_abort (journal_t *, int); 1163extern void jbd2_journal_abort (journal_t *, int);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 060dd7b61c6d..5582410727cb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -411,7 +411,8 @@ extern __printf(3, 0)
411int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); 411int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
412extern __printf(2, 3) 412extern __printf(2, 3)
413char *kasprintf(gfp_t gfp, const char *fmt, ...); 413char *kasprintf(gfp_t gfp, const char *fmt, ...);
414extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); 414extern __printf(2, 0)
415char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
415 416
416extern __scanf(2, 3) 417extern __scanf(2, 3)
417int sscanf(const char *, const char *, ...); 418int sscanf(const char *, const char *, ...);
@@ -439,6 +440,9 @@ extern int panic_on_unrecovered_nmi;
439extern int panic_on_io_nmi; 440extern int panic_on_io_nmi;
440extern int panic_on_warn; 441extern int panic_on_warn;
441extern int sysctl_panic_on_stackoverflow; 442extern int sysctl_panic_on_stackoverflow;
443
444extern bool crash_kexec_post_notifiers;
445
442/* 446/*
443 * Only to be used by arch init code. If the user over-wrote the default 447 * Only to be used by arch init code. If the user over-wrote the default
444 * CONFIG_PANIC_TIMEOUT, honor it. 448 * CONFIG_PANIC_TIMEOUT, honor it.
@@ -533,12 +537,6 @@ bool mac_pton(const char *s, u8 *mac);
533 * 537 *
534 * Most likely, you want to use tracing_on/tracing_off. 538 * Most likely, you want to use tracing_on/tracing_off.
535 */ 539 */
536#ifdef CONFIG_RING_BUFFER
537/* trace_off_permanent stops recording with no way to bring it back */
538void tracing_off_permanent(void);
539#else
540static inline void tracing_off_permanent(void) { }
541#endif
542 540
543enum ftrace_dump_mode { 541enum ftrace_dump_mode {
544 DUMP_NONE, 542 DUMP_NONE,
@@ -682,10 +680,10 @@ do { \
682 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ 680 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
683} while (0) 681} while (0)
684 682
685extern int 683extern __printf(2, 0) int
686__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); 684__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
687 685
688extern int 686extern __printf(2, 0) int
689__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); 687__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
690 688
691extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); 689extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
@@ -705,7 +703,7 @@ int trace_printk(const char *fmt, ...)
705{ 703{
706 return 0; 704 return 0;
707} 705}
708static inline int 706static __printf(1, 0) inline int
709ftrace_vprintk(const char *fmt, va_list ap) 707ftrace_vprintk(const char *fmt, va_list ap)
710{ 708{
711 return 0; 709 return 0;
@@ -819,13 +817,15 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
819#endif 817#endif
820 818
821/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */ 819/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
822#define VERIFY_OCTAL_PERMISSIONS(perms) \ 820#define VERIFY_OCTAL_PERMISSIONS(perms) \
823 (BUILD_BUG_ON_ZERO((perms) < 0) + \ 821 (BUILD_BUG_ON_ZERO((perms) < 0) + \
824 BUILD_BUG_ON_ZERO((perms) > 0777) + \ 822 BUILD_BUG_ON_ZERO((perms) > 0777) + \
825 /* User perms >= group perms >= other perms */ \ 823 /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
826 BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \ 824 BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
827 BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \ 825 BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
828 /* Other writable? Generally considered a bad idea. */ \ 826 /* USER_WRITABLE >= GROUP_WRITABLE */ \
829 BUILD_BUG_ON_ZERO((perms) & 2) + \ 827 BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
828 /* OTHER_WRITABLE? Generally considered a bad idea. */ \
829 BUILD_BUG_ON_ZERO((perms) & 2) + \
830 (perms)) 830 (perms))
831#endif 831#endif
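
The extra __printf(n, 0) annotations let gcc check the format string of functions that take a va_list instead of variadic arguments (the 0 means "no variadic arguments to verify"). A small sketch of the same pattern on a hypothetical wrapper:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/printk.h>

/* Argument 2 is a printf-style format; there are no "..." args. */
static __printf(2, 0)
void demo_vlog(struct device *dev, const char *fmt, va_list args)
{
	struct va_format vaf = { .fmt = fmt, .va = &args };

	dev_info(dev, "%pV", &vaf);
}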
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 71ecdab1671b..123be25ea15a 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -45,6 +45,7 @@ enum kernfs_node_flag {
45 KERNFS_LOCKDEP = 0x0100, 45 KERNFS_LOCKDEP = 0x0100,
46 KERNFS_SUICIDAL = 0x0400, 46 KERNFS_SUICIDAL = 0x0400,
47 KERNFS_SUICIDED = 0x0800, 47 KERNFS_SUICIDED = 0x0800,
48 KERNFS_EMPTY_DIR = 0x1000,
48}; 49};
49 50
50/* @flags for kernfs_create_root() */ 51/* @flags for kernfs_create_root() */
@@ -277,6 +278,7 @@ void kernfs_put(struct kernfs_node *kn);
277 278
278struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry); 279struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry);
279struct kernfs_root *kernfs_root_from_sb(struct super_block *sb); 280struct kernfs_root *kernfs_root_from_sb(struct super_block *sb);
281struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn);
280 282
281struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, 283struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
282 unsigned int flags, void *priv); 284 unsigned int flags, void *priv);
@@ -285,6 +287,8 @@ void kernfs_destroy_root(struct kernfs_root *root);
285struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, 287struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
286 const char *name, umode_t mode, 288 const char *name, umode_t mode,
287 void *priv, const void *ns); 289 void *priv, const void *ns);
290struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
291 const char *name);
288struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, 292struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
289 const char *name, 293 const char *name,
290 umode_t mode, loff_t size, 294 umode_t mode, loff_t size,
@@ -352,6 +356,10 @@ static inline struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
352static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) 356static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
353{ return NULL; } 357{ return NULL; }
354 358
359static inline struct inode *
360kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
361{ return NULL; }
362
355static inline struct kernfs_root * 363static inline struct kernfs_root *
356kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags, 364kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags,
357 void *priv) 365 void *priv)
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index e705467ddb47..d0a1f99e24e3 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -28,7 +28,8 @@
28extern void kmemleak_init(void) __ref; 28extern void kmemleak_init(void) __ref;
29extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, 29extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
30 gfp_t gfp) __ref; 30 gfp_t gfp) __ref;
31extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref; 31extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
32 gfp_t gfp) __ref;
32extern void kmemleak_free(const void *ptr) __ref; 33extern void kmemleak_free(const void *ptr) __ref;
33extern void kmemleak_free_part(const void *ptr, size_t size) __ref; 34extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
34extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; 35extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
@@ -71,7 +72,8 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
71 gfp_t gfp) 72 gfp_t gfp)
72{ 73{
73} 74}
74static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) 75static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
76 gfp_t gfp)
75{ 77{
76} 78}
77static inline void kmemleak_free(const void *ptr) 79static inline void kmemleak_free(const void *ptr)
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 2d61b909f414..637f67002c5a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -80,8 +80,9 @@ struct kobject {
80 80
81extern __printf(2, 3) 81extern __printf(2, 3)
82int kobject_set_name(struct kobject *kobj, const char *name, ...); 82int kobject_set_name(struct kobject *kobj, const char *name, ...);
83extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, 83extern __printf(2, 0)
84 va_list vargs); 84int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
85 va_list vargs);
85 86
86static inline const char *kobject_name(const struct kobject *kobj) 87static inline const char *kobject_name(const struct kobject *kobj)
87{ 88{
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ad45054309a0..05e99b8ef465 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -44,6 +44,10 @@
44/* Two fragments for cross MMIO pages. */ 44/* Two fragments for cross MMIO pages. */
45#define KVM_MAX_MMIO_FRAGMENTS 2 45#define KVM_MAX_MMIO_FRAGMENTS 2
46 46
47#ifndef KVM_ADDRESS_SPACE_NUM
48#define KVM_ADDRESS_SPACE_NUM 1
49#endif
50
47/* 51/*
48 * For the normal pfn, the highest 12 bits should be zero, 52 * For the normal pfn, the highest 12 bits should be zero,
49 * so we can mask bit 62 ~ bit 52 to indicate the error pfn, 53 * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
@@ -134,6 +138,7 @@ static inline bool is_error_page(struct page *page)
134#define KVM_REQ_ENABLE_IBS 23 138#define KVM_REQ_ENABLE_IBS 23
135#define KVM_REQ_DISABLE_IBS 24 139#define KVM_REQ_DISABLE_IBS 24
136#define KVM_REQ_APIC_PAGE_RELOAD 25 140#define KVM_REQ_APIC_PAGE_RELOAD 25
141#define KVM_REQ_SMI 26
137 142
138#define KVM_USERSPACE_IRQ_SOURCE_ID 0 143#define KVM_USERSPACE_IRQ_SOURCE_ID 0
139#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 144#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -230,6 +235,7 @@ struct kvm_vcpu {
230 235
231 int fpu_active; 236 int fpu_active;
232 int guest_fpu_loaded, guest_xcr0_loaded; 237 int guest_fpu_loaded, guest_xcr0_loaded;
238 unsigned char fpu_counter;
233 wait_queue_head_t wq; 239 wait_queue_head_t wq;
234 struct pid *pid; 240 struct pid *pid;
235 int sigset_active; 241 int sigset_active;
@@ -329,6 +335,13 @@ struct kvm_kernel_irq_routing_entry {
329#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) 335#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
330#endif 336#endif
331 337
338#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
339static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
340{
341 return 0;
342}
343#endif
344
332/* 345/*
333 * Note: 346 * Note:
334 * memslots are not sorted by id anymore, please use id_to_memslot() 347 * memslots are not sorted by id anymore, please use id_to_memslot()
@@ -347,7 +360,7 @@ struct kvm {
347 spinlock_t mmu_lock; 360 spinlock_t mmu_lock;
348 struct mutex slots_lock; 361 struct mutex slots_lock;
349 struct mm_struct *mm; /* userspace tied to this vm */ 362 struct mm_struct *mm; /* userspace tied to this vm */
350 struct kvm_memslots *memslots; 363 struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
351 struct srcu_struct srcu; 364 struct srcu_struct srcu;
352 struct srcu_struct irq_srcu; 365 struct srcu_struct irq_srcu;
353#ifdef CONFIG_KVM_APIC_ARCHITECTURE 366#ifdef CONFIG_KVM_APIC_ARCHITECTURE
@@ -462,13 +475,25 @@ void kvm_exit(void);
462void kvm_get_kvm(struct kvm *kvm); 475void kvm_get_kvm(struct kvm *kvm);
463void kvm_put_kvm(struct kvm *kvm); 476void kvm_put_kvm(struct kvm *kvm);
464 477
465static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) 478static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
466{ 479{
467 return rcu_dereference_check(kvm->memslots, 480 return rcu_dereference_check(kvm->memslots[as_id],
468 srcu_read_lock_held(&kvm->srcu) 481 srcu_read_lock_held(&kvm->srcu)
469 || lockdep_is_held(&kvm->slots_lock)); 482 || lockdep_is_held(&kvm->slots_lock));
470} 483}
471 484
485static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
486{
487 return __kvm_memslots(kvm, 0);
488}
489
490static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
491{
492 int as_id = kvm_arch_vcpu_memslots_id(vcpu);
493
494 return __kvm_memslots(vcpu->kvm, as_id);
495}
496
472static inline struct kvm_memory_slot * 497static inline struct kvm_memory_slot *
473id_to_memslot(struct kvm_memslots *slots, int id) 498id_to_memslot(struct kvm_memslots *slots, int id)
474{ 499{
@@ -500,21 +525,22 @@ enum kvm_mr_change {
500}; 525};
501 526
502int kvm_set_memory_region(struct kvm *kvm, 527int kvm_set_memory_region(struct kvm *kvm,
503 struct kvm_userspace_memory_region *mem); 528 const struct kvm_userspace_memory_region *mem);
504int __kvm_set_memory_region(struct kvm *kvm, 529int __kvm_set_memory_region(struct kvm *kvm,
505 struct kvm_userspace_memory_region *mem); 530 const struct kvm_userspace_memory_region *mem);
506void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 531void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
507 struct kvm_memory_slot *dont); 532 struct kvm_memory_slot *dont);
508int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 533int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
509 unsigned long npages); 534 unsigned long npages);
510void kvm_arch_memslots_updated(struct kvm *kvm); 535void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
511int kvm_arch_prepare_memory_region(struct kvm *kvm, 536int kvm_arch_prepare_memory_region(struct kvm *kvm,
512 struct kvm_memory_slot *memslot, 537 struct kvm_memory_slot *memslot,
513 struct kvm_userspace_memory_region *mem, 538 const struct kvm_userspace_memory_region *mem,
514 enum kvm_mr_change change); 539 enum kvm_mr_change change);
515void kvm_arch_commit_memory_region(struct kvm *kvm, 540void kvm_arch_commit_memory_region(struct kvm *kvm,
516 struct kvm_userspace_memory_region *mem, 541 const struct kvm_userspace_memory_region *mem,
517 const struct kvm_memory_slot *old, 542 const struct kvm_memory_slot *old,
543 const struct kvm_memory_slot *new,
518 enum kvm_mr_change change); 544 enum kvm_mr_change change);
519bool kvm_largepages_enabled(void); 545bool kvm_largepages_enabled(void);
520void kvm_disable_largepages(void); 546void kvm_disable_largepages(void);
@@ -524,8 +550,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
524void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 550void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
525 struct kvm_memory_slot *slot); 551 struct kvm_memory_slot *slot);
526 552
527int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, 553int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
528 int nr_pages); 554 struct page **pages, int nr_pages);
529 555
530struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 556struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
531unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 557unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
@@ -538,13 +564,13 @@ void kvm_release_page_dirty(struct page *page);
538void kvm_set_page_accessed(struct page *page); 564void kvm_set_page_accessed(struct page *page);
539 565
540pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); 566pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
541pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
542 bool write_fault, bool *writable);
543pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); 567pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
544pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 568pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
545 bool *writable); 569 bool *writable);
546pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); 570pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
547pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); 571pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
572pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
573 bool *async, bool write_fault, bool *writable);
548 574
549void kvm_release_pfn_clean(pfn_t pfn); 575void kvm_release_pfn_clean(pfn_t pfn);
550void kvm_set_pfn_dirty(pfn_t pfn); 576void kvm_set_pfn_dirty(pfn_t pfn);
@@ -573,6 +599,25 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
573unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); 599unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
574void mark_page_dirty(struct kvm *kvm, gfn_t gfn); 600void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
575 601
602struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
603struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
604pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
605pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
606struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
607unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
608unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
609int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
610 int len);
611int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
612 unsigned long len);
613int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
614 unsigned long len);
615int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
616 int offset, int len);
617int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
618 unsigned long len);
619void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
620
576void kvm_vcpu_block(struct kvm_vcpu *vcpu); 621void kvm_vcpu_block(struct kvm_vcpu *vcpu);
577void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 622void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
578int kvm_vcpu_yield_to(struct kvm_vcpu *target); 623int kvm_vcpu_yield_to(struct kvm_vcpu *target);
@@ -689,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
689 return false; 734 return false;
690} 735}
691#endif 736#endif
737#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
738void kvm_arch_start_assignment(struct kvm *kvm);
739void kvm_arch_end_assignment(struct kvm *kvm);
740bool kvm_arch_has_assigned_device(struct kvm *kvm);
741#else
742static inline void kvm_arch_start_assignment(struct kvm *kvm)
743{
744}
745
746static inline void kvm_arch_end_assignment(struct kvm *kvm)
747{
748}
749
750static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
751{
752 return false;
753}
754#endif
692 755
693static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) 756static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
694{ 757{
@@ -762,16 +825,10 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
762} 825}
763#endif 826#endif
764 827
765static inline void kvm_guest_enter(void) 828/* must be called with irqs disabled */
829static inline void __kvm_guest_enter(void)
766{ 830{
767 unsigned long flags;
768
769 BUG_ON(preemptible());
770
771 local_irq_save(flags);
772 guest_enter(); 831 guest_enter();
773 local_irq_restore(flags);
774
775 /* KVM does not hold any references to rcu protected data when it 832 /* KVM does not hold any references to rcu protected data when it
776 * switches CPU into a guest mode. In fact switching to a guest mode 833 * switches CPU into a guest mode. In fact switching to a guest mode
777 * is very similar to exiting to userspace from rcu point of view. In 834 * is very similar to exiting to userspace from rcu point of view. In
@@ -783,12 +840,27 @@ static inline void kvm_guest_enter(void)
783 rcu_virt_note_context_switch(smp_processor_id()); 840 rcu_virt_note_context_switch(smp_processor_id());
784} 841}
785 842
843/* must be called with irqs disabled */
844static inline void __kvm_guest_exit(void)
845{
846 guest_exit();
847}
848
849static inline void kvm_guest_enter(void)
850{
851 unsigned long flags;
852
853 local_irq_save(flags);
854 __kvm_guest_enter();
855 local_irq_restore(flags);
856}
857
786static inline void kvm_guest_exit(void) 858static inline void kvm_guest_exit(void)
787{ 859{
788 unsigned long flags; 860 unsigned long flags;
789 861
790 local_irq_save(flags); 862 local_irq_save(flags);
791 guest_exit(); 863 __kvm_guest_exit();
792 local_irq_restore(flags); 864 local_irq_restore(flags);
793} 865}
794 866
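
With multiple memslot address spaces, generic code is expected to go through the new kvm_vcpu_* accessors so the right slot set (e.g. SMM vs. normal on x86) is picked for the running vCPU. A minimal sketch based on the declarations above:

#include <linux/kvm_host.h>

/* Illustrative: read a guest u32 using the vCPU's current address
 * space rather than the VM-wide kvm_read_guest(). */
static int demo_read_guest_u32(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *val)
{
	return kvm_vcpu_read_guest(vcpu, gpa, val, sizeof(*val));
}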
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 931da7e917cf..1b47a185c2f0 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -28,6 +28,7 @@ struct kvm_run;
28struct kvm_userspace_memory_region; 28struct kvm_userspace_memory_region;
29struct kvm_vcpu; 29struct kvm_vcpu;
30struct kvm_vcpu_init; 30struct kvm_vcpu_init;
31struct kvm_memslots;
31 32
32enum kvm_mr_change; 33enum kvm_mr_change;
33 34
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 9a2b000094cf..b122eeafb5dc 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -12,6 +12,7 @@
12#ifndef __LINUX_LEDS_H_INCLUDED 12#ifndef __LINUX_LEDS_H_INCLUDED
13#define __LINUX_LEDS_H_INCLUDED 13#define __LINUX_LEDS_H_INCLUDED
14 14
15#include <linux/device.h>
15#include <linux/list.h> 16#include <linux/list.h>
16#include <linux/mutex.h> 17#include <linux/mutex.h>
17#include <linux/rwsem.h> 18#include <linux/rwsem.h>
@@ -222,6 +223,11 @@ struct led_trigger {
222 struct list_head next_trig; 223 struct list_head next_trig;
223}; 224};
224 225
226ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
227 const char *buf, size_t count);
228ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
229 char *buf);
230
225/* Registration functions for complex triggers */ 231/* Registration functions for complex triggers */
226extern int led_trigger_register(struct led_trigger *trigger); 232extern int led_trigger_register(struct led_trigger *trigger);
227extern void led_trigger_unregister(struct led_trigger *trigger); 233extern void led_trigger_unregister(struct led_trigger *trigger);
@@ -238,6 +244,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
238 unsigned long *delay_on, 244 unsigned long *delay_on,
239 unsigned long *delay_off, 245 unsigned long *delay_off,
240 int invert); 246 int invert);
247extern void led_trigger_set_default(struct led_classdev *led_cdev);
248extern void led_trigger_set(struct led_classdev *led_cdev,
249 struct led_trigger *trigger);
250extern void led_trigger_remove(struct led_classdev *led_cdev);
251
252static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
253{
254 return led_cdev->trigger_data;
255}
256
241/** 257/**
242 * led_trigger_rename_static - rename a trigger 258 * led_trigger_rename_static - rename a trigger
243 * @name: the new trigger name 259 * @name: the new trigger name
@@ -267,6 +283,15 @@ static inline void led_trigger_register_simple(const char *name,
267static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} 283static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
268static inline void led_trigger_event(struct led_trigger *trigger, 284static inline void led_trigger_event(struct led_trigger *trigger,
269 enum led_brightness event) {} 285 enum led_brightness event) {}
286static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
287static inline void led_trigger_set(struct led_classdev *led_cdev,
288 struct led_trigger *trigger) {}
289static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
290static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
291{
292 return NULL;
293}
294
270#endif /* CONFIG_LEDS_TRIGGERS */ 295#endif /* CONFIG_LEDS_TRIGGERS */
271 296
272/* Trigger specific functions */ 297/* Trigger specific functions */
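
The newly exported trigger hooks and led_get_trigger_data() let simple triggers keep per-LED state in trigger_data. A hedged activate/deactivate pair with hypothetical names:

#include <linux/leds.h>
#include <linux/slab.h>

struct demo_trig_data {
	unsigned long counter;
};

static void demo_trig_activate(struct led_classdev *led_cdev)
{
	struct demo_trig_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	/* read back later via led_get_trigger_data(led_cdev) */
	led_cdev->trigger_data = data;
}

static void demo_trig_deactivate(struct led_classdev *led_cdev)
{
	kfree(led_get_trigger_data(led_cdev));
	led_cdev->trigger_data = NULL;
}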
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 28aeae46f355..c9cfbcdb8d14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -134,7 +134,6 @@ enum {
134 ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, 134 ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1,
135 135
136 ATA_SHT_EMULATED = 1, 136 ATA_SHT_EMULATED = 1,
137 ATA_SHT_CMD_PER_LUN = 1,
138 ATA_SHT_THIS_ID = -1, 137 ATA_SHT_THIS_ID = -1,
139 ATA_SHT_USE_CLUSTERING = 1, 138 ATA_SHT_USE_CLUSTERING = 1,
140 139
@@ -431,6 +430,9 @@ enum {
431 ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ 430 ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
432 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ 431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
433 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ 432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
434 ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
435 ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
434 436
435 /* DMA mask for user DMA control: User visible values; DO NOT 437 /* DMA mask for user DMA control: User visible values; DO NOT
436 renumber */ 438 renumber */
@@ -1364,7 +1366,6 @@ extern struct device_attribute *ata_common_sdev_attrs[];
1364 .can_queue = ATA_DEF_QUEUE, \ 1366 .can_queue = ATA_DEF_QUEUE, \
1365 .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ 1367 .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
1366 .this_id = ATA_SHT_THIS_ID, \ 1368 .this_id = ATA_SHT_THIS_ID, \
1367 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
1368 .emulated = ATA_SHT_EMULATED, \ 1369 .emulated = ATA_SHT_EMULATED, \
1369 .use_clustering = ATA_SHT_USE_CLUSTERING, \ 1370 .use_clustering = ATA_SHT_USE_CLUSTERING, \
1370 .proc_name = drv_name, \ 1371 .proc_name = drv_name, \
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 01508c7b8c81..2a663c6bb428 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -5,6 +5,10 @@
5 5
6#include <asm/byteorder.h> 6#include <asm/byteorder.h>
7 7
8typedef __be16 fdt16_t;
9typedef __be32 fdt32_t;
10typedef __be64 fdt64_t;
11
8#define fdt32_to_cpu(x) be32_to_cpu(x) 12#define fdt32_to_cpu(x) be32_to_cpu(x)
9#define cpu_to_fdt32(x) cpu_to_be32(x) 13#define cpu_to_fdt32(x) cpu_to_be32(x)
10#define fdt64_to_cpu(x) be64_to_cpu(x) 14#define fdt64_to_cpu(x) be64_to_cpu(x)
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
new file mode 100644
index 000000000000..75e3af01ee32
--- /dev/null
+++ b/include/linux/libnvdimm.h
@@ -0,0 +1,151 @@
1/*
2 * libnvdimm - Non-volatile-memory Devices Subsystem
3 *
4 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of version 2 of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15#ifndef __LIBNVDIMM_H__
16#define __LIBNVDIMM_H__
17#include <linux/kernel.h>
18#include <linux/sizes.h>
19#include <linux/types.h>
20
21enum {
22 /* when a dimm supports both PMEM and BLK access a label is required */
23 NDD_ALIASING = 1 << 0,
24 /* unarmed memory devices may not persist writes */
25 NDD_UNARMED = 1 << 1,
26
27 /* need to set a limit somewhere, but yes, this is likely overkill */
28 ND_IOCTL_MAX_BUFLEN = SZ_4M,
29 ND_CMD_MAX_ELEM = 4,
30 ND_CMD_MAX_ENVELOPE = 16,
31 ND_CMD_ARS_STATUS_MAX = SZ_4K,
32 ND_MAX_MAPPINGS = 32,
33
34 /* mark newly adjusted resources as requiring a label update */
35 DPA_RESOURCE_ADJUSTED = 1 << 0,
36};
37
38extern struct attribute_group nvdimm_bus_attribute_group;
39extern struct attribute_group nvdimm_attribute_group;
40extern struct attribute_group nd_device_attribute_group;
41extern struct attribute_group nd_numa_attribute_group;
42extern struct attribute_group nd_region_attribute_group;
43extern struct attribute_group nd_mapping_attribute_group;
44
45struct nvdimm;
46struct nvdimm_bus_descriptor;
47typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
48 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
49 unsigned int buf_len);
50
51struct nd_namespace_label;
52struct nvdimm_drvdata;
53struct nd_mapping {
54 struct nvdimm *nvdimm;
55 struct nd_namespace_label **labels;
56 u64 start;
57 u64 size;
58 /*
59 * @ndd is for private use at region enable / disable time for
 60 * get_ndd() + put_ndd(); all other nd_mapping to ndd
 61 * conversions use to_ndd(), which respects the enabled
 62 * state of the nvdimm.
63 */
64 struct nvdimm_drvdata *ndd;
65};
66
67struct nvdimm_bus_descriptor {
68 const struct attribute_group **attr_groups;
69 unsigned long dsm_mask;
70 char *provider_name;
71 ndctl_fn ndctl;
72};
73
74struct nd_cmd_desc {
75 int in_num;
76 int out_num;
77 u32 in_sizes[ND_CMD_MAX_ELEM];
78 int out_sizes[ND_CMD_MAX_ELEM];
79};
80
81struct nd_interleave_set {
82 u64 cookie;
83};
84
85struct nd_region_desc {
86 struct resource *res;
87 struct nd_mapping *nd_mapping;
88 u16 num_mappings;
89 const struct attribute_group **attr_groups;
90 struct nd_interleave_set *nd_set;
91 void *provider_data;
92 int num_lanes;
93 int numa_node;
94};
95
96struct nvdimm_bus;
97struct module;
98struct device;
99struct nd_blk_region;
100struct nd_blk_region_desc {
101 int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
102 void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
103 int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
104 void *iobuf, u64 len, int rw);
105 struct nd_region_desc ndr_desc;
106};
107
108static inline struct nd_blk_region_desc *to_blk_region_desc(
109 struct nd_region_desc *ndr_desc)
110{
111 return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
112
113}
114
115struct nvdimm_bus *__nvdimm_bus_register(struct device *parent,
116 struct nvdimm_bus_descriptor *nfit_desc, struct module *module);
117#define nvdimm_bus_register(parent, desc) \
118 __nvdimm_bus_register(parent, desc, THIS_MODULE)
119void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
120struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
121struct nvdimm *to_nvdimm(struct device *dev);
122struct nd_region *to_nd_region(struct device *dev);
123struct nd_blk_region *to_nd_blk_region(struct device *dev);
124struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
125const char *nvdimm_name(struct nvdimm *nvdimm);
126void *nvdimm_provider_data(struct nvdimm *nvdimm);
127struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
128 const struct attribute_group **groups, unsigned long flags,
129 unsigned long *dsm_mask);
130const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
131const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
132u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
133 const struct nd_cmd_desc *desc, int idx, void *buf);
134u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
135 const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
136 const u32 *out_field);
137int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count);
138struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
139 struct nd_region_desc *ndr_desc);
140struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
141 struct nd_region_desc *ndr_desc);
142struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
143 struct nd_region_desc *ndr_desc);
144void *nd_region_provider_data(struct nd_region *nd_region);
145void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
146void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
147struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
148unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
149void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
150u64 nd_fletcher64(void *addr, size_t len, bool le);
151#endif /* __LIBNVDIMM_H__ */
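
To illustrate how the bus-level API above fits together, a minimal, hypothetical consumer might look like the sketch below; the "example" provider name, the example_ndctl() handler, and the NULL groups/flags/dsm_mask arguments are illustrative assumptions rather than anything defined by this header.

	#include <linux/device.h>
	#include <linux/libnvdimm.h>

	/* Hypothetical command handler matching the ndctl_fn signature. */
	static int example_ndctl(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len)
	{
		return -ENOTTY;	/* no DSM commands implemented in this sketch */
	}

	static struct nvdimm_bus_descriptor example_desc = {
		.provider_name = "example",
		.ndctl = example_ndctl,
	};

	static int example_register(struct device *parent)
	{
		struct nvdimm_bus *bus;
		struct nvdimm *nvdimm;

		/* nvdimm_bus_register() pins THIS_MODULE via __nvdimm_bus_register() */
		bus = nvdimm_bus_register(parent, &example_desc);
		if (!bus)
			return -ENOMEM;

		/* one DIMM; no extra sysfs groups, flags, or DSM mask in this sketch */
		nvdimm = nvdimm_create(bus, NULL, NULL, 0, NULL);
		if (!nvdimm) {
			nvdimm_bus_unregister(bus);
			return -ENOMEM;
		}
		return 0;
	}

Regions would then be carved out of the registered DIMMs with nvdimm_pmem_region_create(), nvdimm_blk_region_create(), or nvdimm_volatile_region_create() from an nd_region_desc describing the mappings.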
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index ee6dbb39a809..31db7a05dd36 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -99,7 +99,7 @@ struct klp_object {
 	struct klp_func *funcs;
 
 	/* internal */
-	struct kobject *kobj;
+	struct kobject kobj;
 	struct module *mod;
 	enum klp_state state;
 };
@@ -123,6 +123,12 @@ struct klp_patch {
 	enum klp_state state;
 };
 
+#define klp_for_each_object(patch, obj) \
+	for (obj = patch->objs; obj->funcs; obj++)
+
+#define klp_for_each_func(obj, func) \
+	for (func = obj->funcs; func->old_name; func++)
+
 int klp_register_patch(struct klp_patch *);
 int klp_unregister_patch(struct klp_patch *);
 int klp_enable_patch(struct klp_patch *);
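
A brief sketch of how the new klp_for_each_object()/klp_for_each_func() iterators are meant to be used; dump_patch() and its pr_info() output are hypothetical, and the convention that a NULL object name means vmlinux is assumed from struct klp_object.

	#include <linux/livepatch.h>
	#include <linux/printk.h>

	/* Hypothetical helper: log every function a patch intends to replace. */
	static void dump_patch(struct klp_patch *patch)
	{
		struct klp_object *obj;
		struct klp_func *func;

		klp_for_each_object(patch, obj)
			klp_for_each_func(obj, func)
				pr_info("%s: %s\n",
					obj->name ? obj->name : "vmlinux",
					func->old_name);
	}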
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2722111591a3..70400dc7660f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -255,6 +255,7 @@ struct held_lock {
 	unsigned int check:1;		/* see lock_acquire() comment */
 	unsigned int hardirqs_off:1;
 	unsigned int references:12;	/* 32 bits */
+	unsigned int pin_count;
 };
 
 /*
@@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
 extern void lockdep_clear_current_reclaim_state(void);
 extern void lockdep_trace_alloc(gfp_t mask);
 
+extern void lock_pin_lock(struct lockdep_map *lock);
+extern void lock_unpin_lock(struct lockdep_map *lock);
+
 # define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
+#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
+#define lockdep_unpin_lock(l)	lock_unpin_lock(&(l)->dep_map)
+
 #else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -420,6 +427,9 @@ struct lock_class_key { };
 
 #define lockdep_recursing(tsk)	(0)
 
+#define lockdep_pin_lock(l)	do { (void)(l); } while (0)
+#define lockdep_unpin_lock(l)	do { (void)(l); } while (0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
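
A rough sketch of the pin/unpin pattern these additions enable; struct example_ctx, its spinlock_t member, and the run_callbacks() callback are assumptions for illustration. With CONFIG_LOCKDEP disabled the two annotations compile away to the stubs above; with it enabled, lockdep can complain if the pinned lock is released before being unpinned.

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	struct example_ctx {
		spinlock_t lock;
		void (*run_callbacks)(struct example_ctx *ctx);	/* assumed callback */
	};

	static void example_dispatch(struct example_ctx *ctx)
	{
		spin_lock(&ctx->lock);
		/* Ask lockdep to object if anything below drops ctx->lock. */
		lockdep_pin_lock(&ctx->lock);

		ctx->run_callbacks(ctx);	/* must keep ctx->lock held */

		lockdep_unpin_lock(&ctx->lock);
		spin_unlock(&ctx->lock);
	}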
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
new file mode 100644
index 000000000000..9429f054c323
--- /dev/null
+++ b/include/linux/lsm_hooks.h
@@ -0,0 +1,1888 @@
1/*
2 * Linux Security Module interfaces
3 *
4 * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
5 * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
6 * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
7 * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
8 * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
9 * Copyright (C) 2015 Intel Corporation.
10 * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * Due to this file being licensed under the GPL there is controversy over
18 * whether this permits you to write a module that #includes this file
19 * without placing your module under the GPL. Please consult a lawyer for
20 * advice before doing this.
21 *
22 */
23
24#ifndef __LINUX_LSM_HOOKS_H
25#define __LINUX_LSM_HOOKS_H
26
27#include <linux/security.h>
28#include <linux/init.h>
29#include <linux/rculist.h>
30
31/**
32 * Security hooks for program execution operations.
33 *
34 * @bprm_set_creds:
35 * Save security information in the bprm->security field, typically based
36 * on information about the bprm->file, for later use by the apply_creds
37 * hook. This hook may also optionally check permissions (e.g. for
38 * transitions between security domains).
39 * This hook may be called multiple times during a single execve, e.g. for
40 * interpreters. The hook can tell whether it has already been called by
41 * checking to see if @bprm->security is non-NULL. If so, then the hook
42 * may decide either to retain the security information saved earlier or
43 * to replace it.
44 * @bprm contains the linux_binprm structure.
45 * Return 0 if the hook is successful and permission is granted.
46 * @bprm_check_security:
47 * This hook mediates the point when a search for a binary handler will
48 * begin. It allows a check of the @bprm->security value which is set in the
49 * preceding set_creds call. The primary difference from set_creds is
50 * that the argv list and envp list are reliably available in @bprm. This
51 * hook may be called multiple times during a single execve; and in each
52 * pass set_creds is called first.
53 * @bprm contains the linux_binprm structure.
54 * Return 0 if the hook is successful and permission is granted.
55 * @bprm_committing_creds:
56 * Prepare to install the new security attributes of a process being
57 * transformed by an execve operation, based on the old credentials
58 * pointed to by @current->cred and the information set in @bprm->cred by
59 * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
60 * This hook is a good place to perform state changes on the process such
61 * as closing open file descriptors to which access will no longer be
62 * granted when the attributes are changed. This is called immediately
63 * before commit_creds().
64 * @bprm_committed_creds:
65 * Tidy up after the installation of the new security attributes of a
66 * process being transformed by an execve operation. The new credentials
67 * have, by this point, been set to @current->cred. @bprm points to the
68 * linux_binprm structure. This hook is a good place to perform state
69 * changes on the process such as clearing out non-inheritable signal
70 * state. This is called immediately after commit_creds().
71 * @bprm_secureexec:
72 * Return a boolean value (0 or 1) indicating whether a "secure exec"
73 * is required. The flag is passed in the auxiliary table
74 * on the initial stack to the ELF interpreter to indicate whether libc
75 * should enable secure mode.
76 * @bprm contains the linux_binprm structure.
77 *
78 * Security hooks for filesystem operations.
79 *
80 * @sb_alloc_security:
81 * Allocate and attach a security structure to the sb->s_security field.
82 * The s_security field is initialized to NULL when the structure is
83 * allocated.
84 * @sb contains the super_block structure to be modified.
85 * Return 0 if operation was successful.
86 * @sb_free_security:
87 * Deallocate and clear the sb->s_security field.
88 * @sb contains the super_block structure to be modified.
89 * @sb_statfs:
90 * Check permission before obtaining filesystem statistics for the @mnt
91 * mountpoint.
92 * @dentry is a handle on the superblock for the filesystem.
93 * Return 0 if permission is granted.
94 * @sb_mount:
95 * Check permission before an object specified by @dev_name is mounted on
96 * the mount point named by @nd. For an ordinary mount, @dev_name
97 * identifies a device if the file system type requires a device. For a
98 * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
99 * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
100 * pathname of the object being mounted.
101 * @dev_name contains the name for object being mounted.
102 * @path contains the path for mount point object.
103 * @type contains the filesystem type.
104 * @flags contains the mount flags.
105 * @data contains the filesystem-specific data.
106 * Return 0 if permission is granted.
107 * @sb_copy_data:
108 * Allow mount option data to be copied prior to parsing by the filesystem,
109 * so that the security module can extract security-specific mount
110 * options cleanly (a filesystem may modify the data e.g. with strsep()).
111 * This also allows the original mount data to be stripped of security-
112 * specific options to avoid having to make filesystems aware of them.
113 * @type the type of filesystem being mounted.
114 * @orig the original mount data copied from userspace.
115 * @copy copied data which will be passed to the security module.
116 * Returns 0 if the copy was successful.
117 * @sb_remount:
118 * Extracts security system specific mount options and verifies no changes
119 * are being made to those options.
120 * @sb superblock being remounted
121 * @data contains the filesystem-specific data.
122 * Return 0 if permission is granted.
123 * @sb_umount:
124 * Check permission before the @mnt file system is unmounted.
125 * @mnt contains the mounted file system.
126 * @flags contains the unmount flags, e.g. MNT_FORCE.
127 * Return 0 if permission is granted.
128 * @sb_pivotroot:
129 * Check permission before pivoting the root filesystem.
130 * @old_path contains the path for the new location of the
131 * current root (put_old).
132 * @new_path contains the path for the new root (new_root).
133 * Return 0 if permission is granted.
134 * @sb_set_mnt_opts:
135 * Set the security relevant mount options used for a superblock
136 * @sb the superblock to set security mount options for
137 * @opts binary data structure containing all lsm mount data
138 * @sb_clone_mnt_opts:
139 * Copy all security options from a given superblock to another
140 * @oldsb old superblock which contain information to clone
141 * @newsb new superblock which needs filled in
142 * @sb_parse_opts_str:
143 * Parse a string of security data filling in the opts structure
144 * @options string containing all mount options known by the LSM
145 * @opts binary data structure usable by the LSM
146 * @dentry_init_security:
147 * Compute a context for a dentry as the inode is not yet available
148 * since NFSv4 has no label backed by an EA anyway.
149 * @dentry dentry to use in calculating the context.
150 * @mode mode used to determine resource type.
151 * @name name of the last path component used to create file
152 * @ctx pointer to place the pointer to the resulting context in.
153 * @ctxlen point to place the length of the resulting context.
154 *
155 *
156 * Security hooks for inode operations.
157 *
158 * @inode_alloc_security:
159 * Allocate and attach a security structure to @inode->i_security. The
160 * i_security field is initialized to NULL when the inode structure is
161 * allocated.
162 * @inode contains the inode structure.
163 * Return 0 if operation was successful.
164 * @inode_free_security:
165 * @inode contains the inode structure.
166 * Deallocate the inode security structure and set @inode->i_security to
167 * NULL.
168 * @inode_init_security:
169 * Obtain the security attribute name suffix and value to set on a newly
170 * created inode and set up the incore security field for the new inode.
171 * This hook is called by the fs code as part of the inode creation
172 * transaction and provides for atomic labeling of the inode, unlike
173 * the post_create/mkdir/... hooks called by the VFS. The hook function
174 * is expected to allocate the name and value via kmalloc, with the caller
175 * being responsible for calling kfree after using them.
176 * If the security module does not use security attributes or does
177 * not wish to put a security attribute on this particular inode,
178 * then it should return -EOPNOTSUPP to skip this processing.
179 * @inode contains the inode structure of the newly created inode.
180 * @dir contains the inode structure of the parent directory.
181 * @qstr contains the last path component of the new object
182 * @name will be set to the allocated name suffix (e.g. selinux).
183 * @value will be set to the allocated attribute value.
184 * @len will be set to the length of the value.
185 * Returns 0 if @name and @value have been successfully set,
186 * -EOPNOTSUPP if no security attribute is needed, or
187 * -ENOMEM on memory allocation failure.
188 * @inode_create:
189 * Check permission to create a regular file.
190 * @dir contains inode structure of the parent of the new file.
191 * @dentry contains the dentry structure for the file to be created.
192 * @mode contains the file mode of the file to be created.
193 * Return 0 if permission is granted.
194 * @inode_link:
195 * Check permission before creating a new hard link to a file.
196 * @old_dentry contains the dentry structure for an existing
197 * link to the file.
198 * @dir contains the inode structure of the parent directory
199 * of the new link.
200 * @new_dentry contains the dentry structure for the new link.
201 * Return 0 if permission is granted.
202 * @path_link:
203 * Check permission before creating a new hard link to a file.
204 * @old_dentry contains the dentry structure for an existing link
205 * to the file.
206 * @new_dir contains the path structure of the parent directory of
207 * the new link.
208 * @new_dentry contains the dentry structure for the new link.
209 * Return 0 if permission is granted.
210 * @inode_unlink:
211 * Check the permission to remove a hard link to a file.
212 * @dir contains the inode structure of parent directory of the file.
213 * @dentry contains the dentry structure for file to be unlinked.
214 * Return 0 if permission is granted.
215 * @path_unlink:
216 * Check the permission to remove a hard link to a file.
217 * @dir contains the path structure of parent directory of the file.
218 * @dentry contains the dentry structure for file to be unlinked.
219 * Return 0 if permission is granted.
220 * @inode_symlink:
221 * Check the permission to create a symbolic link to a file.
222 * @dir contains the inode structure of parent directory of
223 * the symbolic link.
224 * @dentry contains the dentry structure of the symbolic link.
225 * @old_name contains the pathname of file.
226 * Return 0 if permission is granted.
227 * @path_symlink:
228 * Check the permission to create a symbolic link to a file.
229 * @dir contains the path structure of parent directory of
230 * the symbolic link.
231 * @dentry contains the dentry structure of the symbolic link.
232 * @old_name contains the pathname of file.
233 * Return 0 if permission is granted.
234 * @inode_mkdir:
235 * Check permissions to create a new directory in the existing directory
236 * associated with inode structure @dir.
237 * @dir contains the inode structure of parent of the directory
238 * to be created.
239 * @dentry contains the dentry structure of new directory.
240 * @mode contains the mode of new directory.
241 * Return 0 if permission is granted.
242 * @path_mkdir:
243 * Check permissions to create a new directory in the existing directory
244 * associated with path structure @path.
245 * @dir contains the path structure of parent of the directory
246 * to be created.
247 * @dentry contains the dentry structure of new directory.
248 * @mode contains the mode of new directory.
249 * Return 0 if permission is granted.
250 * @inode_rmdir:
251 * Check the permission to remove a directory.
252 * @dir contains the inode structure of parent of the directory
253 * to be removed.
254 * @dentry contains the dentry structure of directory to be removed.
255 * Return 0 if permission is granted.
256 * @path_rmdir:
257 * Check the permission to remove a directory.
258 * @dir contains the path structure of parent of the directory to be
259 * removed.
260 * @dentry contains the dentry structure of directory to be removed.
261 * Return 0 if permission is granted.
262 * @inode_mknod:
263 * Check permissions when creating a special file (or a socket or a fifo
264 * file created via the mknod system call). Note that if mknod operation
265 * is being done for a regular file, then the create hook will be called
266 * and not this hook.
267 * @dir contains the inode structure of parent of the new file.
268 * @dentry contains the dentry structure of the new file.
269 * @mode contains the mode of the new file.
270 * @dev contains the device number.
271 * Return 0 if permission is granted.
272 * @path_mknod:
273 * Check permissions when creating a file. Note that this hook is called
274 * even if mknod operation is being done for a regular file.
275 * @dir contains the path structure of parent of the new file.
276 * @dentry contains the dentry structure of the new file.
277 * @mode contains the mode of the new file.
278 * @dev contains the undecoded device number. Use new_decode_dev() to get
279 * the decoded device number.
280 * Return 0 if permission is granted.
281 * @inode_rename:
282 * Check for permission to rename a file or directory.
283 * @old_dir contains the inode structure for parent of the old link.
284 * @old_dentry contains the dentry structure of the old link.
285 * @new_dir contains the inode structure for parent of the new link.
286 * @new_dentry contains the dentry structure of the new link.
287 * Return 0 if permission is granted.
288 * @path_rename:
289 * Check for permission to rename a file or directory.
290 * @old_dir contains the path structure for parent of the old link.
291 * @old_dentry contains the dentry structure of the old link.
292 * @new_dir contains the path structure for parent of the new link.
293 * @new_dentry contains the dentry structure of the new link.
294 * Return 0 if permission is granted.
295 * @path_chmod:
296 * Check for permission to change the DAC permissions of a file or directory.
297 * @dentry contains the dentry structure.
298 * @mnt contains the vfsmnt structure.
299 * @mode contains DAC's mode.
300 * Return 0 if permission is granted.
301 * @path_chown:
302 * Check for permission to change owner/group of a file or directory.
303 * @path contains the path structure.
304 * @uid contains new owner's ID.
305 * @gid contains new group's ID.
306 * Return 0 if permission is granted.
307 * @path_chroot:
308 * Check for permission to change root directory.
309 * @path contains the path structure.
310 * Return 0 if permission is granted.
311 * @inode_readlink:
312 * Check the permission to read the symbolic link.
313 * @dentry contains the dentry structure for the file link.
314 * Return 0 if permission is granted.
315 * @inode_follow_link:
316 * Check permission to follow a symbolic link when looking up a pathname.
317 * @dentry contains the dentry structure for the link.
318 * @inode contains the inode, which itself is not stable in RCU-walk
319 * @rcu indicates whether we are in RCU-walk mode.
320 * Return 0 if permission is granted.
321 * @inode_permission:
322 * Check permission before accessing an inode. This hook is called by the
323 * existing Linux permission function, so a security module can use it to
324 * provide additional checking for existing Linux permission checks.
325 * Notice that this hook is called when a file is opened (as well as many
326 * other operations), whereas the file_security_ops permission hook is
327 * called when the actual read/write operations are performed.
328 * @inode contains the inode structure to check.
329 * @mask contains the permission mask.
330 * Return 0 if permission is granted.
331 * @inode_setattr:
332 * Check permission before setting file attributes. Note that the kernel
333 * call to notify_change is performed from several locations, whenever
334 * file attributes change (such as when a file is truncated, chown/chmod
335 * operations, transferring disk quotas, etc).
336 * @dentry contains the dentry structure for the file.
337 * @attr is the iattr structure containing the new file attributes.
338 * Return 0 if permission is granted.
339 * @path_truncate:
340 * Check permission before truncating a file.
341 * @path contains the path structure for the file.
342 * Return 0 if permission is granted.
343 * @inode_getattr:
344 * Check permission before obtaining file attributes.
345 * @mnt is the vfsmount where the dentry was looked up
346 * @dentry contains the dentry structure for the file.
347 * Return 0 if permission is granted.
348 * @inode_setxattr:
349 * Check permission before setting the extended attributes
350 * @value identified by @name for @dentry.
351 * Return 0 if permission is granted.
352 * @inode_post_setxattr:
353 * Update inode security field after successful setxattr operation.
354 * @value identified by @name for @dentry.
355 * @inode_getxattr:
356 * Check permission before obtaining the extended attributes
357 * identified by @name for @dentry.
358 * Return 0 if permission is granted.
359 * @inode_listxattr:
360 * Check permission before obtaining the list of extended attribute
361 * names for @dentry.
362 * Return 0 if permission is granted.
363 * @inode_removexattr:
364 * Check permission before removing the extended attribute
365 * identified by @name for @dentry.
366 * Return 0 if permission is granted.
367 * @inode_getsecurity:
368 * Retrieve a copy of the extended attribute representation of the
369 * security label associated with @name for @inode via @buffer. Note that
370 * @name is the remainder of the attribute name after the security prefix
371 * has been removed. @alloc is used to specify if the call should return a
372 * value via the buffer or just the value length. Return size of buffer on
373 * success.
374 * @inode_setsecurity:
375 * Set the security label associated with @name for @inode from the
376 * extended attribute value @value. @size indicates the size of the
377 * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
378 * Note that @name is the remainder of the attribute name after the
379 * security. prefix has been removed.
380 * Return 0 on success.
381 * @inode_listsecurity:
382 * Copy the extended attribute names for the security labels
383 * associated with @inode into @buffer. The maximum size of @buffer
384 * is specified by @buffer_size. @buffer may be NULL to request
385 * the size of the buffer required.
386 * Returns number of bytes used/required on success.
387 * @inode_need_killpriv:
388 * Called when an inode has been changed.
389 * @dentry is the dentry being changed.
390 * Return <0 on error to abort the inode change operation.
391 * Return 0 if inode_killpriv does not need to be called.
392 * Return >0 if inode_killpriv does need to be called.
393 * @inode_killpriv:
394 * The setuid bit is being removed. Remove similar security labels.
395 * Called with the dentry->d_inode->i_mutex held.
396 * @dentry is the dentry being changed.
397 * Return 0 on success. If error is returned, then the operation
398 * causing setuid bit removal is failed.
399 * @inode_getsecid:
400 * Get the secid associated with the node.
401 * @inode contains a pointer to the inode.
402 * @secid contains a pointer to the location where result will be saved.
403 * In case of failure, @secid will be set to zero.
404 *
405 * Security hooks for file operations
406 *
407 * @file_permission:
408 * Check file permissions before accessing an open file. This hook is
409 * called by various operations that read or write files. A security
410 * module can use this hook to perform additional checking on these
411 * operations, e.g. to revalidate permissions on use to support privilege
412 * bracketing or policy changes. Notice that this hook is used when the
413 * actual read/write operations are performed, whereas the
414 * inode_security_ops hook is called when a file is opened (as well as
415 * many other operations).
416 * Caveat: Although this hook can be used to revalidate permissions for
417 * various system call operations that read or write files, it does not
418 * address the revalidation of permissions for memory-mapped files.
419 * Security modules must handle this separately if they need such
420 * revalidation.
421 * @file contains the file structure being accessed.
422 * @mask contains the requested permissions.
423 * Return 0 if permission is granted.
424 * @file_alloc_security:
425 * Allocate and attach a security structure to the file->f_security field.
426 * The security field is initialized to NULL when the structure is first
427 * created.
428 * @file contains the file structure to secure.
429 * Return 0 if the hook is successful and permission is granted.
430 * @file_free_security:
431 * Deallocate and free any security structures stored in file->f_security.
432 * @file contains the file structure being modified.
433 * @file_ioctl:
434 * @file contains the file structure.
435 * @cmd contains the operation to perform.
436 * @arg contains the operational arguments.
437 * Check permission for an ioctl operation on @file. Note that @arg
438 * sometimes represents a user space pointer; in other cases, it may be a
439 * simple integer value. When @arg represents a user space pointer, it
440 * should never be used by the security module.
441 * Return 0 if permission is granted.
442 * @mmap_addr:
443 * Check permissions for a mmap operation at @addr.
444 * @addr contains virtual address that will be used for the operation.
445 * Return 0 if permission is granted.
446 * @mmap_file:
447 * Check permissions for a mmap operation. The @file may be NULL, e.g.
448 * if mapping anonymous memory.
449 * @file contains the file structure for file to map (may be NULL).
450 * @reqprot contains the protection requested by the application.
451 * @prot contains the protection that will be applied by the kernel.
452 * @flags contains the operational flags.
453 * Return 0 if permission is granted.
454 * @file_mprotect:
455 * Check permissions before changing memory access permissions.
456 * @vma contains the memory region to modify.
457 * @reqprot contains the protection requested by the application.
458 * @prot contains the protection that will be applied by the kernel.
459 * Return 0 if permission is granted.
460 * @file_lock:
461 * Check permission before performing file locking operations.
462 * Note: this hook mediates both flock and fcntl style locks.
463 * @file contains the file structure.
464 * @cmd contains the posix-translated lock operation to perform
465 * (e.g. F_RDLCK, F_WRLCK).
466 * Return 0 if permission is granted.
467 * @file_fcntl:
468 * Check permission before allowing the file operation specified by @cmd
469 * from being performed on the file @file. Note that @arg sometimes
470 * represents a user space pointer; in other cases, it may be a simple
471 * integer value. When @arg represents a user space pointer, it should
472 * never be used by the security module.
473 * @file contains the file structure.
474 * @cmd contains the operation to be performed.
475 * @arg contains the operational arguments.
476 * Return 0 if permission is granted.
477 * @file_set_fowner:
478 * Save owner security information (typically from current->security) in
479 * file->f_security for later use by the send_sigiotask hook.
480 * @file contains the file structure to update.
481 * Return 0 on success.
482 * @file_send_sigiotask:
483 * Check permission for the file owner @fown to send SIGIO or SIGURG to the
484 * process @tsk. Note that this hook is sometimes called from interrupt.
485 * Note that the fown_struct, @fown, is never outside the context of a
486 * struct file, so the file structure (and associated security information)
487 * can always be obtained:
488 * container_of(fown, struct file, f_owner)
489 * @tsk contains the structure of task receiving signal.
490 * @fown contains the file owner information.
491 * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
492 * Return 0 if permission is granted.
493 * @file_receive:
494 * This hook allows security modules to control the ability of a process
495 * to receive an open file descriptor via socket IPC.
496 * @file contains the file structure being received.
497 * Return 0 if permission is granted.
498 * @file_open
499 * Save open-time permission checking state for later use upon
500 * file_permission, and recheck access if anything has changed
501 * since inode_permission.
502 *
503 * Security hooks for task operations.
504 *
505 * @task_create:
506 * Check permission before creating a child process. See the clone(2)
507 * manual page for definitions of the @clone_flags.
508 * @clone_flags contains the flags indicating what should be shared.
509 * Return 0 if permission is granted.
510 * @task_free:
511 * @task task being freed
512 * Handle release of task-related resources. (Note that this can be called
513 * from interrupt context.)
514 * @cred_alloc_blank:
515 * @cred points to the credentials.
516 * @gfp indicates the atomicity of any memory allocations.
517 * Only allocate sufficient memory and attach to @cred such that
518 * cred_transfer() will not get ENOMEM.
519 * @cred_free:
520 * @cred points to the credentials.
521 * Deallocate and clear the cred->security field in a set of credentials.
522 * @cred_prepare:
523 * @new points to the new credentials.
524 * @old points to the original credentials.
525 * @gfp indicates the atomicity of any memory allocations.
526 * Prepare a new set of credentials by copying the data from the old set.
527 * @cred_transfer:
528 * @new points to the new credentials.
529 * @old points to the original credentials.
530 * Transfer data from original creds to new creds
531 * @kernel_act_as:
532 * Set the credentials for a kernel service to act as (subjective context).
533 * @new points to the credentials to be modified.
534 * @secid specifies the security ID to be set
535 * The current task must be the one that nominated @secid.
536 * Return 0 if successful.
537 * @kernel_create_files_as:
538 * Set the file creation context in a set of credentials to be the same as
539 * the objective context of the specified inode.
540 * @new points to the credentials to be modified.
541 * @inode points to the inode to use as a reference.
542 * The current task must be the one that nominated @inode.
543 * Return 0 if successful.
544 * @kernel_fw_from_file:
545 * Load firmware from userspace (not called for built-in firmware).
546 * @file contains the file structure pointing to the file containing
547 * the firmware to load. This argument will be NULL if the firmware
548 * was loaded via the uevent-triggered blob-based interface exposed
549 * by CONFIG_FW_LOADER_USER_HELPER.
550 * @buf pointer to buffer containing firmware contents.
551 * @size length of the firmware contents.
552 * Return 0 if permission is granted.
553 * @kernel_module_request:
554 * Ability to trigger the kernel to automatically upcall to userspace for
555 * userspace to load a kernel module with the given name.
556 * @kmod_name name of the module requested by the kernel
557 * Return 0 if successful.
558 * @kernel_module_from_file:
559 * Load a kernel module from userspace.
560 * @file contains the file structure pointing to the file containing
561 * the kernel module to load. If the module is being loaded from a blob,
562 * this argument will be NULL.
563 * Return 0 if permission is granted.
564 * @task_fix_setuid:
565 * Update the module's state after setting one or more of the user
566 * identity attributes of the current process. The @flags parameter
567 * indicates which of the set*uid system calls invoked this hook.
568 * @new is the set of credentials that will be installed. Modifications
569 * should be made to this rather than to @current->cred.
570 * @old is the set of credentials that are being replaced.
571 * @flags contains one of the LSM_SETID_* values.
572 * Return 0 on success.
573 * @task_setpgid:
574 * Check permission before setting the process group identifier of the
575 * process @p to @pgid.
576 * @p contains the task_struct for process being modified.
577 * @pgid contains the new pgid.
578 * Return 0 if permission is granted.
579 * @task_getpgid:
580 * Check permission before getting the process group identifier of the
581 * process @p.
582 * @p contains the task_struct for the process.
583 * Return 0 if permission is granted.
584 * @task_getsid:
585 * Check permission before getting the session identifier of the process
586 * @p.
587 * @p contains the task_struct for the process.
588 * Return 0 if permission is granted.
589 * @task_getsecid:
590 * Retrieve the security identifier of the process @p.
591 * @p contains the task_struct for the process and place it into @secid.
592 * In case of failure, @secid will be set to zero.
593 *
594 * @task_setnice:
595 * Check permission before setting the nice value of @p to @nice.
596 * @p contains the task_struct of process.
597 * @nice contains the new nice value.
598 * Return 0 if permission is granted.
599 * @task_setioprio
600 * Check permission before setting the ioprio value of @p to @ioprio.
601 * @p contains the task_struct of process.
602 * @ioprio contains the new ioprio value
603 * Return 0 if permission is granted.
604 * @task_getioprio
605 * Check permission before getting the ioprio value of @p.
606 * @p contains the task_struct of process.
607 * Return 0 if permission is granted.
608 * @task_setrlimit:
609 * Check permission before setting the resource limits of the current
610 * process for @resource to @new_rlim. The old resource limit values can
611 * be examined by dereferencing (current->signal->rlim + resource).
612 * @resource contains the resource whose limit is being set.
613 * @new_rlim contains the new limits for @resource.
614 * Return 0 if permission is granted.
615 * @task_setscheduler:
616 * Check permission before setting scheduling policy and/or parameters of
617 * process @p based on @policy and @lp.
618 * @p contains the task_struct for process.
619 * @policy contains the scheduling policy.
620 * @lp contains the scheduling parameters.
621 * Return 0 if permission is granted.
622 * @task_getscheduler:
623 * Check permission before obtaining scheduling information for process
624 * @p.
625 * @p contains the task_struct for process.
626 * Return 0 if permission is granted.
627 * @task_movememory
628 * Check permission before moving memory owned by process @p.
629 * @p contains the task_struct for process.
630 * Return 0 if permission is granted.
631 * @task_kill:
632 * Check permission before sending signal @sig to @p. @info can be NULL,
633 * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
634 * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
635 * from the kernel and should typically be permitted.
636 * SIGIO signals are handled separately by the send_sigiotask hook in
637 * file_security_ops.
638 * @p contains the task_struct for process.
639 * @info contains the signal information.
640 * @sig contains the signal value.
641 * @secid contains the sid of the process where the signal originated
642 * Return 0 if permission is granted.
643 * @task_wait:
644 * Check permission before allowing a process to reap a child process @p
645 * and collect its status information.
646 * @p contains the task_struct for process.
647 * Return 0 if permission is granted.
648 * @task_prctl:
649 * Check permission before performing a process control operation on the
650 * current process.
651 * @option contains the operation.
652 * @arg2 contains an argument.
653 * @arg3 contains an argument.
654 * @arg4 contains an argument.
655 * @arg5 contains an argument.
656 * Return -ENOSYS if no-one wanted to handle this op, any other value to
657 * cause prctl() to return immediately with that value.
658 * @task_to_inode:
659 * Set the security attributes for an inode based on an associated task's
660 * security attributes, e.g. for /proc/pid inodes.
661 * @p contains the task_struct for the task.
662 * @inode contains the inode structure for the inode.
663 *
664 * Security hooks for Netlink messaging.
665 *
666 * @netlink_send:
667 * Save security information for a netlink message so that permission
668 * checking can be performed when the message is processed. The security
669 * information can be saved using the eff_cap field of the
670 * netlink_skb_parms structure. Also may be used to provide fine
671 * grained control over message transmission.
672 * @sk associated sock of task sending the message.
673 * @skb contains the sk_buff structure for the netlink message.
674 * Return 0 if the information was successfully saved and message
675 * is allowed to be transmitted.
676 *
677 * Security hooks for Unix domain networking.
678 *
679 * @unix_stream_connect:
680 * Check permissions before establishing a Unix domain stream connection
681 * between @sock and @other.
682 * @sock contains the sock structure.
683 * @other contains the peer sock structure.
684 * @newsk contains the new sock structure.
685 * Return 0 if permission is granted.
686 * @unix_may_send:
687 * Check permissions before connecting or sending datagrams from @sock to
688 * @other.
689 * @sock contains the socket structure.
690 * @other contains the peer socket structure.
691 * Return 0 if permission is granted.
692 *
693 * The @unix_stream_connect and @unix_may_send hooks were necessary because
694 * Linux provides an alternative to the conventional file name space for Unix
695 * domain sockets. Whereas binding and connecting to sockets in the file name
696 * space is mediated by the typical file permissions (and caught by the mknod
697 * and permission hooks in inode_security_ops), binding and connecting to
698 * sockets in the abstract name space is completely unmediated. Sufficient
699 * control of Unix domain sockets in the abstract name space isn't possible
700 * using only the socket layer hooks, since we need to know the actual target
701 * socket, which is not looked up until we are inside the af_unix code.
702 *
703 * Security hooks for socket operations.
704 *
705 * @socket_create:
706 * Check permissions prior to creating a new socket.
707 * @family contains the requested protocol family.
708 * @type contains the requested communications type.
709 * @protocol contains the requested protocol.
710 * @kern set to 1 if a kernel socket.
711 * Return 0 if permission is granted.
712 * @socket_post_create:
713 * This hook allows a module to update or allocate a per-socket security
714 * structure. Note that the security field was not added directly to the
715 * socket structure, but rather, the socket security information is stored
716 * in the associated inode. Typically, the inode alloc_security hook will
717 * allocate and attach security information to
718 * sock->inode->i_security. This hook may be used to update the
719 * sock->inode->i_security field with additional information that wasn't
720 * available when the inode was allocated.
721 * @sock contains the newly created socket structure.
722 * @family contains the requested protocol family.
723 * @type contains the requested communications type.
724 * @protocol contains the requested protocol.
725 * @kern set to 1 if a kernel socket.
726 * @socket_bind:
727 * Check permission before socket protocol layer bind operation is
728 * performed and the socket @sock is bound to the address specified in the
729 * @address parameter.
730 * @sock contains the socket structure.
731 * @address contains the address to bind to.
732 * @addrlen contains the length of address.
733 * Return 0 if permission is granted.
734 * @socket_connect:
735 * Check permission before socket protocol layer connect operation
736 * attempts to connect socket @sock to a remote address, @address.
737 * @sock contains the socket structure.
738 * @address contains the address of remote endpoint.
739 * @addrlen contains the length of address.
740 * Return 0 if permission is granted.
741 * @socket_listen:
742 * Check permission before socket protocol layer listen operation.
743 * @sock contains the socket structure.
744 * @backlog contains the maximum length for the pending connection queue.
745 * Return 0 if permission is granted.
746 * @socket_accept:
747 * Check permission before accepting a new connection. Note that the new
748 * socket, @newsock, has been created and some information copied to it,
749 * but the accept operation has not actually been performed.
750 * @sock contains the listening socket structure.
751 * @newsock contains the newly created server socket for connection.
752 * Return 0 if permission is granted.
753 * @socket_sendmsg:
754 * Check permission before transmitting a message to another socket.
755 * @sock contains the socket structure.
756 * @msg contains the message to be transmitted.
757 * @size contains the size of message.
758 * Return 0 if permission is granted.
759 * @socket_recvmsg:
760 * Check permission before receiving a message from a socket.
761 * @sock contains the socket structure.
762 * @msg contains the message structure.
763 * @size contains the size of message structure.
764 * @flags contains the operational flags.
765 * Return 0 if permission is granted.
766 * @socket_getsockname:
767 * Check permission before the local address (name) of the socket object
768 * @sock is retrieved.
769 * @sock contains the socket structure.
770 * Return 0 if permission is granted.
771 * @socket_getpeername:
772 * Check permission before the remote address (name) of a socket object
773 * @sock is retrieved.
774 * @sock contains the socket structure.
775 * Return 0 if permission is granted.
776 * @socket_getsockopt:
777 * Check permissions before retrieving the options associated with socket
778 * @sock.
779 * @sock contains the socket structure.
780 * @level contains the protocol level to retrieve option from.
781 * @optname contains the name of option to retrieve.
782 * Return 0 if permission is granted.
783 * @socket_setsockopt:
784 * Check permissions before setting the options associated with socket
785 * @sock.
786 * @sock contains the socket structure.
787 * @level contains the protocol level to set options for.
788 * @optname contains the name of the option to set.
789 * Return 0 if permission is granted.
790 * @socket_shutdown:
791 * Checks permission before all or part of a connection on the socket
792 * @sock is shut down.
793 * @sock contains the socket structure.
794 * @how contains the flag indicating how future sends and receives
795 * are handled.
796 * Return 0 if permission is granted.
797 * @socket_sock_rcv_skb:
798 * Check permissions on incoming network packets. This hook is distinct
799 * from Netfilter's IP input hooks since it is the first time that the
800 * incoming sk_buff @skb has been associated with a particular socket, @sk.
801 * Must not sleep inside this hook because some callers hold spinlocks.
802 * @sk contains the sock (not socket) associated with the incoming sk_buff.
803 * @skb contains the incoming network data.
804 * @socket_getpeersec_stream:
805 * This hook allows the security module to provide peer socket security
806 * state for unix or connected tcp sockets to userspace via getsockopt
807 * SO_GETPEERSEC. For tcp sockets this can be meaningful if the
808 * socket is associated with an ipsec SA.
809 * @sock is the local socket.
810 * @optval userspace memory where the security state is to be copied.
811 * @optlen userspace int where the module should copy the actual length
812 * of the security state.
813 * @len as input is the maximum length to copy to userspace provided
814 * by the caller.
815 * Return 0 if all is well, otherwise, typical getsockopt return
816 * values.
817 * @socket_getpeersec_dgram:
818 * This hook allows the security module to provide peer socket security
819 * state for udp sockets on a per-packet basis to userspace via
820 * getsockopt SO_GETPEERSEC. The application must first have indicated
821 * the IP_PASSSEC option via getsockopt. It can then retrieve the
822 * security state returned by this hook for a packet via the SCM_SECURITY
823 * ancillary message type.
824 * @skb is the skbuff for the packet being queried
825 * @secdata is a pointer to a buffer in which to copy the security data
826 * @seclen is the maximum length for @secdata
827 * Return 0 on success, error on failure.
828 * @sk_alloc_security:
829 * Allocate and attach a security structure to the sk->sk_security field,
830 * which is used to copy security attributes between local stream sockets.
831 * @sk_free_security:
832 * Deallocate security structure.
833 * @sk_clone_security:
834 * Clone/copy security structure.
835 * @sk_getsecid:
836 * Retrieve the LSM-specific secid for the sock to enable caching
837 * of network authorizations.
838 * @sock_graft:
839 * Sets the socket's isec sid to the sock's sid.
840 * @inet_conn_request:
841 * Sets the openreq's sid to socket's sid with MLS portion taken
842 * from peer sid.
843 * @inet_csk_clone:
844 * Sets the new child socket's sid to the openreq sid.
845 * @inet_conn_established:
846 * Sets the connection's peersid to the secmark on skb.
847 * @secmark_relabel_packet:
848 * check if the process should be allowed to relabel packets to
849 * the given secid
850 * @security_secmark_refcount_inc
851 * tells the LSM to increment the number of secmark labeling rules loaded
852 * @security_secmark_refcount_dec
853 * tells the LSM to decrement the number of secmark labeling rules loaded
854 * @req_classify_flow:
855 * Sets the flow's sid to the openreq sid.
856 * @tun_dev_alloc_security:
857 * This hook allows a module to allocate a security structure for a TUN
858 * device.
859 * @security pointer to a security structure pointer.
860 * Returns a zero on success, negative values on failure.
861 * @tun_dev_free_security:
862 * This hook allows a module to free the security structure for a TUN
863 * device.
864 * @security pointer to the TUN device's security structure
865 * @tun_dev_create:
866 * Check permissions prior to creating a new TUN device.
867 * @tun_dev_attach_queue:
868 * Check permissions prior to attaching to a TUN device queue.
869 * @security pointer to the TUN device's security structure.
870 * @tun_dev_attach:
871 * This hook can be used by the module to update any security state
872 * associated with the TUN device's sock structure.
873 * @sk contains the existing sock structure.
874 * @security pointer to the TUN device's security structure.
875 * @tun_dev_open:
876 * This hook can be used by the module to update any security state
877 * associated with the TUN device's security structure.
878 * @security pointer to the TUN device's security structure.
879 *
880 * Security hooks for XFRM operations.
881 *
882 * @xfrm_policy_alloc_security:
883 * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
884 * Database used by the XFRM system.
885 * @sec_ctx contains the security context information being provided by
886 * the user-level policy update program (e.g., setkey).
887 * Allocate a security structure to the xp->security field; the security
888 * field is initialized to NULL when the xfrm_policy is allocated.
889 * Return 0 if operation was successful (memory to allocate, legal context)
890 * @gfp is to specify the context for the allocation
891 * @xfrm_policy_clone_security:
892 * @old_ctx contains an existing xfrm_sec_ctx.
893 * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
894 * Allocate a security structure in new_ctxp that contains the
895 * information from the old_ctx structure.
896 * Return 0 if operation was successful (memory to allocate).
897 * @xfrm_policy_free_security:
898 * @ctx contains the xfrm_sec_ctx
899 * Deallocate xp->security.
900 * @xfrm_policy_delete_security:
901 * @ctx contains the xfrm_sec_ctx.
902 * Authorize deletion of xp->security.
903 * @xfrm_state_alloc:
904 * @x contains the xfrm_state being added to the Security Association
905 * Database by the XFRM system.
906 * @sec_ctx contains the security context information being provided by
907 * the user-level SA generation program (e.g., setkey or racoon).
908 * Allocate a security structure to the x->security field; the security
909 * field is initialized to NULL when the xfrm_state is allocated. Set the
910 * context to correspond to sec_ctx. Return 0 if operation was successful
911 * (memory to allocate, legal context).
912 * @xfrm_state_alloc_acquire:
913 * @x contains the xfrm_state being added to the Security Association
914 * Database by the XFRM system.
915 * @polsec contains the policy's security context.
916 * @secid contains the secid from which to take the mls portion of the
917 * context.
918 * Allocate a security structure to the x->security field; the security
919 * field is initialized to NULL when the xfrm_state is allocated. Set the
920 * context to correspond to secid. Return 0 if operation was successful
921 * (memory to allocate, legal context).
922 * @xfrm_state_free_security:
923 * @x contains the xfrm_state.
924 * Deallocate x->security.
925 * @xfrm_state_delete_security:
926 * @x contains the xfrm_state.
927 * Authorize deletion of x->security.
928 * @xfrm_policy_lookup:
929 * @ctx contains the xfrm_sec_ctx for which the access control is being
930 * checked.
931 * @fl_secid contains the flow security label that is used to authorize
932 * access to the policy xp.
933 * @dir contains the direction of the flow (input or output).
934 * Check permission when a flow selects a xfrm_policy for processing
935 * XFRMs on a packet. The hook is called when selecting either a
936 * per-socket policy or a generic xfrm policy.
937 * Return 0 if permission is granted, -ESRCH otherwise, or -errno
938 * on other errors.
939 * @xfrm_state_pol_flow_match:
940 * @x contains the state to match.
941 * @xp contains the policy to check for a match.
942 * @fl contains the flow to check for a match.
943 * Return 1 if there is a match.
944 * @xfrm_decode_session:
945 * @skb points to skb to decode.
946 * @secid points to the flow key secid to set.
947 * @ckall says if all xfrms used should be checked for same secid.
948 * Return 0 if ckall is zero or all xfrms used have the same secid.
949 *
950 * Security hooks affecting all Key Management operations
951 *
952 * @key_alloc:
953 * Permit allocation of a key and assign security data. Note that key does
954 * not have a serial number assigned at this point.
955 * @key points to the key.
956 * @flags is the allocation flags
957 * Return 0 if permission is granted, -ve error otherwise.
958 * @key_free:
959 * Notification of destruction; free security data.
960 * @key points to the key.
961 * No return value.
962 * @key_permission:
963 * See whether a specific operational right is granted to a process on a
964 * key.
965 * @key_ref refers to the key (key pointer + possession attribute bit).
966 * @cred points to the credentials to provide the context against which to
967 * evaluate the security data on the key.
968 * @perm describes the combination of permissions required of this key.
969 * Return 0 if permission is granted, -ve error otherwise.
970 * @key_getsecurity:
971 * Get a textual representation of the security context attached to a key
972 * for the purposes of honouring KEYCTL_GETSECURITY. This function
973 * allocates the storage for the NUL-terminated string and the caller
974 * should free it.
975 * @key points to the key to be queried.
976 * @_buffer points to a pointer that should be set to point to the
977 * resulting string (if no label or an error occurs).
978 * Return the length of the string (including terminating NUL) or -ve if
979 * an error.
980 * May also return 0 (and a NULL buffer pointer) if there is no label.
981 *
982 * Security hooks affecting all System V IPC operations.
983 *
984 * @ipc_permission:
985 * Check permissions for access to IPC
986 * @ipcp contains the kernel IPC permission structure
987 * @flag contains the desired (requested) permission set
988 * Return 0 if permission is granted.
989 * @ipc_getsecid:
990 * Get the secid associated with the ipc object.
991 * @ipcp contains the kernel IPC permission structure.
992 * @secid contains a pointer to the location where result will be saved.
993 * In case of failure, @secid will be set to zero.
994 *
995 * Security hooks for individual messages held in System V IPC message queues
996 * @msg_msg_alloc_security:
997 * Allocate and attach a security structure to the msg->security field.
998 * The security field is initialized to NULL when the structure is first
999 * created.
1000 * @msg contains the message structure to be modified.
1001 * Return 0 if operation was successful and permission is granted.
1002 * @msg_msg_free_security:
1003 * Deallocate the security structure for this message.
1004 * @msg contains the message structure to be modified.
1005 *
1006 * Security hooks for System V IPC Message Queues
1007 *
1008 * @msg_queue_alloc_security:
1009 * Allocate and attach a security structure to the
1010 * msq->q_perm.security field. The security field is initialized to
1011 * NULL when the structure is first created.
1012 * @msq contains the message queue structure to be modified.
1013 * Return 0 if operation was successful and permission is granted.
1014 * @msg_queue_free_security:
1015 * Deallocate security structure for this message queue.
1016 * @msq contains the message queue structure to be modified.
1017 * @msg_queue_associate:
1018 * Check permission when a message queue is requested through the
1019 * msgget system call. This hook is only called when returning the
1020 * message queue identifier for an existing message queue, not when a
1021 * new message queue is created.
1022 * @msq contains the message queue to act upon.
1023 * @msqflg contains the operation control flags.
1024 * Return 0 if permission is granted.
1025 * @msg_queue_msgctl:
1026 * Check permission when a message control operation specified by @cmd
1027 * is to be performed on the message queue @msq.
1028 * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
1029 * @msq contains the message queue to act upon. May be NULL.
1030 * @cmd contains the operation to be performed.
1031 * Return 0 if permission is granted.
1032 * @msg_queue_msgsnd:
1033 * Check permission before a message, @msg, is enqueued on the message
1034 * queue, @msq.
1035 * @msq contains the message queue to send message to.
1036 * @msg contains the message to be enqueued.
1037 * @msqflg contains operational flags.
1038 * Return 0 if permission is granted.
1039 * @msg_queue_msgrcv:
1040 * Check permission before a message, @msg, is removed from the message
1041 * queue, @msq. The @target task structure contains a pointer to the
1042 * process that will be receiving the message (not equal to the current
1043 * process when inline receives are being performed).
1044 * @msq contains the message queue to retrieve message from.
1045 * @msg contains the message destination.
1046 * @target contains the task structure for recipient process.
1047 * @type contains the type of message requested.
1048 * @mode contains the operational flags.
1049 * Return 0 if permission is granted.
1050 *
1051 * Security hooks for System V Shared Memory Segments
1052 *
1053 * @shm_alloc_security:
1054 * Allocate and attach a security structure to the shp->shm_perm.security
1055 * field. The security field is initialized to NULL when the structure is
1056 * first created.
1057 * @shp contains the shared memory structure to be modified.
1058 * Return 0 if operation was successful and permission is granted.
1059 * @shm_free_security:
1060 * Deallocate the security struct for this memory segment.
1061 * @shp contains the shared memory structure to be modified.
1062 * @shm_associate:
1063 * Check permission when a shared memory region is requested through the
1064 * shmget system call. This hook is only called when returning the shared
1065 * memory region identifier for an existing region, not when a new shared
1066 * memory region is created.
1067 * @shp contains the shared memory structure to be modified.
1068 * @shmflg contains the operation control flags.
1069 * Return 0 if permission is granted.
1070 * @shm_shmctl:
1071 * Check permission when a shared memory control operation specified by
1072 * @cmd is to be performed on the shared memory region @shp.
1073 * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
1074 * @shp contains shared memory structure to be modified.
1075 * @cmd contains the operation to be performed.
1076 * Return 0 if permission is granted.
1077 * @shm_shmat:
1078 * Check permissions prior to allowing the shmat system call to attach the
1079 * shared memory segment @shp to the data segment of the calling process.
1080 * The attaching address is specified by @shmaddr.
1081 * @shp contains the shared memory structure to be modified.
1082 * @shmaddr contains the address to attach memory region to.
1083 * @shmflg contains the operational flags.
1084 * Return 0 if permission is granted.
1085 *
1086 * Security hooks for System V Semaphores
1087 *
1088 * @sem_alloc_security:
1089 * Allocate and attach a security structure to the sma->sem_perm.security
1090 * field. The security field is initialized to NULL when the structure is
1091 * first created.
1092 * @sma contains the semaphore structure
1093 * Return 0 if operation was successful and permission is granted.
1094 * @sem_free_security:
1095 * Deallocate the security structure for this semaphore.
1096 * @sma contains the semaphore structure.
1097 * @sem_associate:
1098 * Check permission when a semaphore is requested through the semget
1099 * system call. This hook is only called when returning the semaphore
1100 * identifier for an existing semaphore, not when a new one must be
1101 * created.
1102 * @sma contains the semaphore structure.
1103 * @semflg contains the operation control flags.
1104 * Return 0 if permission is granted.
1105 * @sem_semctl:
1106 * Check permission when a semaphore operation specified by @cmd is to be
1107 * performed on the semaphore @sma. The @sma may be NULL, e.g. for
1108 * IPC_INFO or SEM_INFO.
1109 * @sma contains the semaphore structure. May be NULL.
1110 * @cmd contains the operation to be performed.
1111 * Return 0 if permission is granted.
1112 * @sem_semop:
1113 * Check permissions before performing operations on members of the
1114 * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
1115 * may be modified.
1116 * @sma contains the semaphore structure.
1117 * @sops contains the operations to perform.
1118 * @nsops contains the number of operations to perform.
1119 * @alter contains the flag indicating whether changes are to be made.
1120 * Return 0 if permission is granted.
1121 *
1122 * @binder_set_context_mgr:
1123 * Check whether @mgr is allowed to be the binder context manager.
1124 * @mgr contains the task_struct for the task being registered.
1125 * Return 0 if permission is granted.
1126 * @binder_transaction:
1127 * Check whether @from is allowed to invoke a binder transaction call
1128 * to @to.
1129 * @from contains the task_struct for the sending task.
1130 * @to contains the task_struct for the receiving task.
1131 * @binder_transfer_binder:
1132 * Check whether @from is allowed to transfer a binder reference to @to.
1133 * @from contains the task_struct for the sending task.
1134 * @to contains the task_struct for the receiving task.
1135 * @binder_transfer_file:
1136 * Check whether @from is allowed to transfer @file to @to.
1137 * @from contains the task_struct for the sending task.
1138 * @file contains the struct file being transferred.
1139 * @to contains the task_struct for the receiving task.
1140 *
1141 * @ptrace_access_check:
1142 * Check permission before allowing the current process to trace the
1143 * @child process.
1144 * Security modules may also want to perform a process tracing
1145 * check during an execve in the bprm_set_creds hook of
1146 * binprm_security_ops if the process is being traced and its
1147 * security attributes would be changed by the execve.
1148 *
1149 * @child contains the task_struct structure for the target process.
1150 * @mode contains the PTRACE_MODE flags indicating the form of access.
1151 * Return 0 if permission is granted.
1152 * @ptrace_traceme:
1153 * Check that the @parent process has sufficient permission to trace the
1154 * current process before allowing the current process to present itself
1155 * to the @parent process for tracing.
1156 * @parent contains the task_struct structure for debugger process.
1157 * Return 0 if permission is granted.
1158 * @capget:
1159 * Get the @effective, @inheritable, and @permitted capability sets for
1160 * the @target process. The hook may also perform permission checking to
1161 * determine if the current process is allowed to see the capability sets
1162 * of the @target process.
1163 * @target contains the task_struct structure for target process.
1164 * @effective contains the effective capability set.
1165 * @inheritable contains the inheritable capability set.
1166 * @permitted contains the permitted capability set.
1167 * Return 0 if the capability sets were successfully obtained.
1168 * @capset:
1169 * Set the @effective, @inheritable, and @permitted capability sets for
1170 * the current process.
1171 * @new contains the new credentials structure for target process.
1172 * @old contains the current credentials structure for target process.
1173 * @effective contains the effective capability set.
1174 * @inheritable contains the inheritable capability set.
1175 * @permitted contains the permitted capability set.
1176 * Return 0 and update @new if permission is granted.
1177 * @capable:
1178 * Check whether the indicated credentials have the @cap capability
1179 * in the user namespace @ns.
1180 * @cred contains the credentials to use.
1181 * @ns contains the user namespace we want the capability in.
1182 * @cap contains the capability <include/linux/capability.h>.
1183 * @audit contains whether to write an audit message or not.
1184 * Return 0 if the capability is granted for @cred.
1185 * @syslog:
1186 * Check permission before accessing the kernel message ring or changing
1187 * logging to the console.
1188 * See the syslog(2) manual page for an explanation of the @type values.
1189 * @type contains the type of action.
1190 * @from_file indicates the context of action (if it came from /proc).
1191 * Return 0 if permission is granted.
1192 * @settime:
1193 * Check permission to change the system time.
1194 * struct timespec and timezone are defined in include/linux/time.h
1195 * @ts contains new time
1196 * @tz contains new timezone
1197 * Return 0 if permission is granted.
1198 * @vm_enough_memory:
1199 * Check permissions for allocating a new virtual mapping.
1200 * @mm contains the mm struct it is being added to.
1201 * @pages contains the number of pages.
1202 * Return 0 if permission is granted.
1203 *
1204 * @ismaclabel:
1205 * Check if the extended attribute specified by @name
1206 * represents a MAC label. Returns 1 if @name is a MAC
1207 * attribute, otherwise returns 0.
1208 * @name contains the full extended attribute name to check
1209 * against the LSM as a MAC label.
1210 *
1211 * @secid_to_secctx:
1212 * Convert secid to security context. If secdata is NULL the length of
1213 * the result will be returned in seclen, but no secdata will be returned.
1214 * This does mean that the length could change between calls to check the
1215 * length and the next call which actually allocates and returns the
1216 * secdata.
1217 * @secid contains the security ID.
1218 * @secdata contains the pointer that stores the converted security
1219 * context.
1220 * @seclen contains the pointer that stores the length of @secdata.
1221 * @secctx_to_secid:
1222 * Convert security context to secid.
1223 * @secid contains the pointer to the generated security ID.
1224 * @secdata contains the security context.
1225 *
1226 * @release_secctx:
1227 * Release the security context.
1228 * @secdata contains the security context.
1229 * @seclen contains the length of the security context.
1230 *
1231 * Security hooks for Audit
1232 *
1233 * @audit_rule_init:
1234 * Allocate and initialize an LSM audit rule structure.
1235 * @field contains the required Audit action.
1236 * Field flags are defined in include/linux/audit.h.
1237 * @op contains the operator the rule uses.
1238 * @rulestr contains the context where the rule will be applied to.
1239 * @lsmrule contains a pointer to receive the result.
1240 * Return 0 if @lsmrule has been successfully set,
1241 * -EINVAL in case of an invalid rule.
1242 *
1243 * @audit_rule_known:
1244 * Specifies whether given @rule contains any fields related to
1245 * current LSM.
1246 * @rule contains the audit rule of interest.
1247 * Return 1 if a relation was found, 0 otherwise.
1248 *
1249 * @audit_rule_match:
1250 * Determine if given @secid matches a rule previously approved
1251 * by @audit_rule_known.
1252 * @secid contains the security id in question.
1253 * @field contains the field which relates to current LSM.
1254 * @op contains the operator that will be used for matching.
1255 * @rule points to the audit rule that will be checked against.
1256 * @actx points to the audit context associated with the check.
1257 * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
1258 *
1259 * @audit_rule_free:
1260 * Deallocate the LSM audit rule structure previously allocated by
1261 * audit_rule_init.
1262 * @rule contains the allocated rule
1263 *
1264 * @inode_notifysecctx:
1265 * Notify the security module of what the security context of an inode
1266 * should be. Initializes the incore security context managed by the
1267 * security module for this inode. Example usage: NFS client invokes
1268 * this hook to initialize the security context in its incore inode to the
1269 * value provided by the server for the file when the server returned the
1270 * file's attributes to the client.
1271 *
1272 * Must be called with inode->i_mutex locked.
1273 *
1274 * @inode we wish to set the security context of.
1275 * @ctx contains the string which we wish to set in the inode.
1276 * @ctxlen contains the length of @ctx.
1277 *
1278 * @inode_setsecctx:
1279 * Change the security context of an inode. Updates the
1280 * incore security context managed by the security module and invokes the
1281 * fs code as needed (via __vfs_setxattr_noperm) to update any backing
1282 * xattrs that represent the context. Example usage: NFS server invokes
1283 * this hook to change the security context in its incore inode and on the
1284 * backing filesystem to a value provided by the client on a SETATTR
1285 * operation.
1286 *
1287 * Must be called with inode->i_mutex locked.
1288 *
1289 * @dentry contains the inode we wish to set the security context of.
1290 * @ctx contains the string which we wish to set in the inode.
1291 * @ctxlen contains the length of @ctx.
1292 *
1293 * @inode_getsecctx:
1294 * On success, returns 0 and fills out @ctx and @ctxlen with the security
1295 * context for the given @inode.
1296 *
1297 * @inode we wish to get the security context of.
1298 * @ctx is a pointer in which to place the allocated security context.
1299 * @ctxlen points to the place to put the length of @ctx.
1300 * This is the main security structure.
1301 */
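
As a rough sketch of the conventions documented above -- hooks return 0 to grant permission and a negative errno value to deny, and the *_alloc_security hooks own the NULL-initialized security field -- a module might implement two of the System V IPC hooks as below. This is illustrative only: struct example_ipc_sec and example_policy_allows() are hypothetical, and <linux/slab.h> is assumed for kzalloc().

/* Hedged sketch only: the security blob layout and the policy check
 * are hypothetical, not part of this header. */
static int example_msg_queue_alloc_security(struct msg_queue *msq)
{
	/* msq->q_perm.security starts out NULL and is owned by the LSM. */
	msq->q_perm.security = kzalloc(sizeof(struct example_ipc_sec),
				       GFP_KERNEL);
	return msq->q_perm.security ? 0 : -ENOMEM;
}

static int example_msg_queue_msgsnd(struct msg_queue *msq,
				    struct msg_msg *msg, int msqflg)
{
	/* Return 0 to let the message be enqueued, -EACCES to deny. */
	if (!example_policy_allows(msq->q_perm.security, msg->security))
		return -EACCES;
	return 0;
}
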
1302
1303union security_list_options {
1304 int (*binder_set_context_mgr)(struct task_struct *mgr);
1305 int (*binder_transaction)(struct task_struct *from,
1306 struct task_struct *to);
1307 int (*binder_transfer_binder)(struct task_struct *from,
1308 struct task_struct *to);
1309 int (*binder_transfer_file)(struct task_struct *from,
1310 struct task_struct *to,
1311 struct file *file);
1312
1313 int (*ptrace_access_check)(struct task_struct *child,
1314 unsigned int mode);
1315 int (*ptrace_traceme)(struct task_struct *parent);
1316 int (*capget)(struct task_struct *target, kernel_cap_t *effective,
1317 kernel_cap_t *inheritable, kernel_cap_t *permitted);
1318 int (*capset)(struct cred *new, const struct cred *old,
1319 const kernel_cap_t *effective,
1320 const kernel_cap_t *inheritable,
1321 const kernel_cap_t *permitted);
1322 int (*capable)(const struct cred *cred, struct user_namespace *ns,
1323 int cap, int audit);
1324 int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
1325 int (*quota_on)(struct dentry *dentry);
1326 int (*syslog)(int type);
1327 int (*settime)(const struct timespec *ts, const struct timezone *tz);
1328 int (*vm_enough_memory)(struct mm_struct *mm, long pages);
1329
1330 int (*bprm_set_creds)(struct linux_binprm *bprm);
1331 int (*bprm_check_security)(struct linux_binprm *bprm);
1332 int (*bprm_secureexec)(struct linux_binprm *bprm);
1333 void (*bprm_committing_creds)(struct linux_binprm *bprm);
1334 void (*bprm_committed_creds)(struct linux_binprm *bprm);
1335
1336 int (*sb_alloc_security)(struct super_block *sb);
1337 void (*sb_free_security)(struct super_block *sb);
1338 int (*sb_copy_data)(char *orig, char *copy);
1339 int (*sb_remount)(struct super_block *sb, void *data);
1340 int (*sb_kern_mount)(struct super_block *sb, int flags, void *data);
1341 int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
1342 int (*sb_statfs)(struct dentry *dentry);
1343 int (*sb_mount)(const char *dev_name, struct path *path,
1344 const char *type, unsigned long flags, void *data);
1345 int (*sb_umount)(struct vfsmount *mnt, int flags);
1346 int (*sb_pivotroot)(struct path *old_path, struct path *new_path);
1347 int (*sb_set_mnt_opts)(struct super_block *sb,
1348 struct security_mnt_opts *opts,
1349 unsigned long kern_flags,
1350 unsigned long *set_kern_flags);
1351 int (*sb_clone_mnt_opts)(const struct super_block *oldsb,
1352 struct super_block *newsb);
1353 int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts);
1354 int (*dentry_init_security)(struct dentry *dentry, int mode,
1355 struct qstr *name, void **ctx,
1356 u32 *ctxlen);
1357
1358
1359#ifdef CONFIG_SECURITY_PATH
1360 int (*path_unlink)(struct path *dir, struct dentry *dentry);
1361 int (*path_mkdir)(struct path *dir, struct dentry *dentry,
1362 umode_t mode);
1363 int (*path_rmdir)(struct path *dir, struct dentry *dentry);
1364 int (*path_mknod)(struct path *dir, struct dentry *dentry,
1365 umode_t mode, unsigned int dev);
1366 int (*path_truncate)(struct path *path);
1367 int (*path_symlink)(struct path *dir, struct dentry *dentry,
1368 const char *old_name);
1369 int (*path_link)(struct dentry *old_dentry, struct path *new_dir,
1370 struct dentry *new_dentry);
1371 int (*path_rename)(struct path *old_dir, struct dentry *old_dentry,
1372 struct path *new_dir,
1373 struct dentry *new_dentry);
1374 int (*path_chmod)(struct path *path, umode_t mode);
1375 int (*path_chown)(struct path *path, kuid_t uid, kgid_t gid);
1376 int (*path_chroot)(struct path *path);
1377#endif
1378
1379 int (*inode_alloc_security)(struct inode *inode);
1380 void (*inode_free_security)(struct inode *inode);
1381 int (*inode_init_security)(struct inode *inode, struct inode *dir,
1382 const struct qstr *qstr,
1383 const char **name, void **value,
1384 size_t *len);
1385 int (*inode_create)(struct inode *dir, struct dentry *dentry,
1386 umode_t mode);
1387 int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
1388 struct dentry *new_dentry);
1389 int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
1390 int (*inode_symlink)(struct inode *dir, struct dentry *dentry,
1391 const char *old_name);
1392 int (*inode_mkdir)(struct inode *dir, struct dentry *dentry,
1393 umode_t mode);
1394 int (*inode_rmdir)(struct inode *dir, struct dentry *dentry);
1395 int (*inode_mknod)(struct inode *dir, struct dentry *dentry,
1396 umode_t mode, dev_t dev);
1397 int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry,
1398 struct inode *new_dir,
1399 struct dentry *new_dentry);
1400 int (*inode_readlink)(struct dentry *dentry);
1401 int (*inode_follow_link)(struct dentry *dentry, struct inode *inode,
1402 bool rcu);
1403 int (*inode_permission)(struct inode *inode, int mask);
1404 int (*inode_setattr)(struct dentry *dentry, struct iattr *attr);
1405 int (*inode_getattr)(const struct path *path);
1406 int (*inode_setxattr)(struct dentry *dentry, const char *name,
1407 const void *value, size_t size, int flags);
1408 void (*inode_post_setxattr)(struct dentry *dentry, const char *name,
1409 const void *value, size_t size,
1410 int flags);
1411 int (*inode_getxattr)(struct dentry *dentry, const char *name);
1412 int (*inode_listxattr)(struct dentry *dentry);
1413 int (*inode_removexattr)(struct dentry *dentry, const char *name);
1414 int (*inode_need_killpriv)(struct dentry *dentry);
1415 int (*inode_killpriv)(struct dentry *dentry);
1416 int (*inode_getsecurity)(const struct inode *inode, const char *name,
1417 void **buffer, bool alloc);
1418 int (*inode_setsecurity)(struct inode *inode, const char *name,
1419 const void *value, size_t size,
1420 int flags);
1421 int (*inode_listsecurity)(struct inode *inode, char *buffer,
1422 size_t buffer_size);
1423 void (*inode_getsecid)(const struct inode *inode, u32 *secid);
1424
1425 int (*file_permission)(struct file *file, int mask);
1426 int (*file_alloc_security)(struct file *file);
1427 void (*file_free_security)(struct file *file);
1428 int (*file_ioctl)(struct file *file, unsigned int cmd,
1429 unsigned long arg);
1430 int (*mmap_addr)(unsigned long addr);
1431 int (*mmap_file)(struct file *file, unsigned long reqprot,
1432 unsigned long prot, unsigned long flags);
1433 int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot,
1434 unsigned long prot);
1435 int (*file_lock)(struct file *file, unsigned int cmd);
1436 int (*file_fcntl)(struct file *file, unsigned int cmd,
1437 unsigned long arg);
1438 void (*file_set_fowner)(struct file *file);
1439 int (*file_send_sigiotask)(struct task_struct *tsk,
1440 struct fown_struct *fown, int sig);
1441 int (*file_receive)(struct file *file);
1442 int (*file_open)(struct file *file, const struct cred *cred);
1443
1444 int (*task_create)(unsigned long clone_flags);
1445 void (*task_free)(struct task_struct *task);
1446 int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
1447 void (*cred_free)(struct cred *cred);
1448 int (*cred_prepare)(struct cred *new, const struct cred *old,
1449 gfp_t gfp);
1450 void (*cred_transfer)(struct cred *new, const struct cred *old);
1451 int (*kernel_act_as)(struct cred *new, u32 secid);
1452 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1453 int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size);
1454 int (*kernel_module_request)(char *kmod_name);
1455 int (*kernel_module_from_file)(struct file *file);
1456 int (*task_fix_setuid)(struct cred *new, const struct cred *old,
1457 int flags);
1458 int (*task_setpgid)(struct task_struct *p, pid_t pgid);
1459 int (*task_getpgid)(struct task_struct *p);
1460 int (*task_getsid)(struct task_struct *p);
1461 void (*task_getsecid)(struct task_struct *p, u32 *secid);
1462 int (*task_setnice)(struct task_struct *p, int nice);
1463 int (*task_setioprio)(struct task_struct *p, int ioprio);
1464 int (*task_getioprio)(struct task_struct *p);
1465 int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
1466 struct rlimit *new_rlim);
1467 int (*task_setscheduler)(struct task_struct *p);
1468 int (*task_getscheduler)(struct task_struct *p);
1469 int (*task_movememory)(struct task_struct *p);
1470 int (*task_kill)(struct task_struct *p, struct siginfo *info,
1471 int sig, u32 secid);
1472 int (*task_wait)(struct task_struct *p);
1473 int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
1474 unsigned long arg4, unsigned long arg5);
1475 void (*task_to_inode)(struct task_struct *p, struct inode *inode);
1476
1477 int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag);
1478 void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid);
1479
1480 int (*msg_msg_alloc_security)(struct msg_msg *msg);
1481 void (*msg_msg_free_security)(struct msg_msg *msg);
1482
1483 int (*msg_queue_alloc_security)(struct msg_queue *msq);
1484 void (*msg_queue_free_security)(struct msg_queue *msq);
1485 int (*msg_queue_associate)(struct msg_queue *msq, int msqflg);
1486 int (*msg_queue_msgctl)(struct msg_queue *msq, int cmd);
1487 int (*msg_queue_msgsnd)(struct msg_queue *msq, struct msg_msg *msg,
1488 int msqflg);
1489 int (*msg_queue_msgrcv)(struct msg_queue *msq, struct msg_msg *msg,
1490 struct task_struct *target, long type,
1491 int mode);
1492
1493 int (*shm_alloc_security)(struct shmid_kernel *shp);
1494 void (*shm_free_security)(struct shmid_kernel *shp);
1495 int (*shm_associate)(struct shmid_kernel *shp, int shmflg);
1496 int (*shm_shmctl)(struct shmid_kernel *shp, int cmd);
1497 int (*shm_shmat)(struct shmid_kernel *shp, char __user *shmaddr,
1498 int shmflg);
1499
1500 int (*sem_alloc_security)(struct sem_array *sma);
1501 void (*sem_free_security)(struct sem_array *sma);
1502 int (*sem_associate)(struct sem_array *sma, int semflg);
1503 int (*sem_semctl)(struct sem_array *sma, int cmd);
1504 int (*sem_semop)(struct sem_array *sma, struct sembuf *sops,
1505 unsigned nsops, int alter);
1506
1507 int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
1508
1509 void (*d_instantiate)(struct dentry *dentry, struct inode *inode);
1510
1511 int (*getprocattr)(struct task_struct *p, char *name, char **value);
1512 int (*setprocattr)(struct task_struct *p, char *name, void *value,
1513 size_t size);
1514 int (*ismaclabel)(const char *name);
1515 int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
1516 int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
1517 void (*release_secctx)(char *secdata, u32 seclen);
1518
1519 int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
1520 int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
1521 int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
1522
1523#ifdef CONFIG_SECURITY_NETWORK
1524 int (*unix_stream_connect)(struct sock *sock, struct sock *other,
1525 struct sock *newsk);
1526 int (*unix_may_send)(struct socket *sock, struct socket *other);
1527
1528 int (*socket_create)(int family, int type, int protocol, int kern);
1529 int (*socket_post_create)(struct socket *sock, int family, int type,
1530 int protocol, int kern);
1531 int (*socket_bind)(struct socket *sock, struct sockaddr *address,
1532 int addrlen);
1533 int (*socket_connect)(struct socket *sock, struct sockaddr *address,
1534 int addrlen);
1535 int (*socket_listen)(struct socket *sock, int backlog);
1536 int (*socket_accept)(struct socket *sock, struct socket *newsock);
1537 int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg,
1538 int size);
1539 int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg,
1540 int size, int flags);
1541 int (*socket_getsockname)(struct socket *sock);
1542 int (*socket_getpeername)(struct socket *sock);
1543 int (*socket_getsockopt)(struct socket *sock, int level, int optname);
1544 int (*socket_setsockopt)(struct socket *sock, int level, int optname);
1545 int (*socket_shutdown)(struct socket *sock, int how);
1546 int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb);
1547 int (*socket_getpeersec_stream)(struct socket *sock,
1548 char __user *optval,
1549 int __user *optlen, unsigned len);
1550 int (*socket_getpeersec_dgram)(struct socket *sock,
1551 struct sk_buff *skb, u32 *secid);
1552 int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority);
1553 void (*sk_free_security)(struct sock *sk);
1554 void (*sk_clone_security)(const struct sock *sk, struct sock *newsk);
1555 void (*sk_getsecid)(struct sock *sk, u32 *secid);
1556 void (*sock_graft)(struct sock *sk, struct socket *parent);
1557 int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
1558 struct request_sock *req);
1559 void (*inet_csk_clone)(struct sock *newsk,
1560 const struct request_sock *req);
1561 void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
1562 int (*secmark_relabel_packet)(u32 secid);
1563 void (*secmark_refcount_inc)(void);
1564 void (*secmark_refcount_dec)(void);
1565 void (*req_classify_flow)(const struct request_sock *req,
1566 struct flowi *fl);
1567 int (*tun_dev_alloc_security)(void **security);
1568 void (*tun_dev_free_security)(void *security);
1569 int (*tun_dev_create)(void);
1570 int (*tun_dev_attach_queue)(void *security);
1571 int (*tun_dev_attach)(struct sock *sk, void *security);
1572 int (*tun_dev_open)(void *security);
1573#endif /* CONFIG_SECURITY_NETWORK */
1574
1575#ifdef CONFIG_SECURITY_NETWORK_XFRM
1576 int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp,
1577 struct xfrm_user_sec_ctx *sec_ctx,
1578 gfp_t gfp);
1579 int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx,
1580 struct xfrm_sec_ctx **new_ctx);
1581 void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx);
1582 int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx);
1583 int (*xfrm_state_alloc)(struct xfrm_state *x,
1584 struct xfrm_user_sec_ctx *sec_ctx);
1585 int (*xfrm_state_alloc_acquire)(struct xfrm_state *x,
1586 struct xfrm_sec_ctx *polsec,
1587 u32 secid);
1588 void (*xfrm_state_free_security)(struct xfrm_state *x);
1589 int (*xfrm_state_delete_security)(struct xfrm_state *x);
1590 int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid,
1591 u8 dir);
1592 int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
1593 struct xfrm_policy *xp,
1594 const struct flowi *fl);
1595 int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
1596#endif /* CONFIG_SECURITY_NETWORK_XFRM */
1597
1598 /* key management security hooks */
1599#ifdef CONFIG_KEYS
1600 int (*key_alloc)(struct key *key, const struct cred *cred,
1601 unsigned long flags);
1602 void (*key_free)(struct key *key);
1603 int (*key_permission)(key_ref_t key_ref, const struct cred *cred,
1604 unsigned perm);
1605 int (*key_getsecurity)(struct key *key, char **_buffer);
1606#endif /* CONFIG_KEYS */
1607
1608#ifdef CONFIG_AUDIT
1609 int (*audit_rule_init)(u32 field, u32 op, char *rulestr,
1610 void **lsmrule);
1611 int (*audit_rule_known)(struct audit_krule *krule);
1612 int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule,
1613 struct audit_context *actx);
1614 void (*audit_rule_free)(void *lsmrule);
1615#endif /* CONFIG_AUDIT */
1616};
1617
1618struct security_hook_heads {
1619 struct list_head binder_set_context_mgr;
1620 struct list_head binder_transaction;
1621 struct list_head binder_transfer_binder;
1622 struct list_head binder_transfer_file;
1623 struct list_head ptrace_access_check;
1624 struct list_head ptrace_traceme;
1625 struct list_head capget;
1626 struct list_head capset;
1627 struct list_head capable;
1628 struct list_head quotactl;
1629 struct list_head quota_on;
1630 struct list_head syslog;
1631 struct list_head settime;
1632 struct list_head vm_enough_memory;
1633 struct list_head bprm_set_creds;
1634 struct list_head bprm_check_security;
1635 struct list_head bprm_secureexec;
1636 struct list_head bprm_committing_creds;
1637 struct list_head bprm_committed_creds;
1638 struct list_head sb_alloc_security;
1639 struct list_head sb_free_security;
1640 struct list_head sb_copy_data;
1641 struct list_head sb_remount;
1642 struct list_head sb_kern_mount;
1643 struct list_head sb_show_options;
1644 struct list_head sb_statfs;
1645 struct list_head sb_mount;
1646 struct list_head sb_umount;
1647 struct list_head sb_pivotroot;
1648 struct list_head sb_set_mnt_opts;
1649 struct list_head sb_clone_mnt_opts;
1650 struct list_head sb_parse_opts_str;
1651 struct list_head dentry_init_security;
1652#ifdef CONFIG_SECURITY_PATH
1653 struct list_head path_unlink;
1654 struct list_head path_mkdir;
1655 struct list_head path_rmdir;
1656 struct list_head path_mknod;
1657 struct list_head path_truncate;
1658 struct list_head path_symlink;
1659 struct list_head path_link;
1660 struct list_head path_rename;
1661 struct list_head path_chmod;
1662 struct list_head path_chown;
1663 struct list_head path_chroot;
1664#endif
1665 struct list_head inode_alloc_security;
1666 struct list_head inode_free_security;
1667 struct list_head inode_init_security;
1668 struct list_head inode_create;
1669 struct list_head inode_link;
1670 struct list_head inode_unlink;
1671 struct list_head inode_symlink;
1672 struct list_head inode_mkdir;
1673 struct list_head inode_rmdir;
1674 struct list_head inode_mknod;
1675 struct list_head inode_rename;
1676 struct list_head inode_readlink;
1677 struct list_head inode_follow_link;
1678 struct list_head inode_permission;
1679 struct list_head inode_setattr;
1680 struct list_head inode_getattr;
1681 struct list_head inode_setxattr;
1682 struct list_head inode_post_setxattr;
1683 struct list_head inode_getxattr;
1684 struct list_head inode_listxattr;
1685 struct list_head inode_removexattr;
1686 struct list_head inode_need_killpriv;
1687 struct list_head inode_killpriv;
1688 struct list_head inode_getsecurity;
1689 struct list_head inode_setsecurity;
1690 struct list_head inode_listsecurity;
1691 struct list_head inode_getsecid;
1692 struct list_head file_permission;
1693 struct list_head file_alloc_security;
1694 struct list_head file_free_security;
1695 struct list_head file_ioctl;
1696 struct list_head mmap_addr;
1697 struct list_head mmap_file;
1698 struct list_head file_mprotect;
1699 struct list_head file_lock;
1700 struct list_head file_fcntl;
1701 struct list_head file_set_fowner;
1702 struct list_head file_send_sigiotask;
1703 struct list_head file_receive;
1704 struct list_head file_open;
1705 struct list_head task_create;
1706 struct list_head task_free;
1707 struct list_head cred_alloc_blank;
1708 struct list_head cred_free;
1709 struct list_head cred_prepare;
1710 struct list_head cred_transfer;
1711 struct list_head kernel_act_as;
1712 struct list_head kernel_create_files_as;
1713 struct list_head kernel_fw_from_file;
1714 struct list_head kernel_module_request;
1715 struct list_head kernel_module_from_file;
1716 struct list_head task_fix_setuid;
1717 struct list_head task_setpgid;
1718 struct list_head task_getpgid;
1719 struct list_head task_getsid;
1720 struct list_head task_getsecid;
1721 struct list_head task_setnice;
1722 struct list_head task_setioprio;
1723 struct list_head task_getioprio;
1724 struct list_head task_setrlimit;
1725 struct list_head task_setscheduler;
1726 struct list_head task_getscheduler;
1727 struct list_head task_movememory;
1728 struct list_head task_kill;
1729 struct list_head task_wait;
1730 struct list_head task_prctl;
1731 struct list_head task_to_inode;
1732 struct list_head ipc_permission;
1733 struct list_head ipc_getsecid;
1734 struct list_head msg_msg_alloc_security;
1735 struct list_head msg_msg_free_security;
1736 struct list_head msg_queue_alloc_security;
1737 struct list_head msg_queue_free_security;
1738 struct list_head msg_queue_associate;
1739 struct list_head msg_queue_msgctl;
1740 struct list_head msg_queue_msgsnd;
1741 struct list_head msg_queue_msgrcv;
1742 struct list_head shm_alloc_security;
1743 struct list_head shm_free_security;
1744 struct list_head shm_associate;
1745 struct list_head shm_shmctl;
1746 struct list_head shm_shmat;
1747 struct list_head sem_alloc_security;
1748 struct list_head sem_free_security;
1749 struct list_head sem_associate;
1750 struct list_head sem_semctl;
1751 struct list_head sem_semop;
1752 struct list_head netlink_send;
1753 struct list_head d_instantiate;
1754 struct list_head getprocattr;
1755 struct list_head setprocattr;
1756 struct list_head ismaclabel;
1757 struct list_head secid_to_secctx;
1758 struct list_head secctx_to_secid;
1759 struct list_head release_secctx;
1760 struct list_head inode_notifysecctx;
1761 struct list_head inode_setsecctx;
1762 struct list_head inode_getsecctx;
1763#ifdef CONFIG_SECURITY_NETWORK
1764 struct list_head unix_stream_connect;
1765 struct list_head unix_may_send;
1766 struct list_head socket_create;
1767 struct list_head socket_post_create;
1768 struct list_head socket_bind;
1769 struct list_head socket_connect;
1770 struct list_head socket_listen;
1771 struct list_head socket_accept;
1772 struct list_head socket_sendmsg;
1773 struct list_head socket_recvmsg;
1774 struct list_head socket_getsockname;
1775 struct list_head socket_getpeername;
1776 struct list_head socket_getsockopt;
1777 struct list_head socket_setsockopt;
1778 struct list_head socket_shutdown;
1779 struct list_head socket_sock_rcv_skb;
1780 struct list_head socket_getpeersec_stream;
1781 struct list_head socket_getpeersec_dgram;
1782 struct list_head sk_alloc_security;
1783 struct list_head sk_free_security;
1784 struct list_head sk_clone_security;
1785 struct list_head sk_getsecid;
1786 struct list_head sock_graft;
1787 struct list_head inet_conn_request;
1788 struct list_head inet_csk_clone;
1789 struct list_head inet_conn_established;
1790 struct list_head secmark_relabel_packet;
1791 struct list_head secmark_refcount_inc;
1792 struct list_head secmark_refcount_dec;
1793 struct list_head req_classify_flow;
1794 struct list_head tun_dev_alloc_security;
1795 struct list_head tun_dev_free_security;
1796 struct list_head tun_dev_create;
1797 struct list_head tun_dev_attach_queue;
1798 struct list_head tun_dev_attach;
1799 struct list_head tun_dev_open;
1800 struct list_head skb_owned_by;
1801#endif /* CONFIG_SECURITY_NETWORK */
1802#ifdef CONFIG_SECURITY_NETWORK_XFRM
1803 struct list_head xfrm_policy_alloc_security;
1804 struct list_head xfrm_policy_clone_security;
1805 struct list_head xfrm_policy_free_security;
1806 struct list_head xfrm_policy_delete_security;
1807 struct list_head xfrm_state_alloc;
1808 struct list_head xfrm_state_alloc_acquire;
1809 struct list_head xfrm_state_free_security;
1810 struct list_head xfrm_state_delete_security;
1811 struct list_head xfrm_policy_lookup;
1812 struct list_head xfrm_state_pol_flow_match;
1813 struct list_head xfrm_decode_session;
1814#endif /* CONFIG_SECURITY_NETWORK_XFRM */
1815#ifdef CONFIG_KEYS
1816 struct list_head key_alloc;
1817 struct list_head key_free;
1818 struct list_head key_permission;
1819 struct list_head key_getsecurity;
1820#endif /* CONFIG_KEYS */
1821#ifdef CONFIG_AUDIT
1822 struct list_head audit_rule_init;
1823 struct list_head audit_rule_known;
1824 struct list_head audit_rule_match;
1825 struct list_head audit_rule_free;
1826#endif /* CONFIG_AUDIT */
1827};
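
Each member above is the head of a list of security_hook_list entries (defined just below). A simplified, illustrative sketch of how the security core might walk one of these lists when a hook fires is shown here; the real dispatch helpers live in security/security.c, so treat this as a sketch of the idea rather than the actual implementation.

/* Illustrative only: walk every registered file_permission hook and
 * stop at the first module that returns a non-zero (denial) value. */
static int example_call_file_permission(struct file *file, int mask)
{
	struct security_hook_list *p;
	int rc = 0;

	list_for_each_entry(p, &security_hook_heads.file_permission, list) {
		rc = p->hook.file_permission(file, mask);
		if (rc != 0)
			break;
	}
	return rc;
}
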
1828
1829/*
1830 * Security module hook list structure.
1831 * For use with generic list macros for common operations.
1832 */
1833struct security_hook_list {
1834 struct list_head list;
1835 struct list_head *head;
1836 union security_list_options hook;
1837};
1838
1839/*
1840 * Initializing a security_hook_list structure takes
1841 * up a lot of space in a source file. This macro takes
1842 * care of the common case and reduces the amount of
1843 * text involved.
1844 */
1845#define LSM_HOOK_INIT(HEAD, HOOK) \
1846 { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
1847
1848extern struct security_hook_heads security_hook_heads;
1849
1850static inline void security_add_hooks(struct security_hook_list *hooks,
1851 int count)
1852{
1853 int i;
1854
1855 for (i = 0; i < count; i++)
1856 list_add_tail_rcu(&hooks[i].list, hooks[i].head);
1857}
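
Putting the pieces together, a minimal module built on this interface might register its hooks roughly as in the sketch below; the example_* names are placeholders, and a real module would implement many more hooks.

/* Hedged sketch of registering with the hook infrastructure above. */
static int example_file_open(struct file *file, const struct cred *cred)
{
	return 0;	/* always allow; a real module would consult policy */
}

static struct security_hook_list example_hooks[] = {
	LSM_HOOK_INIT(file_open, example_file_open),
};

static __init int example_lsm_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks));
	return 0;
}

An exclusive LSM would typically also gate its registration on security_module_enable(), declared later in this header.
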
1858
1859#ifdef CONFIG_SECURITY_SELINUX_DISABLE
1860/*
1861 * Assuring the safety of deleting a security module is up to
1862 * the security module involved. This may entail ordering the
1863 * module's hook list in a particular way, refusing to disable
1864 * the module once a policy is loaded or any number of other
1865 * actions better imagined than described.
1866 *
1867 * The name of the configuration option reflects the only module
1868 * that currently uses the mechanism. Any developer who thinks
1869 * disabling their module is a good idea needs to be at least as
1870 * careful as the SELinux team.
1871 */
1872static inline void security_delete_hooks(struct security_hook_list *hooks,
1873 int count)
1874{
1875 int i;
1876
1877 for (i = 0; i < count; i++)
1878 list_del_rcu(&hooks[i].list);
1879}
1880#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
1881
1882extern int __init security_module_enable(const char *module);
1883extern void __init capability_add_hooks(void);
1884#ifdef CONFIG_SECURITY_YAMA_STACKED
1885void __init yama_add_hooks(void);
1886#endif
1887
1888#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 1726ccbd8009..44348710953f 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -40,6 +40,8 @@ struct mbox_client {
 	void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
 };
 
+struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+					      const char *name);
 struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
 int mbox_send_message(struct mbox_chan *chan, void *mssg);
 void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
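
The new mbox_request_channel_byname() lets a client look its channel up by the name listed in the device tree "mbox-names" property instead of a numeric index. A hedged usage sketch follows; the "tx" channel name, the timeout, and the helper function are illustrative, not mandated by this API.

static struct mbox_client example_client = {
	.tx_block = true,
	.tx_tout  = 500,	/* ms */
};

static int example_send(struct device *dev, void *msg)
{
	struct mbox_chan *chan;
	int ret;

	example_client.dev = dev;
	/* Resolved via mboxes = <...>; mbox-names = "tx"; in the DT node. */
	chan = mbox_request_channel_byname(&example_client, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, msg);
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}
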
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index d4cf96f07cfc..68c42454439b 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -72,7 +72,7 @@ struct mbox_chan_ops {
  */
 struct mbox_controller {
 	struct device *dev;
-	struct mbox_chan_ops *ops;
+	const struct mbox_chan_ops *ops;
 	struct mbox_chan *chans;
 	int num_chans;
 	bool txdone_irq;
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
index 66c30a763b10..11f00cdabe3d 100644
--- a/include/linux/mdio-gpio.h
+++ b/include/linux/mdio-gpio.h
@@ -23,7 +23,8 @@ struct mdio_gpio_platform_data {
 	bool mdio_active_low;
 	bool mdo_active_low;
 
-	unsigned int phy_mask;
+	u32 phy_mask;
+	u32 phy_ignore_ta_mask;
 	int irqs[PHY_MAX_ADDR];
 	/* reset callback */
 	int (*reset)(struct mii_bus *bus);
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 0819d36a3a74..a16b1f9c1aca 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -7,6 +7,42 @@
 
 struct mei_cl_device;
 
+typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
+				  u32 events, void *context);
+
+/**
+ * struct mei_cl_device - MEI device handle
+ * An mei_cl_device pointer is returned from mei_add_device()
+ * and links MEI bus clients to their actual ME host client pointer.
+ * Drivers for MEI devices will get an mei_cl_device pointer
+ * when being probed and shall use it for doing ME bus I/O.
+ *
+ * @dev: linux driver model device pointer
+ * @me_cl: me client
+ * @cl: mei client
+ * @name: device name
+ * @event_work: async work to execute event callback
+ * @event_cb: Drivers register this callback to get asynchronous ME
+ *	events (e.g. Rx buffer pending) notifications.
+ * @event_context: event callback run context
+ * @events: Events bitmask sent to the driver.
+ * @priv_data: client private data
+ */
+struct mei_cl_device {
+	struct device dev;
+
+	struct mei_me_client *me_cl;
+	struct mei_cl *cl;
+	char name[MEI_CL_NAME_SIZE];
+
+	struct work_struct event_work;
+	mei_cl_event_cb_t event_cb;
+	void *event_context;
+	unsigned long events;
+
+	void *priv_data;
+};
+
 struct mei_cl_driver {
 	struct device_driver driver;
 	const char *name;
@@ -28,8 +64,6 @@ void mei_cl_driver_unregister(struct mei_cl_driver *driver);
 ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
 ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
 
-typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
-				  u32 events, void *context);
 int mei_cl_register_event_cb(struct mei_cl_device *device,
 			     mei_cl_event_cb_t read_cb, void *context);
 
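
With struct mei_cl_device now exported, a bus client driver can register for events and read from its device through the unchanged mei_cl_register_event_cb()/mei_cl_recv() calls. A hedged sketch; the MEI_CL_EVENT_RX bit test and the 32-byte buffer size are assumptions of this example rather than requirements of the header.

static void example_event_cb(struct mei_cl_device *device, u32 events,
			     void *context)
{
	u8 buf[32];

	/* Assumed RX event bit; a real driver would also check the
	 * return value of mei_cl_recv(). */
	if (events & BIT(MEI_CL_EVENT_RX))
		mei_cl_recv(device, buf, sizeof(buf));
}

static int example_enable(struct mei_cl_device *device)
{
	return mei_cl_register_event_cb(device, example_event_cb, NULL);
}
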
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 9497ec7c77ea..cc4b01972060 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -21,7 +21,11 @@
 #define INIT_PHYSMEM_REGIONS	4
 
 /* Definition of memblock flags. */
-#define MEMBLOCK_HOTPLUG	0x1	/* hotpluggable region */
+enum {
+	MEMBLOCK_NONE		= 0x0,	/* No special request */
+	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
+	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
+};
 
 struct memblock_region {
 	phys_addr_t base;
@@ -61,7 +65,7 @@ extern bool movable_node_enabled;
 
 phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
 					phys_addr_t start, phys_addr_t end,
-					int nid);
+					int nid, ulong flags);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
 				   phys_addr_t size, phys_addr_t align);
 phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
@@ -75,6 +79,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
 void memblock_trim_memory(phys_addr_t align);
 int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
+int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
+ulong choose_memblock_flags(void);
 
 /* Low level functions */
 int memblock_add_range(struct memblock_type *type,
@@ -85,14 +91,19 @@ int memblock_remove_range(struct memblock_type *type,
 			  phys_addr_t base,
 			  phys_addr_t size);
 
-void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range(u64 *idx, int nid, ulong flags,
+		      struct memblock_type *type_a,
 		      struct memblock_type *type_b, phys_addr_t *out_start,
 		      phys_addr_t *out_end, int *out_nid);
 
-void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+			  struct memblock_type *type_a,
 			  struct memblock_type *type_b, phys_addr_t *out_start,
 			  phys_addr_t *out_end, int *out_nid);
 
+void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
+				phys_addr_t *out_end);
+
 /**
  * for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.
@@ -100,16 +111,17 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
  * @type_a: ptr to memblock_type to iterate
  * @type_b: ptr to memblock_type which excludes from the iteration
  * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
  */
-#define for_each_mem_range(i, type_a, type_b, nid,			\
+#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
 			   p_start, p_end, p_nid)			\
-	for (i = 0, __next_mem_range(&i, nid, type_a, type_b,		\
+	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
 				     p_start, p_end, p_nid);		\
 	     i != (u64)ULLONG_MAX;					\
-	     __next_mem_range(&i, nid, type_a, type_b,			\
+	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
 			      p_start, p_end, p_nid))
 
 /**
@@ -119,19 +131,35 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
  * @type_a: ptr to memblock_type to iterate
  * @type_b: ptr to memblock_type which excludes from the iteration
  * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
  */
-#define for_each_mem_range_rev(i, type_a, type_b, nid,			\
+#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
 			       p_start, p_end, p_nid)			\
 	for (i = (u64)ULLONG_MAX,					\
-		     __next_mem_range_rev(&i, nid, type_a, type_b,	\
+		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
 				  p_start, p_end, p_nid);		\
 	     i != (u64)ULLONG_MAX;					\
-	     __next_mem_range_rev(&i, nid, type_a, type_b,		\
+	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
 				  p_start, p_end, p_nid))
 
+/**
+ * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over reserved areas of memblock. Available as soon as memblock
+ * is initialized.
+ */
+#define for_each_reserved_mem_region(i, p_start, p_end)			\
+	for (i = 0UL,							\
+	     __next_reserved_mem_region(&i, p_start, p_end);		\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_reserved_mem_region(&i, p_start, p_end))
+
 #ifdef CONFIG_MOVABLE_NODE
 static inline bool memblock_is_hotpluggable(struct memblock_region *m)
 {
@@ -153,6 +181,11 @@ static inline bool movable_node_is_enabled(void)
 }
 #endif
 
+static inline bool memblock_is_mirror(struct memblock_region *m)
+{
+	return m->flags & MEMBLOCK_MIRROR;
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
 			    unsigned long *end_pfn);
@@ -181,13 +214,14 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
  *
  * Walks over free (memory && !reserved) areas of memblock. Available as
  * soon as memblock is initialized.
  */
-#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
+#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
 	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
-			   nid, p_start, p_end, p_nid)
+			   nid, flags, p_start, p_end, p_nid)
 
 /**
  * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -196,13 +230,15 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
  *
  * Walks over free (memory && !reserved) areas of memblock in reverse
  * order. Available as soon as memblock is initialized.
  */
-#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
+#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
+					p_nid)				\
 	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
-			       nid, p_start, p_end, p_nid)
+			       nid, flags, p_start, p_end, p_nid)
 
 static inline void memblock_set_region_flags(struct memblock_region *r,
 					     unsigned long flags)
@@ -273,7 +309,8 @@ static inline bool memblock_bottom_up(void) { return false; }
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
 
 phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
-					phys_addr_t start, phys_addr_t end);
+					phys_addr_t start, phys_addr_t end,
+					ulong flags);
 phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
 				phys_addr_t max_addr);
 phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
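
Every memblock iterator and allocator above now takes a flags argument; callers that do not care about memory attributes pass MEMBLOCK_NONE, and mirrored-memory-aware callers can pass the value returned by choose_memblock_flags(). A minimal sketch of early-boot code updated for the new for_each_free_mem_range() signature:

/* Sketch only; MEMBLOCK_NONE preserves the old unfiltered behaviour. */
static phys_addr_t __init example_count_free_memory(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		total += end - start;

	return total;
}
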
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c8918114804..73b02b0a8f60 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -41,6 +41,7 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
 	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
 	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
+	MEM_CGROUP_STAT_DIRTY,		/* # of dirty pages in page cache */
 	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
 	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
@@ -67,6 +68,8 @@ enum mem_cgroup_events_index {
 };
 
 #ifdef CONFIG_MEMCG
+extern struct cgroup_subsys_state *mem_cgroup_root_css;
+
 void mem_cgroup_events(struct mem_cgroup *memcg,
 		       enum mem_cgroup_events_index idx,
 		       unsigned int nr);
@@ -112,6 +115,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
 }
 
 extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
+extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
 
 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
 				   struct mem_cgroup *,
@@ -195,6 +199,8 @@ void mem_cgroup_split_huge_fixup(struct page *head);
 #else /* CONFIG_MEMCG */
 struct mem_cgroup;
 
+#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
+
 static inline void mem_cgroup_events(struct mem_cgroup *memcg,
 				     enum mem_cgroup_events_index idx,
 				     unsigned int nr)
@@ -382,6 +388,29 @@ enum {
 	OVER_LIMIT,
 };
 
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
+struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
+void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
+			 unsigned long *pdirty, unsigned long *pwriteback);
+
+#else	/* CONFIG_CGROUP_WRITEBACK */
+
+static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
+{
+	return NULL;
+}
+
+static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
+				       unsigned long *pavail,
+				       unsigned long *pdirty,
+				       unsigned long *pwriteback)
+{
+}
+
+#endif	/* CONFIG_CGROUP_WRITEBACK */
+
 struct sock;
 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
 void sock_update_memcg(struct sock *sk);
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 16a498f48169..2f434f4f79a1 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -117,6 +117,7 @@ struct arizona {
 	int num_core_supplies;
 	struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES];
 	struct regulator *dcvdd;
+	bool has_fully_powered_off;
 
 	struct arizona_pdata pdata;
 
@@ -153,7 +154,15 @@ int arizona_request_irq(struct arizona *arizona, int irq, char *name,
 void arizona_free_irq(struct arizona *arizona, int irq, void *data);
 int arizona_set_irq_wake(struct arizona *arizona, int irq, int on);
 
+#ifdef CONFIG_MFD_WM5102
 int wm5102_patch(struct arizona *arizona);
+#else
+static inline int wm5102_patch(struct arizona *arizona)
+{
+	return 0;
+}
+#endif
+
 int wm5110_patch(struct arizona *arizona);
 int wm8997_patch(struct arizona *arizona);
 
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 1789cb0f4f17..43db4faad143 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -121,6 +121,9 @@ struct arizona_pdata {
121 /** GPIO used for mic isolation with HPDET */ 121 /** GPIO used for mic isolation with HPDET */
122 int hpdet_id_gpio; 122 int hpdet_id_gpio;
123 123
124 /** Channel to use for headphone detection */
125 unsigned int hpdet_channel;
126
124 /** Extra debounce timeout used during initial mic detection (ms) */ 127 /** Extra debounce timeout used during initial mic detection (ms) */
125 int micd_detect_debounce; 128 int micd_detect_debounce;
126 129
@@ -156,7 +159,10 @@ struct arizona_pdata {
156 /** MICBIAS configurations */ 159 /** MICBIAS configurations */
157 struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS]; 160 struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS];
158 161
159 /** Mode of input structures */ 162 /**
163 * Mode of input structures
164 * One of the ARIZONA_INMODE_xxx values
165 */
160 int inmode[ARIZONA_MAX_INPUT]; 166 int inmode[ARIZONA_MAX_INPUT];
161 167
162 /** Mode for outputs */ 168 /** Mode for outputs */
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index aacc10d7789c..3499d36e6067 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -2515,9 +2515,12 @@
2515#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */ 2515#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */
2516#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */ 2516#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */
2517#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */ 2517#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */
2518#define ARIZONA_IN1_MODE_MASK 0x0600 /* IN1_MODE - [10:9] */ 2518#define ARIZONA_IN1_MODE_MASK 0x0400 /* IN1_MODE - [10] */
2519#define ARIZONA_IN1_MODE_SHIFT 9 /* IN1_MODE - [10:9] */ 2519#define ARIZONA_IN1_MODE_SHIFT 10 /* IN1_MODE - [10] */
2520#define ARIZONA_IN1_MODE_WIDTH 2 /* IN1_MODE - [10:9] */ 2520#define ARIZONA_IN1_MODE_WIDTH 1 /* IN1_MODE - [10] */
2521#define ARIZONA_IN1_SINGLE_ENDED_MASK 0x0200 /* IN1_MODE - [9] */
2522#define ARIZONA_IN1_SINGLE_ENDED_SHIFT 9 /* IN1_MODE - [9] */
2523#define ARIZONA_IN1_SINGLE_ENDED_WIDTH 1 /* IN1_MODE - [9] */
2521#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */ 2524#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */
2522#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */ 2525#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */
2523#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */ 2526#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */
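A hedged sketch of a call site for the reworked IN1 mode bits; ARIZONA_IN1L_CONTROL and the regmap handle come from elsewhere in the driver and are assumptions here.

#include <linux/regmap.h>
#include <linux/mfd/arizona/registers.h>

/* Keep IN1 in analogue mode (assumed to be field value 0) and set the
 * new single-ended flag, which now lives in bit 9 on its own.
 */
static int example_in1_single_ended(struct regmap *regmap)
{
	return regmap_update_bits(regmap, ARIZONA_IN1L_CONTROL,
				  ARIZONA_IN1_MODE_MASK |
				  ARIZONA_IN1_SINGLE_ENDED_MASK,
				  (0 << ARIZONA_IN1_MODE_SHIFT) |
				  (1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT));
}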
@@ -2588,9 +2591,12 @@
2588#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */ 2591#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */
2589#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */ 2592#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */
2590#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */ 2593#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */
2591#define ARIZONA_IN2_MODE_MASK 0x0600 /* IN2_MODE - [10:9] */ 2594#define ARIZONA_IN2_MODE_MASK 0x0400 /* IN2_MODE - [10] */
2592#define ARIZONA_IN2_MODE_SHIFT 9 /* IN2_MODE - [10:9] */ 2595#define ARIZONA_IN2_MODE_SHIFT 10 /* IN2_MODE - [10] */
2593#define ARIZONA_IN2_MODE_WIDTH 2 /* IN2_MODE - [10:9] */ 2596#define ARIZONA_IN2_MODE_WIDTH 1 /* IN2_MODE - [10] */
2597#define ARIZONA_IN2_SINGLE_ENDED_MASK 0x0200 /* IN2_MODE - [9] */
2598#define ARIZONA_IN2_SINGLE_ENDED_SHIFT 9 /* IN2_MODE - [9] */
2599#define ARIZONA_IN2_SINGLE_ENDED_WIDTH 1 /* IN2_MODE - [9] */
2594#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */ 2600#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */
2595#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */ 2601#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */
2596#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */ 2602#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */
@@ -2661,9 +2667,12 @@
2661#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */ 2667#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */
2662#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */ 2668#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */
2663#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */ 2669#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */
2664#define ARIZONA_IN3_MODE_MASK 0x0600 /* IN3_MODE - [10:9] */ 2670#define ARIZONA_IN3_MODE_MASK 0x0400 /* IN3_MODE - [10] */
2665#define ARIZONA_IN3_MODE_SHIFT 9 /* IN3_MODE - [10:9] */ 2671#define ARIZONA_IN3_MODE_SHIFT 10 /* IN3_MODE - [10] */
2666#define ARIZONA_IN3_MODE_WIDTH 2 /* IN3_MODE - [10:9] */ 2672#define ARIZONA_IN3_MODE_WIDTH 1 /* IN3_MODE - [10] */
2673#define ARIZONA_IN3_SINGLE_ENDED_MASK 0x0200 /* IN3_MODE - [9] */
2674#define ARIZONA_IN3_SINGLE_ENDED_SHIFT 9 /* IN3_MODE - [9] */
2675#define ARIZONA_IN3_SINGLE_ENDED_WIDTH 1 /* IN3_MODE - [9] */
2667#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */ 2676#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */
2668#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */ 2677#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */
2669#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */ 2678#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index dfabd6db7ddf..c2aa853fb412 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -14,6 +14,7 @@
14enum { 14enum {
15 AXP202_ID = 0, 15 AXP202_ID = 0,
16 AXP209_ID, 16 AXP209_ID,
17 AXP221_ID,
17 AXP288_ID, 18 AXP288_ID,
18 NR_AXP20X_VARIANTS, 19 NR_AXP20X_VARIANTS,
19}; 20};
@@ -45,6 +46,28 @@ enum {
45#define AXP20X_V_LTF_DISCHRG 0x3c 46#define AXP20X_V_LTF_DISCHRG 0x3c
46#define AXP20X_V_HTF_DISCHRG 0x3d 47#define AXP20X_V_HTF_DISCHRG 0x3d
47 48
49#define AXP22X_PWR_OUT_CTRL1 0x10
50#define AXP22X_PWR_OUT_CTRL2 0x12
51#define AXP22X_PWR_OUT_CTRL3 0x13
52#define AXP22X_DLDO1_V_OUT 0x15
53#define AXP22X_DLDO2_V_OUT 0x16
54#define AXP22X_DLDO3_V_OUT 0x17
55#define AXP22X_DLDO4_V_OUT 0x18
56#define AXP22X_ELDO1_V_OUT 0x19
57#define AXP22X_ELDO2_V_OUT 0x1a
58#define AXP22X_ELDO3_V_OUT 0x1b
59#define AXP22X_DC5LDO_V_OUT 0x1c
60#define AXP22X_DCDC1_V_OUT 0x21
61#define AXP22X_DCDC2_V_OUT 0x22
62#define AXP22X_DCDC3_V_OUT 0x23
63#define AXP22X_DCDC4_V_OUT 0x24
64#define AXP22X_DCDC5_V_OUT 0x25
65#define AXP22X_DCDC23_V_RAMP_CTRL 0x27
66#define AXP22X_ALDO1_V_OUT 0x28
67#define AXP22X_ALDO2_V_OUT 0x29
68#define AXP22X_ALDO3_V_OUT 0x2a
69#define AXP22X_CHRG_CTRL3 0x35
70
48/* Interrupt */ 71/* Interrupt */
49#define AXP20X_IRQ1_EN 0x40 72#define AXP20X_IRQ1_EN 0x40
50#define AXP20X_IRQ2_EN 0x41 73#define AXP20X_IRQ2_EN 0x41
@@ -100,6 +123,9 @@ enum {
100#define AXP20X_VBUS_MON 0x8b 123#define AXP20X_VBUS_MON 0x8b
101#define AXP20X_OVER_TMP 0x8f 124#define AXP20X_OVER_TMP 0x8f
102 125
126#define AXP22X_PWREN_CTRL1 0x8c
127#define AXP22X_PWREN_CTRL2 0x8d
128
103/* GPIO */ 129/* GPIO */
104#define AXP20X_GPIO0_CTRL 0x90 130#define AXP20X_GPIO0_CTRL 0x90
105#define AXP20X_LDO5_V_OUT 0x91 131#define AXP20X_LDO5_V_OUT 0x91
@@ -108,6 +134,11 @@ enum {
108#define AXP20X_GPIO20_SS 0x94 134#define AXP20X_GPIO20_SS 0x94
109#define AXP20X_GPIO3_CTRL 0x95 135#define AXP20X_GPIO3_CTRL 0x95
110 136
137#define AXP22X_LDO_IO0_V_OUT 0x91
138#define AXP22X_LDO_IO1_V_OUT 0x93
139#define AXP22X_GPIO_STATE 0x94
140#define AXP22X_GPIO_PULL_DOWN 0x95
141
111/* Battery */ 142/* Battery */
112#define AXP20X_CHRG_CC_31_24 0xb0 143#define AXP20X_CHRG_CC_31_24 0xb0
113#define AXP20X_CHRG_CC_23_16 0xb1 144#define AXP20X_CHRG_CC_23_16 0xb1
@@ -120,6 +151,9 @@ enum {
120#define AXP20X_CC_CTRL 0xb8 151#define AXP20X_CC_CTRL 0xb8
121#define AXP20X_FG_RES 0xb9 152#define AXP20X_FG_RES 0xb9
122 153
154/* AXP22X specific registers */
155#define AXP22X_BATLOW_THRES1 0xe6
156
123/* AXP288 specific registers */ 157/* AXP288 specific registers */
124#define AXP288_PMIC_ADC_H 0x56 158#define AXP288_PMIC_ADC_H 0x56
125#define AXP288_PMIC_ADC_L 0x57 159#define AXP288_PMIC_ADC_L 0x57
@@ -158,6 +192,30 @@ enum {
158 AXP20X_REG_ID_MAX, 192 AXP20X_REG_ID_MAX,
159}; 193};
160 194
195enum {
196 AXP22X_DCDC1 = 0,
197 AXP22X_DCDC2,
198 AXP22X_DCDC3,
199 AXP22X_DCDC4,
200 AXP22X_DCDC5,
201 AXP22X_DC1SW,
202 AXP22X_DC5LDO,
203 AXP22X_ALDO1,
204 AXP22X_ALDO2,
205 AXP22X_ALDO3,
206 AXP22X_ELDO1,
207 AXP22X_ELDO2,
208 AXP22X_ELDO3,
209 AXP22X_DLDO1,
210 AXP22X_DLDO2,
211 AXP22X_DLDO3,
212 AXP22X_DLDO4,
213 AXP22X_RTC_LDO,
214 AXP22X_LDO_IO0,
215 AXP22X_LDO_IO1,
216 AXP22X_REG_ID_MAX,
217};
218
161/* IRQs */ 219/* IRQs */
162enum { 220enum {
163 AXP20X_IRQ_ACIN_OVER_V = 1, 221 AXP20X_IRQ_ACIN_OVER_V = 1,
@@ -199,6 +257,34 @@ enum {
199 AXP20X_IRQ_GPIO0_INPUT, 257 AXP20X_IRQ_GPIO0_INPUT,
200}; 258};
201 259
260enum axp22x_irqs {
261 AXP22X_IRQ_ACIN_OVER_V = 1,
262 AXP22X_IRQ_ACIN_PLUGIN,
263 AXP22X_IRQ_ACIN_REMOVAL,
264 AXP22X_IRQ_VBUS_OVER_V,
265 AXP22X_IRQ_VBUS_PLUGIN,
266 AXP22X_IRQ_VBUS_REMOVAL,
267 AXP22X_IRQ_VBUS_V_LOW,
268 AXP22X_IRQ_BATT_PLUGIN,
269 AXP22X_IRQ_BATT_REMOVAL,
270 AXP22X_IRQ_BATT_ENT_ACT_MODE,
271 AXP22X_IRQ_BATT_EXIT_ACT_MODE,
272 AXP22X_IRQ_CHARG,
273 AXP22X_IRQ_CHARG_DONE,
274 AXP22X_IRQ_BATT_TEMP_HIGH,
275 AXP22X_IRQ_BATT_TEMP_LOW,
276 AXP22X_IRQ_DIE_TEMP_HIGH,
277 AXP22X_IRQ_PEK_SHORT,
278 AXP22X_IRQ_PEK_LONG,
279 AXP22X_IRQ_LOW_PWR_LVL1,
280 AXP22X_IRQ_LOW_PWR_LVL2,
281 AXP22X_IRQ_TIMER,
282 AXP22X_IRQ_PEK_RIS_EDGE,
283 AXP22X_IRQ_PEK_FAL_EDGE,
284 AXP22X_IRQ_GPIO1_INPUT,
285 AXP22X_IRQ_GPIO0_INPUT,
286};
287
202enum axp288_irqs { 288enum axp288_irqs {
203 AXP288_IRQ_VBUS_FALL = 2, 289 AXP288_IRQ_VBUS_FALL = 2,
204 AXP288_IRQ_VBUS_RISE, 290 AXP288_IRQ_VBUS_RISE,
@@ -275,4 +361,16 @@ struct axp20x_fg_pdata {
275 int thermistor_curve[MAX_THERM_CURVE_SIZE][2]; 361 int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
276}; 362};
277 363
364struct axp20x_chrg_pdata {
365 int max_cc;
366 int max_cv;
367 int def_cc;
368 int def_cv;
369};
370
371struct axp288_extcon_pdata {
 372 /* GPIO pin control to switch D+/D- lines between the PMIC and the SoC */
373 struct gpio_desc *gpio_mux_cntl;
374};
375
278#endif /* __LINUX_MFD_AXP20X_H */ 376#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 324a34683971..da72671a42fa 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -17,10 +17,29 @@
17#define __LINUX_MFD_CROS_EC_H 17#define __LINUX_MFD_CROS_EC_H
18 18
19#include <linux/cdev.h> 19#include <linux/cdev.h>
20#include <linux/device.h>
20#include <linux/notifier.h> 21#include <linux/notifier.h>
21#include <linux/mfd/cros_ec_commands.h> 22#include <linux/mfd/cros_ec_commands.h>
22#include <linux/mutex.h> 23#include <linux/mutex.h>
23 24
25#define CROS_EC_DEV_NAME "cros_ec"
26#define CROS_EC_DEV_PD_NAME "cros_pd"
27
28/*
29 * The EC is unresponsive for a time after a reboot command. Add a
30 * simple delay to make sure that the bus stays locked.
31 */
32#define EC_REBOOT_DELAY_MS 50
33
34/*
35 * Max bus-specific overhead incurred by request/responses.
36 * I2C requires 1 additional byte for requests.
37 * I2C requires 2 additional bytes for responses.
 38 */
39#define EC_PROTO_VERSION_UNKNOWN 0
40#define EC_MAX_REQUEST_OVERHEAD 1
41#define EC_MAX_RESPONSE_OVERHEAD 2
42
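A minimal sketch of how the overhead constants above could size the shared transfer buffers; struct ec_host_request and ec_host_response are assumed from cros_ec_commands.h, and the sizing policy itself is illustrative.

#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>

/* Size the transfer buffers for the largest message plus bus overhead. */
static void example_size_buffers(struct cros_ec_device *ec_dev)
{
	ec_dev->dout_size = sizeof(struct ec_host_request) +
			    ec_dev->max_request + EC_MAX_REQUEST_OVERHEAD;
	ec_dev->din_size = sizeof(struct ec_host_response) +
			   ec_dev->max_response + EC_MAX_RESPONSE_OVERHEAD;
}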
24/* 43/*
25 * Command interface between EC and AP, for LPC, I2C and SPI interfaces. 44 * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
26 */ 45 */
@@ -42,8 +61,7 @@ enum {
42 * @outsize: Outgoing length in bytes 61 * @outsize: Outgoing length in bytes
43 * @insize: Max number of bytes to accept from EC 62 * @insize: Max number of bytes to accept from EC
44 * @result: EC's response to the command (separate from communication failure) 63 * @result: EC's response to the command (separate from communication failure)
45 * @outdata: Outgoing data to EC 64 * @data: Where to put the incoming data from EC and outgoing data to EC
46 * @indata: Where to put the incoming data from EC
47 */ 65 */
48struct cros_ec_command { 66struct cros_ec_command {
49 uint32_t version; 67 uint32_t version;
@@ -51,18 +69,14 @@ struct cros_ec_command {
51 uint32_t outsize; 69 uint32_t outsize;
52 uint32_t insize; 70 uint32_t insize;
53 uint32_t result; 71 uint32_t result;
54 uint8_t outdata[EC_PROTO2_MAX_PARAM_SIZE]; 72 uint8_t data[0];
55 uint8_t indata[EC_PROTO2_MAX_PARAM_SIZE];
56}; 73};
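With outdata/indata collapsed into the flexible data[] member, callers now allocate the message and its payload together; a minimal sketch, with the allocation policy as the only assumption.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mfd/cros_ec.h>

/* Allocate a command whose payload shares storage for request and reply. */
static struct cros_ec_command *example_alloc_msg(unsigned int outsize,
						 unsigned int insize)
{
	struct cros_ec_command *msg;

	msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
	if (!msg)
		return NULL;

	msg->outsize = outsize;	/* bytes of data[] sent to the EC */
	msg->insize = insize;	/* bytes of data[] the EC may write back */
	return msg;
}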
57 74
58/** 75/**
59 * struct cros_ec_device - Information about a ChromeOS EC device 76 * struct cros_ec_device - Information about a ChromeOS EC device
60 * 77 *
61 * @ec_name: name of EC device (e.g. 'chromeos-ec')
62 * @phys_name: name of physical comms layer (e.g. 'i2c-4') 78 * @phys_name: name of physical comms layer (e.g. 'i2c-4')
63 * @dev: Device pointer for physical comms device 79 * @dev: Device pointer for physical comms device
64 * @vdev: Device pointer for virtual comms device
65 * @cdev: Character device structure for virtual comms device
66 * @was_wake_device: true if this device was set to wake the system from 80 * @was_wake_device: true if this device was set to wake the system from
67 * sleep at the last suspend 81 * sleep at the last suspend
68 * @cmd_readmem: direct read of the EC memory-mapped region, if supported 82 * @cmd_readmem: direct read of the EC memory-mapped region, if supported
@@ -74,6 +88,7 @@ struct cros_ec_command {
74 * 88 *
75 * @priv: Private data 89 * @priv: Private data
76 * @irq: Interrupt to use 90 * @irq: Interrupt to use
91 * @id: Device id
77 * @din: input buffer (for data from EC) 92 * @din: input buffer (for data from EC)
78 * @dout: output buffer (for data to EC) 93 * @dout: output buffer (for data to EC)
79 * \note 94 * \note
@@ -85,41 +100,72 @@ struct cros_ec_command {
85 * to using dword. 100 * to using dword.
86 * @din_size: size of din buffer to allocate (zero to use static din) 101 * @din_size: size of din buffer to allocate (zero to use static din)
87 * @dout_size: size of dout buffer to allocate (zero to use static dout) 102 * @dout_size: size of dout buffer to allocate (zero to use static dout)
88 * @parent: pointer to parent device (e.g. i2c or spi device)
89 * @wake_enabled: true if this device can wake the system from sleep 103 * @wake_enabled: true if this device can wake the system from sleep
90 * @cmd_xfer: send command to EC and get response 104 * @cmd_xfer: send command to EC and get response
91 * Returns the number of bytes received if the communication succeeded, but 105 * Returns the number of bytes received if the communication succeeded, but
92 * that doesn't mean the EC was happy with the command. The caller 106 * that doesn't mean the EC was happy with the command. The caller
93 * should check msg.result for the EC's result code. 107 * should check msg.result for the EC's result code.
108 * @pkt_xfer: send packet to EC and get response
94 * @lock: one transaction at a time 109 * @lock: one transaction at a time
95 */ 110 */
96struct cros_ec_device { 111struct cros_ec_device {
97 112
98 /* These are used by other drivers that want to talk to the EC */ 113 /* These are used by other drivers that want to talk to the EC */
99 const char *ec_name;
100 const char *phys_name; 114 const char *phys_name;
101 struct device *dev; 115 struct device *dev;
102 struct device *vdev;
103 struct cdev cdev;
104 bool was_wake_device; 116 bool was_wake_device;
105 struct class *cros_class; 117 struct class *cros_class;
106 int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, 118 int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
107 unsigned int bytes, void *dest); 119 unsigned int bytes, void *dest);
108 120
109 /* These are used to implement the platform-specific interface */ 121 /* These are used to implement the platform-specific interface */
122 u16 max_request;
123 u16 max_response;
124 u16 max_passthru;
125 u16 proto_version;
110 void *priv; 126 void *priv;
111 int irq; 127 int irq;
112 uint8_t *din; 128 u8 *din;
113 uint8_t *dout; 129 u8 *dout;
114 int din_size; 130 int din_size;
115 int dout_size; 131 int dout_size;
116 struct device *parent;
117 bool wake_enabled; 132 bool wake_enabled;
118 int (*cmd_xfer)(struct cros_ec_device *ec, 133 int (*cmd_xfer)(struct cros_ec_device *ec,
119 struct cros_ec_command *msg); 134 struct cros_ec_command *msg);
135 int (*pkt_xfer)(struct cros_ec_device *ec,
136 struct cros_ec_command *msg);
120 struct mutex lock; 137 struct mutex lock;
121}; 138};
122 139
140/* struct cros_ec_platform - ChromeOS EC platform information
141 *
142 * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
143 * used in /dev/ and sysfs.
144 * @cmd_offset: offset to apply for each command. Set when
145 * registering a devicde behind another one.
146 */
147struct cros_ec_platform {
148 const char *ec_name;
149 u16 cmd_offset;
150};
151
152/*
153 * struct cros_ec_dev - ChromeOS EC device entry point
154 *
155 * @class_dev: Device structure used in sysfs
156 * @cdev: Character device structure in /dev
157 * @ec_dev: cros_ec_device structure to talk to the physical device
158 * @dev: pointer to the platform device
159 * @cmd_offset: offset to apply for each command.
160 */
161struct cros_ec_dev {
162 struct device class_dev;
163 struct cdev cdev;
164 struct cros_ec_device *ec_dev;
165 struct device *dev;
166 u16 cmd_offset;
167};
168
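A sketch tying cmd_offset to the passthru window defined further down in cros_ec_commands.h; treating the PD MCU as sub-device 1 is a board-specific assumption.

#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>

/* Platform data for a PD MCU reached through the EC (assumed sub-device 1). */
static const struct cros_ec_platform example_pd_platform = {
	.ec_name	= CROS_EC_DEV_PD_NAME,
	.cmd_offset	= EC_CMD_PASSTHRU_OFFSET(1),
};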
123/** 169/**
124 * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device 170 * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
125 * 171 *
@@ -198,4 +244,16 @@ int cros_ec_remove(struct cros_ec_device *ec_dev);
198 */ 244 */
199int cros_ec_register(struct cros_ec_device *ec_dev); 245int cros_ec_register(struct cros_ec_device *ec_dev);
200 246
247/**
 248 * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
 249 *
 250 * @ec_dev: Device to query
251 * @return 0 if ok, -ve on error
252 */
253int cros_ec_query_all(struct cros_ec_device *ec_dev);
254
255/* sysfs stuff */
256extern struct attribute_group cros_ec_attr_group;
257extern struct attribute_group cros_ec_lightbar_attr_group;
258
201#endif /* __LINUX_MFD_CROS_EC_H */ 259#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index a49cd41feea7..13b630c10d4c 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -515,7 +515,7 @@ struct ec_host_response {
515/* 515/*
516 * Notes on commands: 516 * Notes on commands:
517 * 517 *
 518 * Each command is an 8-byte command value. Commands which take params or 518 * Each command is a 16-bit command value. Commands which take params or
519 * return response data specify structs for that data. If no struct is 519 * return response data specify structs for that data. If no struct is
520 * specified, the command does not input or output data, respectively. 520 * specified, the command does not input or output data, respectively.
521 * Parameter/response length is implicit in the structs. Some underlying 521 * Parameter/response length is implicit in the structs. Some underlying
@@ -966,7 +966,7 @@ struct rgb_s {
966/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a 966/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
967 * host command, but the alignment is the same regardless. Keep it that way. 967 * host command, but the alignment is the same regardless. Keep it that way.
968 */ 968 */
969struct lightbar_params { 969struct lightbar_params_v0 {
970 /* Timing */ 970 /* Timing */
971 int32_t google_ramp_up; 971 int32_t google_ramp_up;
972 int32_t google_ramp_down; 972 int32_t google_ramp_down;
@@ -1000,32 +1000,81 @@ struct lightbar_params {
1000 struct rgb_s color[8]; /* 0-3 are Google colors */ 1000 struct rgb_s color[8]; /* 0-3 are Google colors */
1001} __packed; 1001} __packed;
1002 1002
1003struct lightbar_params_v1 {
1004 /* Timing */
1005 int32_t google_ramp_up;
1006 int32_t google_ramp_down;
1007 int32_t s3s0_ramp_up;
1008 int32_t s0_tick_delay[2]; /* AC=0/1 */
1009 int32_t s0a_tick_delay[2]; /* AC=0/1 */
1010 int32_t s0s3_ramp_down;
1011 int32_t s3_sleep_for;
1012 int32_t s3_ramp_up;
1013 int32_t s3_ramp_down;
1014 int32_t tap_tick_delay;
1015 int32_t tap_display_time;
1016
1017 /* Tap-for-battery params */
1018 uint8_t tap_pct_red;
1019 uint8_t tap_pct_green;
1020 uint8_t tap_seg_min_on;
1021 uint8_t tap_seg_max_on;
1022 uint8_t tap_seg_osc;
1023 uint8_t tap_idx[3];
1024
1025 /* Oscillation */
1026 uint8_t osc_min[2]; /* AC=0/1 */
1027 uint8_t osc_max[2]; /* AC=0/1 */
1028 uint8_t w_ofs[2]; /* AC=0/1 */
1029
1030 /* Brightness limits based on the backlight and AC. */
1031 uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
1032 uint8_t bright_bl_on_min[2]; /* AC=0/1 */
1033 uint8_t bright_bl_on_max[2]; /* AC=0/1 */
1034
1035 /* Battery level thresholds */
1036 uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
1037
1038 /* Map [AC][battery_level] to color index */
1039 uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
1040 uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
1041
1042 /* Color palette */
1043 struct rgb_s color[8]; /* 0-3 are Google colors */
1044} __packed;
1045
1003struct ec_params_lightbar { 1046struct ec_params_lightbar {
1004 uint8_t cmd; /* Command (see enum lightbar_command) */ 1047 uint8_t cmd; /* Command (see enum lightbar_command) */
1005 union { 1048 union {
1006 struct { 1049 struct {
1007 /* no args */ 1050 /* no args */
1008 } dump, off, on, init, get_seq, get_params, version; 1051 } dump, off, on, init, get_seq, get_params_v0, get_params_v1,
1052 version, get_brightness, get_demo;
1009 1053
1010 struct num { 1054 struct {
1011 uint8_t num; 1055 uint8_t num;
1012 } brightness, seq, demo; 1056 } set_brightness, seq, demo;
1013 1057
1014 struct reg { 1058 struct {
1015 uint8_t ctrl, reg, value; 1059 uint8_t ctrl, reg, value;
1016 } reg; 1060 } reg;
1017 1061
1018 struct rgb { 1062 struct {
1019 uint8_t led, red, green, blue; 1063 uint8_t led, red, green, blue;
1020 } rgb; 1064 } set_rgb;
1065
1066 struct {
1067 uint8_t led;
1068 } get_rgb;
1021 1069
1022 struct lightbar_params set_params; 1070 struct lightbar_params_v0 set_params_v0;
1071 struct lightbar_params_v1 set_params_v1;
1023 }; 1072 };
1024} __packed; 1073} __packed;
1025 1074
1026struct ec_response_lightbar { 1075struct ec_response_lightbar {
1027 union { 1076 union {
1028 struct dump { 1077 struct {
1029 struct { 1078 struct {
1030 uint8_t reg; 1079 uint8_t reg;
1031 uint8_t ic0; 1080 uint8_t ic0;
@@ -1033,20 +1082,26 @@ struct ec_response_lightbar {
1033 } vals[23]; 1082 } vals[23];
1034 } dump; 1083 } dump;
1035 1084
1036 struct get_seq { 1085 struct {
1037 uint8_t num; 1086 uint8_t num;
1038 } get_seq; 1087 } get_seq, get_brightness, get_demo;
1039 1088
1040 struct lightbar_params get_params; 1089 struct lightbar_params_v0 get_params_v0;
1090 struct lightbar_params_v1 get_params_v1;
1041 1091
1042 struct version { 1092 struct {
1043 uint32_t num; 1093 uint32_t num;
1044 uint32_t flags; 1094 uint32_t flags;
1045 } version; 1095 } version;
1046 1096
1047 struct { 1097 struct {
1098 uint8_t red, green, blue;
1099 } get_rgb;
1100
1101 struct {
1048 /* no return params */ 1102 /* no return params */
1049 } off, on, init, brightness, seq, reg, rgb, demo, set_params; 1103 } off, on, init, set_brightness, seq, reg, set_rgb,
1104 demo, set_params_v0, set_params_v1;
1050 }; 1105 };
1051} __packed; 1106} __packed;
1052 1107
@@ -1056,15 +1111,20 @@ enum lightbar_command {
1056 LIGHTBAR_CMD_OFF = 1, 1111 LIGHTBAR_CMD_OFF = 1,
1057 LIGHTBAR_CMD_ON = 2, 1112 LIGHTBAR_CMD_ON = 2,
1058 LIGHTBAR_CMD_INIT = 3, 1113 LIGHTBAR_CMD_INIT = 3,
1059 LIGHTBAR_CMD_BRIGHTNESS = 4, 1114 LIGHTBAR_CMD_SET_BRIGHTNESS = 4,
1060 LIGHTBAR_CMD_SEQ = 5, 1115 LIGHTBAR_CMD_SEQ = 5,
1061 LIGHTBAR_CMD_REG = 6, 1116 LIGHTBAR_CMD_REG = 6,
1062 LIGHTBAR_CMD_RGB = 7, 1117 LIGHTBAR_CMD_SET_RGB = 7,
1063 LIGHTBAR_CMD_GET_SEQ = 8, 1118 LIGHTBAR_CMD_GET_SEQ = 8,
1064 LIGHTBAR_CMD_DEMO = 9, 1119 LIGHTBAR_CMD_DEMO = 9,
1065 LIGHTBAR_CMD_GET_PARAMS = 10, 1120 LIGHTBAR_CMD_GET_PARAMS_V0 = 10,
1066 LIGHTBAR_CMD_SET_PARAMS = 11, 1121 LIGHTBAR_CMD_SET_PARAMS_V0 = 11,
1067 LIGHTBAR_CMD_VERSION = 12, 1122 LIGHTBAR_CMD_VERSION = 12,
1123 LIGHTBAR_CMD_GET_BRIGHTNESS = 13,
1124 LIGHTBAR_CMD_GET_RGB = 14,
1125 LIGHTBAR_CMD_GET_DEMO = 15,
1126 LIGHTBAR_CMD_GET_PARAMS_V1 = 16,
1127 LIGHTBAR_CMD_SET_PARAMS_V1 = 17,
1068 LIGHTBAR_NUM_CMDS 1128 LIGHTBAR_NUM_CMDS
1069}; 1129};
1070 1130
@@ -1421,8 +1481,40 @@ struct ec_response_rtc {
1421/*****************************************************************************/ 1481/*****************************************************************************/
1422/* Port80 log access */ 1482/* Port80 log access */
1423 1483
1484/* Maximum entries that can be read/written in a single command */
1485#define EC_PORT80_SIZE_MAX 32
1486
1424/* Get last port80 code from previous boot */ 1487/* Get last port80 code from previous boot */
1425#define EC_CMD_PORT80_LAST_BOOT 0x48 1488#define EC_CMD_PORT80_LAST_BOOT 0x48
1489#define EC_CMD_PORT80_READ 0x48
1490
1491enum ec_port80_subcmd {
1492 EC_PORT80_GET_INFO = 0,
1493 EC_PORT80_READ_BUFFER,
1494};
1495
1496struct ec_params_port80_read {
1497 uint16_t subcmd;
1498 union {
1499 struct {
1500 uint32_t offset;
1501 uint32_t num_entries;
1502 } read_buffer;
1503 };
1504} __packed;
1505
1506struct ec_response_port80_read {
1507 union {
1508 struct {
1509 uint32_t writes;
1510 uint32_t history_size;
1511 uint32_t last_boot;
1512 } get_info;
1513 struct {
1514 uint16_t codes[EC_PORT80_SIZE_MAX];
1515 } data;
1516 };
1517} __packed;
1426 1518
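A hedged sketch of filling the new port80 read request; how the message is actually transferred to the EC is out of scope here.

#include <linux/kernel.h>
#include <linux/mfd/cros_ec_commands.h>

/* Build an EC_CMD_PORT80_READ request for a window of history entries. */
static void example_fill_port80_read(struct ec_params_port80_read *p,
				     u32 offset, u32 num_entries)
{
	p->subcmd = EC_PORT80_READ_BUFFER;
	p->read_buffer.offset = offset;
	/* The EC caps a single response at EC_PORT80_SIZE_MAX entries. */
	p->read_buffer.num_entries = min_t(u32, num_entries,
					   EC_PORT80_SIZE_MAX);
}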
1427struct ec_response_port80_last_boot { 1519struct ec_response_port80_last_boot {
1428 uint16_t code; 1520 uint16_t code;
@@ -1782,6 +1874,7 @@ struct ec_params_gpio_set {
1782/* Get GPIO value */ 1874/* Get GPIO value */
1783#define EC_CMD_GPIO_GET 0x93 1875#define EC_CMD_GPIO_GET 0x93
1784 1876
1877/* Version 0 of input params and response */
1785struct ec_params_gpio_get { 1878struct ec_params_gpio_get {
1786 char name[32]; 1879 char name[32];
1787} __packed; 1880} __packed;
@@ -1789,6 +1882,38 @@ struct ec_response_gpio_get {
1789 uint8_t val; 1882 uint8_t val;
1790} __packed; 1883} __packed;
1791 1884
1885/* Version 1 of input params and response */
1886struct ec_params_gpio_get_v1 {
1887 uint8_t subcmd;
1888 union {
1889 struct {
1890 char name[32];
1891 } get_value_by_name;
1892 struct {
1893 uint8_t index;
1894 } get_info;
1895 };
1896} __packed;
1897
1898struct ec_response_gpio_get_v1 {
1899 union {
1900 struct {
1901 uint8_t val;
1902 } get_value_by_name, get_count;
1903 struct {
1904 uint8_t val;
1905 char name[32];
1906 uint32_t flags;
1907 } get_info;
1908 };
1909} __packed;
1910
1911enum gpio_get_subcmd {
1912 EC_GPIO_GET_BY_NAME = 0,
1913 EC_GPIO_GET_COUNT = 1,
1914 EC_GPIO_GET_INFO = 2,
1915};
1916
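A minimal sketch of the two version-1 GPIO queries; message plumbing is omitted and the helper name is illustrative.

#include <linux/string.h>
#include <linux/mfd/cros_ec_commands.h>

/* Look a GPIO up by name, or ask for per-index info when no name is given. */
static void example_fill_gpio_get_v1(struct ec_params_gpio_get_v1 *p,
				     const char *name, u8 index)
{
	if (name) {
		p->subcmd = EC_GPIO_GET_BY_NAME;
		strlcpy(p->get_value_by_name.name, name,
			sizeof(p->get_value_by_name.name));
	} else {
		p->subcmd = EC_GPIO_GET_INFO;
		p->get_info.index = index;
	}
}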
1792/*****************************************************************************/ 1917/*****************************************************************************/
1793/* I2C commands. Only available when flash write protect is unlocked. */ 1918/* I2C commands. Only available when flash write protect is unlocked. */
1794 1919
@@ -1857,13 +1982,21 @@ struct ec_params_charge_control {
1857/*****************************************************************************/ 1982/*****************************************************************************/
1858 1983
1859/* 1984/*
1860 * Cut off battery power output if the battery supports. 1985 * Cut off battery power immediately or after the host has shut down.
1861 * 1986 *
1862 * For unsupported battery, just don't implement this command and lets EC 1987 * return EC_RES_INVALID_COMMAND if unsupported by a board/battery.
1863 * return EC_RES_INVALID_COMMAND. 1988 * EC_RES_SUCCESS if the command was successful.
1989 * EC_RES_ERROR if the cut off command failed.
1864 */ 1990 */
1991
1865#define EC_CMD_BATTERY_CUT_OFF 0x99 1992#define EC_CMD_BATTERY_CUT_OFF 0x99
1866 1993
1994#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0)
1995
1996struct ec_params_battery_cutoff {
1997 uint8_t flags;
1998} __packed;
1999
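A short sketch of the new optional cutoff parameter; interpreting the EC_RES_* codes is left to the transport helper.

#include <linux/types.h>
#include <linux/mfd/cros_ec_commands.h>

/* Request battery cut-off, optionally deferred until the host shuts down. */
static void example_fill_battery_cutoff(struct ec_params_battery_cutoff *p,
					bool at_shutdown)
{
	p->flags = at_shutdown ? EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN : 0;
}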
1867/*****************************************************************************/ 2000/*****************************************************************************/
1868/* USB port mux control. */ 2001/* USB port mux control. */
1869 2002
@@ -2142,6 +2275,32 @@ struct ec_params_sb_wr_block {
2142} __packed; 2275} __packed;
2143 2276
2144/*****************************************************************************/ 2277/*****************************************************************************/
2278/* Battery vendor parameters
2279 *
2280 * Get or set vendor-specific parameters in the battery. Implementations may
2281 * differ between boards or batteries. On a set operation, the response
2282 * contains the actual value set, which may be rounded or clipped from the
2283 * requested value.
2284 */
2285
2286#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4
2287
2288enum ec_battery_vendor_param_mode {
2289 BATTERY_VENDOR_PARAM_MODE_GET = 0,
2290 BATTERY_VENDOR_PARAM_MODE_SET,
2291};
2292
2293struct ec_params_battery_vendor_param {
2294 uint32_t param;
2295 uint32_t value;
2296 uint8_t mode;
2297} __packed;
2298
2299struct ec_response_battery_vendor_param {
2300 uint32_t value;
2301} __packed;
2302
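A sketch of a set operation with the vendor-parameter command; per the comment above, the response's value field reports what was actually applied, which may be rounded or clipped.

#include <linux/types.h>
#include <linux/mfd/cros_ec_commands.h>

/* Ask the battery to set a vendor-specific parameter. */
static void example_fill_vendor_param(struct ec_params_battery_vendor_param *p,
				      u32 param, u32 value)
{
	p->mode = BATTERY_VENDOR_PARAM_MODE_SET;
	p->param = param;
	p->value = value;
}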
2303/*****************************************************************************/
2145/* System commands */ 2304/* System commands */
2146 2305
2147/* 2306/*
@@ -2338,6 +2497,80 @@ struct ec_params_reboot_ec {
2338 2497
2339/*****************************************************************************/ 2498/*****************************************************************************/
2340/* 2499/*
2500 * PD commands
2501 *
2502 * These commands are for PD MCU communication.
2503 */
2504
2505/* EC to PD MCU exchange status command */
2506#define EC_CMD_PD_EXCHANGE_STATUS 0x100
2507
2508/* Status of EC being sent to PD */
2509struct ec_params_pd_status {
2510 int8_t batt_soc; /* battery state of charge */
2511} __packed;
2512
2513/* Status of PD being sent back to EC */
2514struct ec_response_pd_status {
2515 int8_t status; /* PD MCU status */
2516 uint32_t curr_lim_ma; /* input current limit */
2517} __packed;
2518
2519/* Set USB type-C port role and muxes */
2520#define EC_CMD_USB_PD_CONTROL 0x101
2521
2522enum usb_pd_control_role {
2523 USB_PD_CTRL_ROLE_NO_CHANGE = 0,
2524 USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */
2525 USB_PD_CTRL_ROLE_TOGGLE_OFF = 2,
2526 USB_PD_CTRL_ROLE_FORCE_SINK = 3,
2527 USB_PD_CTRL_ROLE_FORCE_SOURCE = 4,
2528};
2529
2530enum usb_pd_control_mux {
2531 USB_PD_CTRL_MUX_NO_CHANGE = 0,
2532 USB_PD_CTRL_MUX_NONE = 1,
2533 USB_PD_CTRL_MUX_USB = 2,
2534 USB_PD_CTRL_MUX_DP = 3,
2535 USB_PD_CTRL_MUX_DOCK = 4,
2536 USB_PD_CTRL_MUX_AUTO = 5,
2537};
2538
2539struct ec_params_usb_pd_control {
2540 uint8_t port;
2541 uint8_t role;
2542 uint8_t mux;
2543} __packed;
2544
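A minimal sketch of forcing a type-C port to sink while leaving the mux untouched; the port number and the surrounding transfer are assumptions.

#include <linux/types.h>
#include <linux/mfd/cros_ec_commands.h>

/* Force a given type-C port to act as a power sink; leave the mux alone. */
static void example_fill_usb_pd_control(struct ec_params_usb_pd_control *p,
					u8 port)
{
	p->port = port;
	p->role = USB_PD_CTRL_ROLE_FORCE_SINK;
	p->mux = USB_PD_CTRL_MUX_NO_CHANGE;
}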
2545/*****************************************************************************/
2546/*
2547 * Passthru commands
2548 *
 2549 * Some platforms have sub-processors chained to each other. For example:
2550 *
2551 * AP <--> EC <--> PD MCU
2552 *
2553 * The top 2 bits of the command number are used to indicate which device the
2554 * command is intended for. Device 0 is always the device receiving the
2555 * command; other device mapping is board-specific.
2556 *
2557 * When a device receives a command to be passed to a sub-processor, it passes
2558 * it on with the device number set back to 0. This allows the sub-processor
2559 * to remain blissfully unaware of whether the command originated on the next
2560 * device up the chain, or was passed through from the AP.
2561 *
2562 * In the above example, if the AP wants to send command 0x0002 to the PD MCU,
2563 * AP sends command 0x4002 to the EC
2564 * EC sends command 0x0002 to the PD MCU
2565 * EC forwards PD MCU response back to the AP
2566 */
2567
2568/* Offset and max command number for sub-device n */
2569#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n))
2570#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff)
2571
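The worked example from the comment above, expressed as a small helper; the command value 0x0002 is only the illustration the comment itself uses.

#include <linux/types.h>
#include <linux/mfd/cros_ec_commands.h>

/* Map a command aimed at sub-device n into the caller-visible window. */
static inline u16 example_passthru_cmd(unsigned int n, u16 cmd)
{
	/* e.g. n = 1, cmd = 0x0002 -> 0x4002, as in the comment above. */
	return EC_CMD_PASSTHRU_OFFSET(n) + cmd;
}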
2572/*****************************************************************************/
2573/*
2341 * Deprecated constants. These constants have been renamed for clarity. The 2574 * Deprecated constants. These constants have been renamed for clarity. The
2342 * meaning and size has not changed. Programs that use the old names should 2575 * meaning and size has not changed. Programs that use the old names should
2343 * switch to the new names soon, as the old names may not be carried forward 2576 * switch to the new names soon, as the old names may not be carried forward
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
index 956afa445998..5dc743fd63a6 100644
--- a/include/linux/mfd/da9055/core.h
+++ b/include/linux/mfd/da9055/core.h
@@ -89,6 +89,6 @@ static inline int da9055_reg_update(struct da9055 *da9055, unsigned char reg,
89int da9055_device_init(struct da9055 *da9055); 89int da9055_device_init(struct da9055 *da9055);
90void da9055_device_exit(struct da9055 *da9055); 90void da9055_device_exit(struct da9055 *da9055);
91 91
92extern struct regmap_config da9055_regmap_config; 92extern const struct regmap_config da9055_regmap_config;
93 93
94#endif /* __DA9055_CORE_H */ 94#endif /* __DA9055_CORE_H */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 95c8742215a7..612383bd80ae 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -103,6 +103,7 @@ struct da9063;
103struct da9063_pdata { 103struct da9063_pdata {
104 int (*init)(struct da9063 *da9063); 104 int (*init)(struct da9063 *da9063);
105 int irq_base; 105 int irq_base;
106 bool key_power;
106 unsigned flags; 107 unsigned flags;
107 struct da9063_regulators_pdata *regulators_pdata; 108 struct da9063_regulators_pdata *regulators_pdata;
108 struct led_platform_data *leds_pdata; 109 struct led_platform_data *leds_pdata;
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index bb995ab9a575..d4b72d519115 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -125,9 +125,4 @@ enum max77686_opmode {
125 MAX77686_OPMODE_STANDBY, 125 MAX77686_OPMODE_STANDBY,
126}; 126};
127 127
128struct max77686_opmode_data {
129 int id;
130 int mode;
131};
132
133#endif /* __LINUX_MFD_MAX77686_H */ 128#endif /* __LINUX_MFD_MAX77686_H */
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index c9d869027300..cb83883918a7 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -118,47 +118,6 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
118#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) 118#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
119 119
120/** 120/**
121 * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform
122 * data
 123 * @sample_time: ADC conversion time in number of clocks.
124 * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks,
125 * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks),
126 * recommended is 4.
127 * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC)
128 * @ref_sel: ADC reference source
129 * (0 -> internal reference, 1 -> external reference)
130 * @adc_freq: ADC Clock speed
131 * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 || 3 -> 6.5 MHz)
132 * @ave_ctrl: Sample average control
133 * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
134 * @touch_det_delay: Touch detect interrupt delay
135 * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us,
136 * 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms)
137 * recommended is 3
138 * @settling: Panel driver settling time
139 * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms,
140 * 4 -> 5 ms, 5 -> 10 ms, 6 for 50 ms, 7 -> 100 ms)
141 * recommended is 2
142 * @fraction_z: Length of the fractional part in z
143 * (fraction_z ([0..7]) = Count of the fractional part)
144 * recommended is 7
145 * @i_drive: current limit value of the touchscreen drivers
146 * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max)
147 *
148 * */
149struct stmpe_ts_platform_data {
150 u8 sample_time;
151 u8 mod_12b;
152 u8 ref_sel;
153 u8 adc_freq;
154 u8 ave_ctrl;
155 u8 touch_det_delay;
156 u8 settling;
157 u8 fraction_z;
158 u8 i_drive;
159};
160
161/**
162 * struct stmpe_platform_data - STMPE platform data 121 * struct stmpe_platform_data - STMPE platform data
163 * @id: device id to distinguish between multiple STMPEs on the same board 122 * @id: device id to distinguish between multiple STMPEs on the same board
164 * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*) 123 * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
@@ -168,7 +127,6 @@ struct stmpe_ts_platform_data {
168 * @irq_over_gpio: true if gpio is used to get irq 127 * @irq_over_gpio: true if gpio is used to get irq
169 * @irq_gpio: gpio number over which irq will be requested (significant only if 128 * @irq_gpio: gpio number over which irq will be requested (significant only if
170 * irq_over_gpio is true) 129 * irq_over_gpio is true)
171 * @ts: touchscreen-specific platform data
172 */ 130 */
173struct stmpe_platform_data { 131struct stmpe_platform_data {
174 int id; 132 int id;
@@ -178,8 +136,6 @@ struct stmpe_platform_data {
178 bool irq_over_gpio; 136 bool irq_over_gpio;
179 int irq_gpio; 137 int irq_gpio;
180 int autosleep_timeout; 138 int autosleep_timeout;
181
182 struct stmpe_ts_platform_data *ts;
183}; 139};
184 140
185#endif 141#endif
diff --git a/include/linux/mfd/syscon/atmel-mc.h b/include/linux/mfd/syscon/atmel-mc.h
new file mode 100644
index 000000000000..afd9b8f1e363
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-mc.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright (C) 2005 Ivan Kokshaysky
3 * Copyright (C) SAN People
4 *
5 * Memory Controllers (MC, EBI, SMC, SDRAMC, BFC) - System peripherals
6 * registers.
7 * Based on AT91RM9200 datasheet revision E.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 */
14
15#ifndef _LINUX_MFD_SYSCON_ATMEL_MC_H_
16#define _LINUX_MFD_SYSCON_ATMEL_MC_H_
17
18/* Memory Controller */
19#define AT91_MC_RCR 0x00
20#define AT91_MC_RCB BIT(0)
21
22#define AT91_MC_ASR 0x04
23#define AT91_MC_UNADD BIT(0)
24#define AT91_MC_MISADD BIT(1)
25#define AT91_MC_ABTSZ GENMASK(9, 8)
26#define AT91_MC_ABTSZ_BYTE (0 << 8)
27#define AT91_MC_ABTSZ_HALFWORD (1 << 8)
28#define AT91_MC_ABTSZ_WORD (2 << 8)
29#define AT91_MC_ABTTYP GENMASK(11, 10)
30#define AT91_MC_ABTTYP_DATAREAD (0 << 10)
31#define AT91_MC_ABTTYP_DATAWRITE (1 << 10)
32#define AT91_MC_ABTTYP_FETCH (2 << 10)
33#define AT91_MC_MST(n) BIT(16 + (n))
34#define AT91_MC_SVMST(n) BIT(24 + (n))
35
36#define AT91_MC_AASR 0x08
37
38#define AT91_MC_MPR 0x0c
 39#define AT91_MPR_MSTP(n) GENMASK(2 + ((n) * 4), ((n) * 4))
40
41/* External Bus Interface (EBI) registers */
42#define AT91_MC_EBI_CSA 0x60
 43#define AT91_MC_EBI_CS(n) BIT(n)
44#define AT91_MC_EBI_NUM_CS 8
45
46#define AT91_MC_EBI_CFGR 0x64
47#define AT91_MC_EBI_DBPUC BIT(0)
48
49/* Static Memory Controller (SMC) registers */
50#define AT91_MC_SMC_CSR(n) (0x70 + ((n) * 4))
51#define AT91_MC_SMC_NWS GENMASK(6, 0)
52#define AT91_MC_SMC_NWS_(x) ((x) << 0)
53#define AT91_MC_SMC_WSEN BIT(7)
54#define AT91_MC_SMC_TDF GENMASK(11, 8)
55#define AT91_MC_SMC_TDF_(x) ((x) << 8)
56#define AT91_MC_SMC_TDF_MAX 0xf
57#define AT91_MC_SMC_BAT BIT(12)
58#define AT91_MC_SMC_DBW GENMASK(14, 13)
59#define AT91_MC_SMC_DBW_16 (1 << 13)
60#define AT91_MC_SMC_DBW_8 (2 << 13)
61#define AT91_MC_SMC_DPR BIT(15)
62#define AT91_MC_SMC_ACSS GENMASK(17, 16)
63#define AT91_MC_SMC_ACSS_(x) ((x) << 16)
64#define AT91_MC_SMC_ACSS_MAX 3
65#define AT91_MC_SMC_RWSETUP GENMASK(26, 24)
66#define AT91_MC_SMC_RWSETUP_(x) ((x) << 24)
67#define AT91_MC_SMC_RWHOLD GENMASK(30, 28)
68#define AT91_MC_SMC_RWHOLD_(x) ((x) << 28)
69#define AT91_MC_SMC_RWHOLDSETUP_MAX 7
70
71/* SDRAM Controller registers */
72#define AT91_MC_SDRAMC_MR 0x90
73#define AT91_MC_SDRAMC_MODE GENMASK(3, 0)
74#define AT91_MC_SDRAMC_MODE_NORMAL (0 << 0)
75#define AT91_MC_SDRAMC_MODE_NOP (1 << 0)
76#define AT91_MC_SDRAMC_MODE_PRECHARGE (2 << 0)
77#define AT91_MC_SDRAMC_MODE_LMR (3 << 0)
78#define AT91_MC_SDRAMC_MODE_REFRESH (4 << 0)
79#define AT91_MC_SDRAMC_DBW_16 BIT(4)
80
81#define AT91_MC_SDRAMC_TR 0x94
82#define AT91_MC_SDRAMC_COUNT GENMASK(11, 0)
83
84#define AT91_MC_SDRAMC_CR 0x98
85#define AT91_MC_SDRAMC_NC GENMASK(1, 0)
86#define AT91_MC_SDRAMC_NC_8 (0 << 0)
87#define AT91_MC_SDRAMC_NC_9 (1 << 0)
88#define AT91_MC_SDRAMC_NC_10 (2 << 0)
89#define AT91_MC_SDRAMC_NC_11 (3 << 0)
90#define AT91_MC_SDRAMC_NR GENMASK(3, 2)
91#define AT91_MC_SDRAMC_NR_11 (0 << 2)
92#define AT91_MC_SDRAMC_NR_12 (1 << 2)
93#define AT91_MC_SDRAMC_NR_13 (2 << 2)
94#define AT91_MC_SDRAMC_NB BIT(4)
95#define AT91_MC_SDRAMC_NB_2 (0 << 4)
96#define AT91_MC_SDRAMC_NB_4 (1 << 4)
97#define AT91_MC_SDRAMC_CAS GENMASK(6, 5)
98#define AT91_MC_SDRAMC_CAS_2 (2 << 5)
99#define AT91_MC_SDRAMC_TWR GENMASK(10, 7)
100#define AT91_MC_SDRAMC_TRC GENMASK(14, 11)
101#define AT91_MC_SDRAMC_TRP GENMASK(18, 15)
102#define AT91_MC_SDRAMC_TRCD GENMASK(22, 19)
103#define AT91_MC_SDRAMC_TRAS GENMASK(26, 23)
104#define AT91_MC_SDRAMC_TXSR GENMASK(30, 27)
105
106#define AT91_MC_SDRAMC_SRR 0x9c
107#define AT91_MC_SDRAMC_SRCB BIT(0)
108
109#define AT91_MC_SDRAMC_LPR 0xa0
110#define AT91_MC_SDRAMC_LPCB BIT(0)
111
112#define AT91_MC_SDRAMC_IER 0xa4
113#define AT91_MC_SDRAMC_IDR 0xa8
114#define AT91_MC_SDRAMC_IMR 0xac
115#define AT91_MC_SDRAMC_ISR 0xb0
116#define AT91_MC_SDRAMC_RES BIT(0)
117
118/* Burst Flash Controller register */
119#define AT91_MC_BFC_MR 0xc0
120#define AT91_MC_BFC_BFCOM GENMASK(1, 0)
121#define AT91_MC_BFC_BFCOM_DISABLED (0 << 0)
122#define AT91_MC_BFC_BFCOM_ASYNC (1 << 0)
123#define AT91_MC_BFC_BFCOM_BURST (2 << 0)
124#define AT91_MC_BFC_BFCC GENMASK(3, 2)
125#define AT91_MC_BFC_BFCC_MCK (1 << 2)
126#define AT91_MC_BFC_BFCC_DIV2 (2 << 2)
127#define AT91_MC_BFC_BFCC_DIV4 (3 << 2)
128#define AT91_MC_BFC_AVL GENMASK(7, 4)
129#define AT91_MC_BFC_PAGES GENMASK(10, 8)
130#define AT91_MC_BFC_PAGES_NO_PAGE (0 << 8)
131#define AT91_MC_BFC_PAGES_16 (1 << 8)
132#define AT91_MC_BFC_PAGES_32 (2 << 8)
133#define AT91_MC_BFC_PAGES_64 (3 << 8)
134#define AT91_MC_BFC_PAGES_128 (4 << 8)
135#define AT91_MC_BFC_PAGES_256 (5 << 8)
136#define AT91_MC_BFC_PAGES_512 (6 << 8)
137#define AT91_MC_BFC_PAGES_1024 (7 << 8)
138#define AT91_MC_BFC_OEL GENMASK(13, 12)
139#define AT91_MC_BFC_BAAEN BIT(16)
140#define AT91_MC_BFC_BFOEH BIT(17)
141#define AT91_MC_BFC_MUXEN BIT(18)
142#define AT91_MC_BFC_RDYEN BIT(19)
143
144#endif /* _LINUX_MFD_SYSCON_ATMEL_MC_H_ */
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index f62e7cf227c6..58391f2e0414 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -35,6 +35,8 @@
35 35
36#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <linux/if_link.h> 37#include <linux/if_link.h>
38#include <linux/mlx4/device.h>
39#include <linux/netdevice.h>
38 40
39enum { 41enum {
40 /* initialization and general commands */ 42 /* initialization and general commands */
@@ -300,6 +302,10 @@ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_para
300struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); 302struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
301void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); 303void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
302 304
305int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
306 struct mlx4_counter *counter_stats, int reset);
307int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
308 struct ifla_vf_stats *vf_stats);
303u32 mlx4_comm_get_version(void); 309u32 mlx4_comm_get_version(void);
304int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); 310int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
305int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); 311int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 83e80ab94500..fd13c1ce3b4a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -46,8 +46,9 @@
46 46
47#define MAX_MSIX_P_PORT 17 47#define MAX_MSIX_P_PORT 17
48#define MAX_MSIX 64 48#define MAX_MSIX 64
49#define MSIX_LEGACY_SZ 4
50#define MIN_MSIX_P_PORT 5 49#define MIN_MSIX_P_PORT 5
50#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
51 (dev_cap).num_ports * MIN_MSIX_P_PORT)
51 52
52#define MLX4_MAX_100M_UNITS_VAL 255 /* 53#define MLX4_MAX_100M_UNITS_VAL 255 /*
53 * work around: can't set values 54 * work around: can't set values
@@ -528,7 +529,6 @@ struct mlx4_caps {
528 int num_eqs; 529 int num_eqs;
529 int reserved_eqs; 530 int reserved_eqs;
530 int num_comp_vectors; 531 int num_comp_vectors;
531 int comp_pool;
532 int num_mpts; 532 int num_mpts;
533 int max_fmr_maps; 533 int max_fmr_maps;
534 int num_mtts; 534 int num_mtts;
@@ -771,6 +771,14 @@ union mlx4_ext_av {
771 struct mlx4_eth_av eth; 771 struct mlx4_eth_av eth;
772}; 772};
773 773
 774/* Counters should saturate once they reach their maximum value */
775#define ASSIGN_32BIT_COUNTER(counter, value) do { \
776 if ((value) > U32_MAX) \
777 counter = cpu_to_be32(U32_MAX); \
778 else \
779 counter = cpu_to_be32(value); \
780} while (0)
781
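A hedged usage sketch for the saturating assignment; the 64-bit software total and the big-endian hardware field are illustrative stand-ins.

#include <linux/kernel.h>
#include <linux/mlx4/device.h>

/* Copy a 64-bit software total into a 32-bit big-endian counter field,
 * clamping to U32_MAX instead of wrapping.
 */
static void example_store_counter(__be32 *hw_counter, u64 sw_total)
{
	ASSIGN_32BIT_COUNTER(*hw_counter, sw_total);
}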
774struct mlx4_counter { 782struct mlx4_counter {
775 u8 reserved1[3]; 783 u8 reserved1[3];
776 u8 counter_mode; 784 u8 counter_mode;
@@ -829,6 +837,12 @@ struct mlx4_dev {
829 struct mlx4_vf_dev *dev_vfs; 837 struct mlx4_vf_dev *dev_vfs;
830}; 838};
831 839
840struct mlx4_clock_params {
841 u64 offset;
842 u8 bar;
843 u8 size;
844};
845
832struct mlx4_eqe { 846struct mlx4_eqe {
833 u8 reserved1; 847 u8 reserved1;
834 u8 type; 848 u8 type;
@@ -957,6 +971,7 @@ struct mlx4_mad_ifc {
957 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 971 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
958 972
959#define MLX4_INVALID_SLAVE_ID 0xFF 973#define MLX4_INVALID_SLAVE_ID 0xFF
974#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
960 975
961void handle_port_mgmt_change_event(struct work_struct *work); 976void handle_port_mgmt_change_event(struct work_struct *work);
962 977
@@ -1332,10 +1347,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
1332int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); 1347int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
1333int mlx4_SYNC_TPT(struct mlx4_dev *dev); 1348int mlx4_SYNC_TPT(struct mlx4_dev *dev);
1334int mlx4_test_interrupts(struct mlx4_dev *dev); 1349int mlx4_test_interrupts(struct mlx4_dev *dev);
1335int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, 1350u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
1336 int *vector); 1351bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
1352struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
1353int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
1337void mlx4_release_eq(struct mlx4_dev *dev, int vec); 1354void mlx4_release_eq(struct mlx4_dev *dev, int vec);
1338 1355
1356int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
1339int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec); 1357int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
1340 1358
1341int mlx4_get_phys_port_id(struct mlx4_dev *dev); 1359int mlx4_get_phys_port_id(struct mlx4_dev *dev);
@@ -1344,6 +1362,7 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
1344 1362
1345int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); 1363int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
1346void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); 1364void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
1365int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
1347 1366
1348void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, 1367void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
1349 int port); 1368 int port);
@@ -1485,4 +1504,7 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
1485 enum mlx4_access_reg_method method, 1504 enum mlx4_access_reg_method method,
1486 struct mlx4_ptys_reg *ptys_reg); 1505 struct mlx4_ptys_reg *ptys_reg);
1487 1506
1507int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1508 struct mlx4_clock_params *params);
1509
1488#endif /* MLX4_DEVICE_H */ 1510#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2695ced222df..abc4767695e4 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -169,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
169 struct mlx5_query_cq_mbox_out *out); 169 struct mlx5_query_cq_mbox_out *out);
170int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 170int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
171 struct mlx5_modify_cq_mbox_in *in, int in_sz); 171 struct mlx5_modify_cq_mbox_in *in, int in_sz);
172int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
173 struct mlx5_core_cq *cq, u16 cq_period,
174 u16 cq_max_count);
172int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); 175int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
173void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); 176void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
174 177
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index abf65c790421..b943cd9e2097 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -35,6 +35,7 @@
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <rdma/ib_verbs.h> 37#include <rdma/ib_verbs.h>
38#include <linux/mlx5/mlx5_ifc.h>
38 39
39#if defined(__LITTLE_ENDIAN) 40#if defined(__LITTLE_ENDIAN)
40#define MLX5_SET_HOST_ENDIANNESS 0 41#define MLX5_SET_HOST_ENDIANNESS 0
@@ -58,6 +59,8 @@
58#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) 59#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
59#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) 60#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
60#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) 61#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
62#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
63#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
61#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) 64#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
62#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) 65#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
63 66
@@ -70,6 +73,14 @@
70 << __mlx5_dw_bit_off(typ, fld))); \ 73 << __mlx5_dw_bit_off(typ, fld))); \
71} while (0) 74} while (0)
72 75
76#define MLX5_SET_TO_ONES(typ, p, fld) do { \
77 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
78 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
79 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
80 (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
81 << __mlx5_dw_bit_off(typ, fld))); \
82} while (0)
83
73#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ 84#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
74__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ 85__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
75__mlx5_mask(typ, fld)) 86__mlx5_mask(typ, fld))
@@ -88,6 +99,12 @@ __mlx5_mask(typ, fld))
88 99
89#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) 100#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
90 101
102#define MLX5_GET64_PR(typ, p, fld) ({ \
103 u64 ___t = MLX5_GET64(typ, p, fld); \
104 pr_debug(#fld " = 0x%llx\n", ___t); \
105 ___t; \
106})
107
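A sketch of the debug-print getter; the qpc/dbr_addr names are believed to exist in mlx5_ifc but should be treated as assumptions for this illustration.

#include <linux/mlx5/device.h>

/* Read a 64-bit field and emit it via pr_debug() in one step. */
static u64 example_read_dbr(void *qpc)
{
	/* Struct/field names here are assumptions, not taken from this patch. */
	return MLX5_GET64_PR(qpc, qpc, dbr_addr);
}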
91enum { 108enum {
92 MLX5_MAX_COMMANDS = 32, 109 MLX5_MAX_COMMANDS = 32,
93 MLX5_CMD_DATA_BLOCK_SIZE = 512, 110 MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -115,6 +132,10 @@ enum {
115}; 132};
116 133
117enum { 134enum {
135 MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
136};
137
138enum {
118 MLX5_MIN_PKEY_TABLE_SIZE = 128, 139 MLX5_MIN_PKEY_TABLE_SIZE = 128,
119 MLX5_MAX_LOG_PKEY_TABLE = 5, 140 MLX5_MAX_LOG_PKEY_TABLE = 5,
120}; 141};
@@ -264,6 +285,7 @@ enum {
264 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, 285 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
265 MLX5_OPCODE_SEND = 0x0a, 286 MLX5_OPCODE_SEND = 0x0a,
266 MLX5_OPCODE_SEND_IMM = 0x0b, 287 MLX5_OPCODE_SEND_IMM = 0x0b,
288 MLX5_OPCODE_LSO = 0x0e,
267 MLX5_OPCODE_RDMA_READ = 0x10, 289 MLX5_OPCODE_RDMA_READ = 0x10,
268 MLX5_OPCODE_ATOMIC_CS = 0x11, 290 MLX5_OPCODE_ATOMIC_CS = 0x11,
269 MLX5_OPCODE_ATOMIC_FA = 0x12, 291 MLX5_OPCODE_ATOMIC_FA = 0x12,
@@ -312,13 +334,6 @@ enum {
312 MLX5_CAP_OFF_CMDIF_CSUM = 46, 334 MLX5_CAP_OFF_CMDIF_CSUM = 46,
313}; 335};
314 336
315enum {
316 HCA_CAP_OPMOD_GET_MAX = 0,
317 HCA_CAP_OPMOD_GET_CUR = 1,
318 HCA_CAP_OPMOD_GET_ODP_MAX = 4,
319 HCA_CAP_OPMOD_GET_ODP_CUR = 5
320};
321
322struct mlx5_inbox_hdr { 337struct mlx5_inbox_hdr {
323 __be16 opcode; 338 __be16 opcode;
324 u8 rsvd[4]; 339 u8 rsvd[4];
@@ -541,6 +556,10 @@ struct mlx5_cmd_prot_block {
541 u8 sig; 556 u8 sig;
542}; 557};
543 558
559enum {
560 MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
561};
562
544struct mlx5_err_cqe { 563struct mlx5_err_cqe {
545 u8 rsvd0[32]; 564 u8 rsvd0[32];
546 __be32 srqn; 565 __be32 srqn;
@@ -554,13 +573,22 @@ struct mlx5_err_cqe {
554}; 573};
555 574
556struct mlx5_cqe64 { 575struct mlx5_cqe64 {
557 u8 rsvd0[17]; 576 u8 rsvd0[4];
577 u8 lro_tcppsh_abort_dupack;
578 u8 lro_min_ttl;
579 __be16 lro_tcp_win;
580 __be32 lro_ack_seq_num;
581 __be32 rss_hash_result;
582 u8 rss_hash_type;
558 u8 ml_path; 583 u8 ml_path;
559 u8 rsvd20[4]; 584 u8 rsvd20[2];
585 __be16 check_sum;
560 __be16 slid; 586 __be16 slid;
561 __be32 flags_rqpn; 587 __be32 flags_rqpn;
562 u8 rsvd28[4]; 588 u8 hds_ip_ext;
563 __be32 srqn; 589 u8 l4_hdr_type_etc;
590 __be16 vlan_info;
591 __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
564 __be32 imm_inval_pkey; 592 __be32 imm_inval_pkey;
565 u8 rsvd40[4]; 593 u8 rsvd40[4];
566 __be32 byte_cnt; 594 __be32 byte_cnt;
@@ -571,6 +599,40 @@ struct mlx5_cqe64 {
571 u8 op_own; 599 u8 op_own;
572}; 600};
573 601
602static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
603{
604 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
605}
606
607static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
608{
609 return (cqe->l4_hdr_type_etc >> 4) & 0x7;
610}
611
612static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
613{
614 return !!(cqe->l4_hdr_type_etc & 0x1);
615}
616
617enum {
618 CQE_L4_HDR_TYPE_NONE = 0x0,
619 CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
620 CQE_L4_HDR_TYPE_UDP = 0x2,
621 CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
622 CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
623};
624
625enum {
626 CQE_RSS_HTYPE_IP = 0x3 << 6,
627 CQE_RSS_HTYPE_L4 = 0x3 << 2,
628};
629
630enum {
631 CQE_L2_OK = 1 << 0,
632 CQE_L3_OK = 1 << 1,
633 CQE_L4_OK = 1 << 2,
634};
635
574struct mlx5_sig_err_cqe { 636struct mlx5_sig_err_cqe {
575 u8 rsvd0[16]; 637 u8 rsvd0[16];
576 __be32 expected_trans_sig; 638 __be32 expected_trans_sig;
@@ -996,4 +1058,135 @@ struct mlx5_destroy_psv_out {
996 u8 rsvd[8]; 1058 u8 rsvd[8];
997}; 1059};
998 1060
1061#define MLX5_CMD_OP_MAX 0x920
1062
1063enum {
1064 VPORT_STATE_DOWN = 0x0,
1065 VPORT_STATE_UP = 0x1,
1066};
1067
1068enum {
1069 MLX5_L3_PROT_TYPE_IPV4 = 0,
1070 MLX5_L3_PROT_TYPE_IPV6 = 1,
1071};
1072
1073enum {
1074 MLX5_L4_PROT_TYPE_TCP = 0,
1075 MLX5_L4_PROT_TYPE_UDP = 1,
1076};
1077
1078enum {
1079 MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
1080 MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
1081 MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
1082 MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
1083 MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
1084};
1085
1086enum {
1087 MLX5_MATCH_OUTER_HEADERS = 1 << 0,
1088 MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
1089 MLX5_MATCH_INNER_HEADERS = 1 << 2,
1090
1091};
1092
1093enum {
1094 MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
1095 MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
1096};
1097
1098enum {
1099 MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
1100 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
1101 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
1102};
1103
1104enum {
1105 MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
1106 MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
1107};
1108
1109/* MLX5 DEV CAPs */
1110
1111/* TODO: EAT.ME */
1112enum mlx5_cap_mode {
1113 HCA_CAP_OPMOD_GET_MAX = 0,
1114 HCA_CAP_OPMOD_GET_CUR = 1,
1115};
1116
1117enum mlx5_cap_type {
1118 MLX5_CAP_GENERAL = 0,
1119 MLX5_CAP_ETHERNET_OFFLOADS,
1120 MLX5_CAP_ODP,
1121 MLX5_CAP_ATOMIC,
1122 MLX5_CAP_ROCE,
1123 MLX5_CAP_IPOIB_OFFLOADS,
1124 MLX5_CAP_EOIB_OFFLOADS,
1125 MLX5_CAP_FLOW_TABLE,
1126 /* NUM OF CAP Types */
1127 MLX5_CAP_NUM
1128};
1129
1130/* GET Dev Caps macros */
1131#define MLX5_CAP_GEN(mdev, cap) \
1132 MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
1133
1134#define MLX5_CAP_GEN_MAX(mdev, cap) \
1135 MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
1136
1137#define MLX5_CAP_ETH(mdev, cap) \
1138 MLX5_GET(per_protocol_networking_offload_caps,\
1139 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1140
1141#define MLX5_CAP_ETH_MAX(mdev, cap) \
1142 MLX5_GET(per_protocol_networking_offload_caps,\
1143 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1144
1145#define MLX5_CAP_ROCE(mdev, cap) \
1146 MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
1147
1148#define MLX5_CAP_ROCE_MAX(mdev, cap) \
1149 MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
1150
1151#define MLX5_CAP_ATOMIC(mdev, cap) \
1152 MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
1153
1154#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1155 MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
1156
1157#define MLX5_CAP_FLOWTABLE(mdev, cap) \
1158 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
1159
1160#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1161 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1162
1163#define MLX5_CAP_ODP(mdev, cap)\
1164 MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
1165
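A rough usage sketch for the capability accessors above, in a kernel/driver context; the surrounding function is hypothetical and assumes mlx5_core_get_caps() has already populated hca_caps_cur[]/hca_caps_max[] for the device.

static void example_log_caps(struct mlx5_core_dev *dev)
{
	/* Current (negotiated) value vs. maximum supported value. */
	int log_max_qp     = MLX5_CAP_GEN(dev, log_max_qp);
	int log_max_qp_max = MLX5_CAP_GEN_MAX(dev, log_max_qp);

	/* Protocol-specific caps are only meaningful when advertised. */
	if (MLX5_CAP_GEN(dev, eth_net_offloads) && MLX5_CAP_ETH(dev, lro_cap))
		pr_debug("LRO available; log_max_qp %d (max %d)\n",
			 log_max_qp, log_max_qp_max);
}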
1166enum {
1167 MLX5_CMD_STAT_OK = 0x0,
1168 MLX5_CMD_STAT_INT_ERR = 0x1,
1169 MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
1170 MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
1171 MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
1172 MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
1173 MLX5_CMD_STAT_RES_BUSY = 0x6,
1174 MLX5_CMD_STAT_LIM_ERR = 0x8,
1175 MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
1176 MLX5_CMD_STAT_IX_ERR = 0xa,
1177 MLX5_CMD_STAT_NO_RES_ERR = 0xf,
1178 MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
1179 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
1180 MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
1181 MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
1182 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
1183};
1184
1185static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1186{
1187 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1188 return 0;
1189 return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1190}
1191
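A quick worked example of the helper above, consistent with the MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_* encodings added further down in mlx5_ifc.h in this patch (the variables are illustrative):

	/* Encodings 0..5 map to 128..4096 entries; larger values are rejected. */
	u16 entries = mlx5_to_sw_pkey_sz(3);	/* 128 << 3 == 1024 entries */
	u16 invalid = mlx5_to_sw_pkey_sz(6);	/* > MLX5_MAX_LOG_PKEY_TABLE, returns 0 */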
999#endif /* MLX5_DEVICE_H */ 1192#endif /* MLX5_DEVICE_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9a90e7523dc2..5722d88c2429 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -44,7 +44,6 @@
44 44
45#include <linux/mlx5/device.h> 45#include <linux/mlx5/device.h>
46#include <linux/mlx5/doorbell.h> 46#include <linux/mlx5/doorbell.h>
47#include <linux/mlx5/mlx5_ifc.h>
48 47
49enum { 48enum {
50 MLX5_BOARD_ID_LEN = 64, 49 MLX5_BOARD_ID_LEN = 64,
@@ -85,7 +84,7 @@ enum {
85}; 84};
86 85
87enum { 86enum {
88 MLX5_MAX_EQ_NAME = 32 87 MLX5_MAX_IRQ_NAME = 32
89}; 88};
90 89
91enum { 90enum {
@@ -108,6 +107,7 @@ enum {
108 MLX5_REG_PUDE = 0x5009, 107 MLX5_REG_PUDE = 0x5009,
109 MLX5_REG_PMPE = 0x5010, 108 MLX5_REG_PMPE = 0x5010,
110 MLX5_REG_PELC = 0x500e, 109 MLX5_REG_PELC = 0x500e,
110 MLX5_REG_PVLC = 0x500f,
111 MLX5_REG_PMLP = 0, /* TBD */ 111 MLX5_REG_PMLP = 0, /* TBD */
112 MLX5_REG_NODE_DESC = 0x6001, 112 MLX5_REG_NODE_DESC = 0x6001,
113 MLX5_REG_HOST_ENDIANNESS = 0x7004, 113 MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -150,6 +150,11 @@ enum mlx5_dev_event {
150 MLX5_DEV_EVENT_CLIENT_REREG, 150 MLX5_DEV_EVENT_CLIENT_REREG,
151}; 151};
152 152
153enum mlx5_port_status {
154 MLX5_PORT_UP = 1 << 1,
155 MLX5_PORT_DOWN = 1 << 2,
156};
157
153struct mlx5_uuar_info { 158struct mlx5_uuar_info {
154 struct mlx5_uar *uars; 159 struct mlx5_uar *uars;
155 int num_uars; 160 int num_uars;
@@ -269,56 +274,7 @@ struct mlx5_cmd {
269struct mlx5_port_caps { 274struct mlx5_port_caps {
270 int gid_table_len; 275 int gid_table_len;
271 int pkey_table_len; 276 int pkey_table_len;
272}; 277 u8 ext_port_cap;
273
274struct mlx5_general_caps {
275 u8 log_max_eq;
276 u8 log_max_cq;
277 u8 log_max_qp;
278 u8 log_max_mkey;
279 u8 log_max_pd;
280 u8 log_max_srq;
281 u8 log_max_strq;
282 u8 log_max_mrw_sz;
283 u8 log_max_bsf_list_size;
284 u8 log_max_klm_list_size;
285 u32 max_cqes;
286 int max_wqes;
287 u32 max_eqes;
288 u32 max_indirection;
289 int max_sq_desc_sz;
290 int max_rq_desc_sz;
291 int max_dc_sq_desc_sz;
292 u64 flags;
293 u16 stat_rate_support;
294 int log_max_msg;
295 int num_ports;
296 u8 log_max_ra_res_qp;
297 u8 log_max_ra_req_qp;
298 int max_srq_wqes;
299 int bf_reg_size;
300 int bf_regs_per_page;
301 struct mlx5_port_caps port[MLX5_MAX_PORTS];
302 u8 ext_port_cap[MLX5_MAX_PORTS];
303 int max_vf;
304 u32 reserved_lkey;
305 u8 local_ca_ack_delay;
306 u8 log_max_mcg;
307 u32 max_qp_mcg;
308 int min_page_sz;
309 int pd_cap;
310 u32 max_qp_counters;
311 u32 pkey_table_size;
312 u8 log_max_ra_req_dc;
313 u8 log_max_ra_res_dc;
314 u32 uar_sz;
315 u8 min_log_pg_sz;
316 u8 log_max_xrcd;
317 u16 log_uar_page_sz;
318};
319
320struct mlx5_caps {
321 struct mlx5_general_caps gen;
322}; 278};
323 279
324struct mlx5_cmd_mailbox { 280struct mlx5_cmd_mailbox {
@@ -334,8 +290,6 @@ struct mlx5_buf_list {
334 290
335struct mlx5_buf { 291struct mlx5_buf {
336 struct mlx5_buf_list direct; 292 struct mlx5_buf_list direct;
337 struct mlx5_buf_list *page_list;
338 int nbufs;
339 int npages; 293 int npages;
340 int size; 294 int size;
341 u8 page_shift; 295 u8 page_shift;
@@ -351,7 +305,6 @@ struct mlx5_eq {
351 u8 eqn; 305 u8 eqn;
352 int nent; 306 int nent;
353 u64 mask; 307 u64 mask;
354 char name[MLX5_MAX_EQ_NAME];
355 struct list_head list; 308 struct list_head list;
356 int index; 309 int index;
357 struct mlx5_rsc_debug *dbg; 310 struct mlx5_rsc_debug *dbg;
@@ -387,6 +340,8 @@ struct mlx5_core_mr {
387 340
388enum mlx5_res_type { 341enum mlx5_res_type {
389 MLX5_RES_QP, 342 MLX5_RES_QP,
343 MLX5_RES_SRQ,
344 MLX5_RES_XSRQ,
390}; 345};
391 346
392struct mlx5_core_rsc_common { 347struct mlx5_core_rsc_common {
@@ -396,6 +351,7 @@ struct mlx5_core_rsc_common {
396}; 351};
397 352
398struct mlx5_core_srq { 353struct mlx5_core_srq {
354 struct mlx5_core_rsc_common common; /* must be first */
399 u32 srqn; 355 u32 srqn;
400 int max; 356 int max;
401 int max_gs; 357 int max_gs;
@@ -414,7 +370,6 @@ struct mlx5_eq_table {
414 struct mlx5_eq pages_eq; 370 struct mlx5_eq pages_eq;
415 struct mlx5_eq async_eq; 371 struct mlx5_eq async_eq;
416 struct mlx5_eq cmd_eq; 372 struct mlx5_eq cmd_eq;
417 struct msix_entry *msix_arr;
418 int num_comp_vectors; 373 int num_comp_vectors;
419 /* protect EQs list 374 /* protect EQs list
420 */ 375 */
@@ -467,9 +422,16 @@ struct mlx5_mr_table {
467 struct radix_tree_root tree; 422 struct radix_tree_root tree;
468}; 423};
469 424
425struct mlx5_irq_info {
426 cpumask_var_t mask;
427 char name[MLX5_MAX_IRQ_NAME];
428};
429
470struct mlx5_priv { 430struct mlx5_priv {
471 char name[MLX5_MAX_NAME_LEN]; 431 char name[MLX5_MAX_NAME_LEN];
472 struct mlx5_eq_table eq_table; 432 struct mlx5_eq_table eq_table;
433 struct msix_entry *msix_arr;
434 struct mlx5_irq_info *irq_info;
473 struct mlx5_uuar_info uuari; 435 struct mlx5_uuar_info uuari;
474 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); 436 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
475 437
@@ -520,7 +482,9 @@ struct mlx5_core_dev {
520 u8 rev_id; 482 u8 rev_id;
521 char board_id[MLX5_BOARD_ID_LEN]; 483 char board_id[MLX5_BOARD_ID_LEN];
522 struct mlx5_cmd cmd; 484 struct mlx5_cmd cmd;
523 struct mlx5_caps caps; 485 struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
486 u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
487 u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
524 phys_addr_t iseg_base; 488 phys_addr_t iseg_base;
525 struct mlx5_init_seg __iomem *iseg; 489 struct mlx5_init_seg __iomem *iseg;
526 void (*event) (struct mlx5_core_dev *dev, 490 void (*event) (struct mlx5_core_dev *dev,
@@ -529,6 +493,7 @@ struct mlx5_core_dev {
529 struct mlx5_priv priv; 493 struct mlx5_priv priv;
530 struct mlx5_profile *profile; 494 struct mlx5_profile *profile;
531 atomic_t num_qps; 495 atomic_t num_qps;
496 u32 issi;
532}; 497};
533 498
534struct mlx5_db { 499struct mlx5_db {
@@ -549,6 +514,11 @@ enum {
549 MLX5_COMP_EQ_SIZE = 1024, 514 MLX5_COMP_EQ_SIZE = 1024,
550}; 515};
551 516
517enum {
518 MLX5_PTYS_IB = 1 << 0,
519 MLX5_PTYS_EN = 1 << 2,
520};
521
552struct mlx5_db_pgdir { 522struct mlx5_db_pgdir {
553 struct list_head list; 523 struct list_head list;
554 DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); 524 DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
@@ -584,13 +554,44 @@ struct mlx5_pas {
584 u8 log_sz; 554 u8 log_sz;
585}; 555};
586 556
557enum port_state_policy {
558 MLX5_AAA_000
559};
560
561enum phy_port_state {
562 MLX5_AAA_111
563};
564
565struct mlx5_hca_vport_context {
566 u32 field_select;
567 bool sm_virt_aware;
568 bool has_smi;
569 bool has_raw;
570 enum port_state_policy policy;
571 enum phy_port_state phys_state;
572 enum ib_port_state vport_state;
573 u8 port_physical_state;
574 u64 sys_image_guid;
575 u64 port_guid;
576 u64 node_guid;
577 u32 cap_mask1;
578 u32 cap_mask1_perm;
579 u32 cap_mask2;
580 u32 cap_mask2_perm;
581 u16 lid;
582 u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
583 u8 lmc;
584 u8 subnet_timeout;
585 u16 sm_lid;
586 u8 sm_sl;
587 u16 qkey_violation_counter;
588 u16 pkey_violation_counter;
589 bool grh_required;
590};
591
587static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) 592static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
588{ 593{
589 if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
590 return buf->direct.buf + offset; 594 return buf->direct.buf + offset;
591 else
592 return buf->page_list[offset >> PAGE_SHIFT].buf +
593 (offset & (PAGE_SIZE - 1));
594} 595}
595 596
596extern struct workqueue_struct *mlx5_core_wq; 597extern struct workqueue_struct *mlx5_core_wq;
@@ -654,8 +655,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
654void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 655void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
655int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); 656int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
656int mlx5_cmd_status_to_err_v2(void *ptr); 657int mlx5_cmd_status_to_err_v2(void *ptr);
657int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, 658int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
658 u16 opmod); 659 enum mlx5_cap_mode cap_mode);
659int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 660int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
660 int out_size); 661 int out_size);
661int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 662int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -665,19 +666,21 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
665int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); 666int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
666int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); 667int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
667int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); 668int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
669int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
670void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
668void mlx5_health_cleanup(void); 671void mlx5_health_cleanup(void);
669void __init mlx5_health_init(void); 672void __init mlx5_health_init(void);
670void mlx5_start_health_poll(struct mlx5_core_dev *dev); 673void mlx5_start_health_poll(struct mlx5_core_dev *dev);
671void mlx5_stop_health_poll(struct mlx5_core_dev *dev); 674void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
672int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, 675int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
673 struct mlx5_buf *buf);
674void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); 676void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
675struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, 677struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
676 gfp_t flags, int npages); 678 gfp_t flags, int npages);
677void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, 679void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
678 struct mlx5_cmd_mailbox *head); 680 struct mlx5_cmd_mailbox *head);
679int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 681int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
680 struct mlx5_create_srq_mbox_in *in, int inlen); 682 struct mlx5_create_srq_mbox_in *in, int inlen,
683 int is_xrc);
681int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); 684int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
682int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 685int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
683 struct mlx5_query_srq_mbox_out *out); 686 struct mlx5_query_srq_mbox_out *out);
@@ -696,7 +699,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
696 u32 *mkey); 699 u32 *mkey);
697int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); 700int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
698int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); 701int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
699int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, 702int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
700 u16 opmod, u8 port); 703 u16 opmod, u8 port);
701void mlx5_pagealloc_init(struct mlx5_core_dev *dev); 704void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
702void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); 705void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
@@ -734,7 +737,32 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
734int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, 737int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
735 int size_in, void *data_out, int size_out, 738 int size_in, void *data_out, int size_out,
736 u16 reg_num, int arg, int write); 739 u16 reg_num, int arg, int write);
740
737int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); 741int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
742int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
743 int ptys_size, int proto_mask, u8 local_port);
744int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
745 u32 *proto_cap, int proto_mask);
746int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
747 u32 *proto_admin, int proto_mask);
748int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
749 u8 *link_width_oper, u8 local_port);
750int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
751 u8 *proto_oper, int proto_mask,
752 u8 local_port);
753int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
754 int proto_mask);
755int mlx5_set_port_status(struct mlx5_core_dev *dev,
756 enum mlx5_port_status status);
757int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
758
759int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
760void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
761void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
762 u8 port);
763
764int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
765 u8 *vl_hw_cap, u8 local_port);
738 766
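A hedged sketch of how the new port helpers compose; the function itself, the choice of port 1, and the trimmed error handling are illustrative assumptions rather than part of the patch.

static int example_port_bringup(struct mlx5_core_dev *dev)
{
	int oper_mtu = 0;
	int err;

	err = mlx5_set_port_status(dev, MLX5_PORT_UP);
	if (err)
		return err;

	/* The MTU query helpers return void; the result comes back
	 * through the out pointer. */
	mlx5_query_port_oper_mtu(dev, &oper_mtu, 1);
	pr_debug("port 1 operational MTU: %d\n", oper_mtu);

	return 0;
}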
739int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 767int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
740void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 768void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -799,6 +827,7 @@ struct mlx5_interface {
799void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); 827void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
800int mlx5_register_interface(struct mlx5_interface *intf); 828int mlx5_register_interface(struct mlx5_interface *intf);
801void mlx5_unregister_interface(struct mlx5_interface *intf); 829void mlx5_unregister_interface(struct mlx5_interface *intf);
830int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
802 831
803struct mlx5_profile { 832struct mlx5_profile {
804 u64 mask; 833 u64 mask;
@@ -809,4 +838,14 @@ struct mlx5_profile {
809 } mr_cache[MAX_MR_CACHE_ENTRIES]; 838 } mr_cache[MAX_MR_CACHE_ENTRIES];
810}; 839};
811 840
841static inline int mlx5_get_gid_table_len(u16 param)
842{
843 if (param > 4) {
844 pr_warn("gid table length is zero\n");
845 return 0;
846 }
847
848 return 8 * (1 << param);
849}
850
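For reference, the helper above expands the same encoding as the MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_* values in mlx5_ifc.h (0..4 map to 8..128 GID entries); a small worked example:

	int len = mlx5_get_gid_table_len(2);	/* 8 * (1 << 2) == 32 GID entries */
	int bad = mlx5_get_gid_table_len(5);	/* out of range: warns and returns 0 */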
812#endif /* MLX5_DRIVER_H */ 851#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
new file mode 100644
index 000000000000..5f922c6d4fc2
--- /dev/null
+++ b/include/linux/mlx5/flow_table.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_FLOW_TABLE_H
34#define MLX5_FLOW_TABLE_H
35
36#include <linux/mlx5/driver.h>
37
38struct mlx5_flow_table_group {
39 u8 log_sz;
40 u8 match_criteria_enable;
41 u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
42};
43
44void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
45 u16 num_groups,
46 struct mlx5_flow_table_group *group);
47void mlx5_destroy_flow_table(void *flow_table);
48int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
49 void *match_criteria, void *flow_context,
50 u32 *flow_index);
51void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
52u32 mlx5_get_flow_table_id(void *flow_table);
53
54#endif /* MLX5_FLOW_TABLE_H */
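A hypothetical usage sketch for this new flow table API, not taken from the patch: the table level, group sizing, and the assumption that mlx5_create_flow_table() returns NULL on failure are illustrative only.

#include <linux/mlx5/device.h>
#include <linux/mlx5/flow_table.h>

static int example_create_nic_rx_table(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_table_group group = {
		.log_sz			= 4,	/* room for 16 entries */
		.match_criteria_enable	= MLX5_MATCH_OUTER_HEADERS,
	};
	void *ft;

	ft = mlx5_create_flow_table(dev, 0 /* level */,
				    MLX5_FLOW_TABLE_TYPE_NIC_RCV,
				    1 /* num_groups */, &group);
	if (!ft)
		return -ENOMEM;

	/* Rules would then be installed with mlx5_add_flow_table_entry()
	 * (match criteria built as an fte_match_param blob) and removed
	 * with mlx5_del_flow_table_entry(). */

	mlx5_destroy_flow_table(ft);
	return 0;
}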
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index cb3ad17edd1f..6d2f6fee041c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -28,12 +28,45 @@
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31*/
32
33#ifndef MLX5_IFC_H 32#ifndef MLX5_IFC_H
34#define MLX5_IFC_H 33#define MLX5_IFC_H
35 34
36enum { 35enum {
36 MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0,
37 MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1,
38 MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2,
39 MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3,
40 MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13,
41 MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14,
42 MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c,
43 MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d,
44 MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4,
45 MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5,
46 MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7,
47 MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc,
48 MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10,
49 MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11,
50 MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12,
51 MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8,
52 MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9,
53 MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15,
54 MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
55 MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
56 MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b,
57 MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
58 MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
59 MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb
60};
61
62enum {
63 MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
64 MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
65 MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
66 MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
67};
68
69enum {
37 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, 70 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
38 MLX5_CMD_OP_QUERY_ADAPTER = 0x101, 71 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
39 MLX5_CMD_OP_INIT_HCA = 0x102, 72 MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -43,6 +76,8 @@ enum {
43 MLX5_CMD_OP_QUERY_PAGES = 0x107, 76 MLX5_CMD_OP_QUERY_PAGES = 0x107,
44 MLX5_CMD_OP_MANAGE_PAGES = 0x108, 77 MLX5_CMD_OP_MANAGE_PAGES = 0x108,
45 MLX5_CMD_OP_SET_HCA_CAP = 0x109, 78 MLX5_CMD_OP_SET_HCA_CAP = 0x109,
79 MLX5_CMD_OP_QUERY_ISSI = 0x10a,
80 MLX5_CMD_OP_SET_ISSI = 0x10b,
46 MLX5_CMD_OP_CREATE_MKEY = 0x200, 81 MLX5_CMD_OP_CREATE_MKEY = 0x200,
47 MLX5_CMD_OP_QUERY_MKEY = 0x201, 82 MLX5_CMD_OP_QUERY_MKEY = 0x201,
48 MLX5_CMD_OP_DESTROY_MKEY = 0x202, 83 MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -66,6 +101,7 @@ enum {
66 MLX5_CMD_OP_2ERR_QP = 0x507, 101 MLX5_CMD_OP_2ERR_QP = 0x507,
67 MLX5_CMD_OP_2RST_QP = 0x50a, 102 MLX5_CMD_OP_2RST_QP = 0x50a,
68 MLX5_CMD_OP_QUERY_QP = 0x50b, 103 MLX5_CMD_OP_QUERY_QP = 0x50b,
104 MLX5_CMD_OP_SQD_RTS_QP = 0x50c,
69 MLX5_CMD_OP_INIT2INIT_QP = 0x50e, 105 MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
70 MLX5_CMD_OP_CREATE_PSV = 0x600, 106 MLX5_CMD_OP_CREATE_PSV = 0x600,
71 MLX5_CMD_OP_DESTROY_PSV = 0x601, 107 MLX5_CMD_OP_DESTROY_PSV = 0x601,
@@ -73,7 +109,10 @@ enum {
73 MLX5_CMD_OP_DESTROY_SRQ = 0x701, 109 MLX5_CMD_OP_DESTROY_SRQ = 0x701,
74 MLX5_CMD_OP_QUERY_SRQ = 0x702, 110 MLX5_CMD_OP_QUERY_SRQ = 0x702,
75 MLX5_CMD_OP_ARM_RQ = 0x703, 111 MLX5_CMD_OP_ARM_RQ = 0x703,
76 MLX5_CMD_OP_RESIZE_SRQ = 0x704, 112 MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705,
113 MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706,
114 MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707,
115 MLX5_CMD_OP_ARM_XRC_SRQ = 0x708,
77 MLX5_CMD_OP_CREATE_DCT = 0x710, 116 MLX5_CMD_OP_CREATE_DCT = 0x710,
78 MLX5_CMD_OP_DESTROY_DCT = 0x711, 117 MLX5_CMD_OP_DESTROY_DCT = 0x711,
79 MLX5_CMD_OP_DRAIN_DCT = 0x712, 118 MLX5_CMD_OP_DRAIN_DCT = 0x712,
@@ -85,8 +124,12 @@ enum {
85 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, 124 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
86 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, 125 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
87 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, 126 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
88 MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760, 127 MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760,
89 MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, 128 MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
129 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762,
130 MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
131 MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
132 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
90 MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, 133 MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
91 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, 134 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
92 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, 135 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
@@ -98,7 +141,7 @@ enum {
98 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, 141 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
99 MLX5_CMD_OP_ACCESS_REG = 0x805, 142 MLX5_CMD_OP_ACCESS_REG = 0x805,
100 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, 143 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
101 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, 144 MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
102 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, 145 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
103 MLX5_CMD_OP_MAD_IFC = 0x50d, 146 MLX5_CMD_OP_MAD_IFC = 0x50d,
104 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, 147 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -106,23 +149,22 @@ enum {
106 MLX5_CMD_OP_NOP = 0x80d, 149 MLX5_CMD_OP_NOP = 0x80d,
107 MLX5_CMD_OP_ALLOC_XRCD = 0x80e, 150 MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
108 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, 151 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
109 MLX5_CMD_OP_SET_BURST_SIZE = 0x812, 152 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
110 MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813, 153 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817,
111 MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, 154 MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822,
112 MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, 155 MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823,
113 MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820, 156 MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824,
114 MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821, 157 MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825,
115 MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822, 158 MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826,
116 MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823, 159 MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827,
117 MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824, 160 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828,
161 MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
162 MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
163 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
118 MLX5_CMD_OP_CREATE_TIR = 0x900, 164 MLX5_CMD_OP_CREATE_TIR = 0x900,
119 MLX5_CMD_OP_MODIFY_TIR = 0x901, 165 MLX5_CMD_OP_MODIFY_TIR = 0x901,
120 MLX5_CMD_OP_DESTROY_TIR = 0x902, 166 MLX5_CMD_OP_DESTROY_TIR = 0x902,
121 MLX5_CMD_OP_QUERY_TIR = 0x903, 167 MLX5_CMD_OP_QUERY_TIR = 0x903,
122 MLX5_CMD_OP_CREATE_TIS = 0x912,
123 MLX5_CMD_OP_MODIFY_TIS = 0x913,
124 MLX5_CMD_OP_DESTROY_TIS = 0x914,
125 MLX5_CMD_OP_QUERY_TIS = 0x915,
126 MLX5_CMD_OP_CREATE_SQ = 0x904, 168 MLX5_CMD_OP_CREATE_SQ = 0x904,
127 MLX5_CMD_OP_MODIFY_SQ = 0x905, 169 MLX5_CMD_OP_MODIFY_SQ = 0x905,
128 MLX5_CMD_OP_DESTROY_SQ = 0x906, 170 MLX5_CMD_OP_DESTROY_SQ = 0x906,
@@ -135,9 +177,430 @@ enum {
135 MLX5_CMD_OP_MODIFY_RMP = 0x90d, 177 MLX5_CMD_OP_MODIFY_RMP = 0x90d,
136 MLX5_CMD_OP_DESTROY_RMP = 0x90e, 178 MLX5_CMD_OP_DESTROY_RMP = 0x90e,
137 MLX5_CMD_OP_QUERY_RMP = 0x90f, 179 MLX5_CMD_OP_QUERY_RMP = 0x90f,
138 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910, 180 MLX5_CMD_OP_CREATE_TIS = 0x912,
139 MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911, 181 MLX5_CMD_OP_MODIFY_TIS = 0x913,
140 MLX5_CMD_OP_MAX = 0x911 182 MLX5_CMD_OP_DESTROY_TIS = 0x914,
183 MLX5_CMD_OP_QUERY_TIS = 0x915,
184 MLX5_CMD_OP_CREATE_RQT = 0x916,
185 MLX5_CMD_OP_MODIFY_RQT = 0x917,
186 MLX5_CMD_OP_DESTROY_RQT = 0x918,
187 MLX5_CMD_OP_QUERY_RQT = 0x919,
188 MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
189 MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931,
190 MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932,
191 MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
192 MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934,
193 MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935,
194 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
195 MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
196 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938
197};
198
199struct mlx5_ifc_flow_table_fields_supported_bits {
200 u8 outer_dmac[0x1];
201 u8 outer_smac[0x1];
202 u8 outer_ether_type[0x1];
203 u8 reserved_0[0x1];
204 u8 outer_first_prio[0x1];
205 u8 outer_first_cfi[0x1];
206 u8 outer_first_vid[0x1];
207 u8 reserved_1[0x1];
208 u8 outer_second_prio[0x1];
209 u8 outer_second_cfi[0x1];
210 u8 outer_second_vid[0x1];
211 u8 reserved_2[0x1];
212 u8 outer_sip[0x1];
213 u8 outer_dip[0x1];
214 u8 outer_frag[0x1];
215 u8 outer_ip_protocol[0x1];
216 u8 outer_ip_ecn[0x1];
217 u8 outer_ip_dscp[0x1];
218 u8 outer_udp_sport[0x1];
219 u8 outer_udp_dport[0x1];
220 u8 outer_tcp_sport[0x1];
221 u8 outer_tcp_dport[0x1];
222 u8 outer_tcp_flags[0x1];
223 u8 outer_gre_protocol[0x1];
224 u8 outer_gre_key[0x1];
225 u8 outer_vxlan_vni[0x1];
226 u8 reserved_3[0x5];
227 u8 source_eswitch_port[0x1];
228
229 u8 inner_dmac[0x1];
230 u8 inner_smac[0x1];
231 u8 inner_ether_type[0x1];
232 u8 reserved_4[0x1];
233 u8 inner_first_prio[0x1];
234 u8 inner_first_cfi[0x1];
235 u8 inner_first_vid[0x1];
236 u8 reserved_5[0x1];
237 u8 inner_second_prio[0x1];
238 u8 inner_second_cfi[0x1];
239 u8 inner_second_vid[0x1];
240 u8 reserved_6[0x1];
241 u8 inner_sip[0x1];
242 u8 inner_dip[0x1];
243 u8 inner_frag[0x1];
244 u8 inner_ip_protocol[0x1];
245 u8 inner_ip_ecn[0x1];
246 u8 inner_ip_dscp[0x1];
247 u8 inner_udp_sport[0x1];
248 u8 inner_udp_dport[0x1];
249 u8 inner_tcp_sport[0x1];
250 u8 inner_tcp_dport[0x1];
251 u8 inner_tcp_flags[0x1];
252 u8 reserved_7[0x9];
253
254 u8 reserved_8[0x40];
255};
256
257struct mlx5_ifc_flow_table_prop_layout_bits {
258 u8 ft_support[0x1];
259 u8 reserved_0[0x1f];
260
261 u8 reserved_1[0x2];
262 u8 log_max_ft_size[0x6];
263 u8 reserved_2[0x10];
264 u8 max_ft_level[0x8];
265
266 u8 reserved_3[0x20];
267
268 u8 reserved_4[0x18];
269 u8 log_max_ft_num[0x8];
270
271 u8 reserved_5[0x18];
272 u8 log_max_destination[0x8];
273
274 u8 reserved_6[0x18];
275 u8 log_max_flow[0x8];
276
277 u8 reserved_7[0x40];
278
279 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
280
281 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
282};
283
284struct mlx5_ifc_odp_per_transport_service_cap_bits {
285 u8 send[0x1];
286 u8 receive[0x1];
287 u8 write[0x1];
288 u8 read[0x1];
289 u8 reserved_0[0x1];
290 u8 srq_receive[0x1];
291 u8 reserved_1[0x1a];
292};
293
294struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
295 u8 smac_47_16[0x20];
296
297 u8 smac_15_0[0x10];
298 u8 ethertype[0x10];
299
300 u8 dmac_47_16[0x20];
301
302 u8 dmac_15_0[0x10];
303 u8 first_prio[0x3];
304 u8 first_cfi[0x1];
305 u8 first_vid[0xc];
306
307 u8 ip_protocol[0x8];
308 u8 ip_dscp[0x6];
309 u8 ip_ecn[0x2];
310 u8 vlan_tag[0x1];
311 u8 reserved_0[0x1];
312 u8 frag[0x1];
313 u8 reserved_1[0x4];
314 u8 tcp_flags[0x9];
315
316 u8 tcp_sport[0x10];
317 u8 tcp_dport[0x10];
318
319 u8 reserved_2[0x20];
320
321 u8 udp_sport[0x10];
322 u8 udp_dport[0x10];
323
324 u8 src_ip[4][0x20];
325
326 u8 dst_ip[4][0x20];
327};
328
329struct mlx5_ifc_fte_match_set_misc_bits {
330 u8 reserved_0[0x20];
331
332 u8 reserved_1[0x10];
333 u8 source_port[0x10];
334
335 u8 outer_second_prio[0x3];
336 u8 outer_second_cfi[0x1];
337 u8 outer_second_vid[0xc];
338 u8 inner_second_prio[0x3];
339 u8 inner_second_cfi[0x1];
340 u8 inner_second_vid[0xc];
341
342 u8 outer_second_vlan_tag[0x1];
343 u8 inner_second_vlan_tag[0x1];
344 u8 reserved_2[0xe];
345 u8 gre_protocol[0x10];
346
347 u8 gre_key_h[0x18];
348 u8 gre_key_l[0x8];
349
350 u8 vxlan_vni[0x18];
351 u8 reserved_3[0x8];
352
353 u8 reserved_4[0x20];
354
355 u8 reserved_5[0xc];
356 u8 outer_ipv6_flow_label[0x14];
357
358 u8 reserved_6[0xc];
359 u8 inner_ipv6_flow_label[0x14];
360
361 u8 reserved_7[0xe0];
362};
363
364struct mlx5_ifc_cmd_pas_bits {
365 u8 pa_h[0x20];
366
367 u8 pa_l[0x14];
368 u8 reserved_0[0xc];
369};
370
371struct mlx5_ifc_uint64_bits {
372 u8 hi[0x20];
373
374 u8 lo[0x20];
375};
376
377enum {
378 MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0,
379 MLX5_ADS_STAT_RATE_2_5GBPS = 0x7,
380 MLX5_ADS_STAT_RATE_10GBPS = 0x8,
381 MLX5_ADS_STAT_RATE_30GBPS = 0x9,
382 MLX5_ADS_STAT_RATE_5GBPS = 0xa,
383 MLX5_ADS_STAT_RATE_20GBPS = 0xb,
384 MLX5_ADS_STAT_RATE_40GBPS = 0xc,
385 MLX5_ADS_STAT_RATE_60GBPS = 0xd,
386 MLX5_ADS_STAT_RATE_80GBPS = 0xe,
387 MLX5_ADS_STAT_RATE_120GBPS = 0xf,
388};
389
390struct mlx5_ifc_ads_bits {
391 u8 fl[0x1];
392 u8 free_ar[0x1];
393 u8 reserved_0[0xe];
394 u8 pkey_index[0x10];
395
396 u8 reserved_1[0x8];
397 u8 grh[0x1];
398 u8 mlid[0x7];
399 u8 rlid[0x10];
400
401 u8 ack_timeout[0x5];
402 u8 reserved_2[0x3];
403 u8 src_addr_index[0x8];
404 u8 reserved_3[0x4];
405 u8 stat_rate[0x4];
406 u8 hop_limit[0x8];
407
408 u8 reserved_4[0x4];
409 u8 tclass[0x8];
410 u8 flow_label[0x14];
411
412 u8 rgid_rip[16][0x8];
413
414 u8 reserved_5[0x4];
415 u8 f_dscp[0x1];
416 u8 f_ecn[0x1];
417 u8 reserved_6[0x1];
418 u8 f_eth_prio[0x1];
419 u8 ecn[0x2];
420 u8 dscp[0x6];
421 u8 udp_sport[0x10];
422
423 u8 dei_cfi[0x1];
424 u8 eth_prio[0x3];
425 u8 sl[0x4];
426 u8 port[0x8];
427 u8 rmac_47_32[0x10];
428
429 u8 rmac_31_0[0x20];
430};
431
432struct mlx5_ifc_flow_table_nic_cap_bits {
433 u8 reserved_0[0x200];
434
435 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
436
437 u8 reserved_1[0x200];
438
439 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
440
441 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
442
443 u8 reserved_2[0x200];
444
445 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
446
447 u8 reserved_3[0x7200];
448};
449
450struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
451 u8 csum_cap[0x1];
452 u8 vlan_cap[0x1];
453 u8 lro_cap[0x1];
454 u8 lro_psh_flag[0x1];
455 u8 lro_time_stamp[0x1];
456 u8 reserved_0[0x6];
457 u8 max_lso_cap[0x5];
458 u8 reserved_1[0x4];
459 u8 rss_ind_tbl_cap[0x4];
460 u8 reserved_2[0x3];
461 u8 tunnel_lso_const_out_ip_id[0x1];
462 u8 reserved_3[0x2];
463 u8 tunnel_statless_gre[0x1];
464 u8 tunnel_stateless_vxlan[0x1];
465
466 u8 reserved_4[0x20];
467
468 u8 reserved_5[0x10];
469 u8 lro_min_mss_size[0x10];
470
471 u8 reserved_6[0x120];
472
473 u8 lro_timer_supported_periods[4][0x20];
474
475 u8 reserved_7[0x600];
476};
477
478struct mlx5_ifc_roce_cap_bits {
479 u8 roce_apm[0x1];
480 u8 reserved_0[0x1f];
481
482 u8 reserved_1[0x60];
483
484 u8 reserved_2[0xc];
485 u8 l3_type[0x4];
486 u8 reserved_3[0x8];
487 u8 roce_version[0x8];
488
489 u8 reserved_4[0x10];
490 u8 r_roce_dest_udp_port[0x10];
491
492 u8 r_roce_max_src_udp_port[0x10];
493 u8 r_roce_min_src_udp_port[0x10];
494
495 u8 reserved_5[0x10];
496 u8 roce_address_table_size[0x10];
497
498 u8 reserved_6[0x700];
499};
500
501enum {
502 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
503 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
504 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4,
505 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8,
506 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10,
507 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20,
508 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40,
509 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80,
510 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100,
511};
512
513enum {
514 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1,
515 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2,
516 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4,
517 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8,
518 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10,
519 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20,
520 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40,
521 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80,
522 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100,
523};
524
525struct mlx5_ifc_atomic_caps_bits {
526 u8 reserved_0[0x40];
527
528 u8 atomic_req_endianness[0x1];
529 u8 reserved_1[0x1f];
530
531 u8 reserved_2[0x20];
532
533 u8 reserved_3[0x10];
534 u8 atomic_operations[0x10];
535
536 u8 reserved_4[0x10];
537 u8 atomic_size_qp[0x10];
538
539 u8 reserved_5[0x10];
540 u8 atomic_size_dc[0x10];
541
542 u8 reserved_6[0x720];
543};
544
545struct mlx5_ifc_odp_cap_bits {
546 u8 reserved_0[0x40];
547
548 u8 sig[0x1];
549 u8 reserved_1[0x1f];
550
551 u8 reserved_2[0x20];
552
553 struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
554
555 struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
556
557 struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
558
559 u8 reserved_3[0x720];
560};
561
562enum {
563 MLX5_WQ_TYPE_LINKED_LIST = 0x0,
564 MLX5_WQ_TYPE_CYCLIC = 0x1,
565 MLX5_WQ_TYPE_STRQ = 0x2,
566};
567
568enum {
569 MLX5_WQ_END_PAD_MODE_NONE = 0x0,
570 MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
571};
572
573enum {
574 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0,
575 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1,
576 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2,
577 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3,
578 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4,
579};
580
581enum {
582 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0,
583 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1,
584 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2,
585 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3,
586 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4,
587 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5,
588};
589
590enum {
591 MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0,
592 MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1,
593};
594
595enum {
596 MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0,
597 MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1,
598 MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3,
599};
600
601enum {
602 MLX5_CAP_PORT_TYPE_IB = 0x0,
603 MLX5_CAP_PORT_TYPE_ETH = 0x1,
141}; 604};
142 605
143struct mlx5_ifc_cmd_hca_cap_bits { 606struct mlx5_ifc_cmd_hca_cap_bits {
@@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
148 u8 reserved_1[0xb]; 611 u8 reserved_1[0xb];
149 u8 log_max_qp[0x5]; 612 u8 log_max_qp[0x5];
150 613
151 u8 log_max_strq_sz[0x8]; 614 u8 reserved_2[0xb];
152 u8 reserved_2[0x3]; 615 u8 log_max_srq[0x5];
153 u8 log_max_srqs[0x5];
154 u8 reserved_3[0x10]; 616 u8 reserved_3[0x10];
155 617
156 u8 reserved_4[0x8]; 618 u8 reserved_4[0x8];
@@ -185,123 +647,2112 @@ struct mlx5_ifc_cmd_hca_cap_bits {
185 u8 pad_cap[0x1]; 647 u8 pad_cap[0x1];
186 u8 cc_query_allowed[0x1]; 648 u8 cc_query_allowed[0x1];
187 u8 cc_modify_allowed[0x1]; 649 u8 cc_modify_allowed[0x1];
188 u8 reserved_15[0x1d]; 650 u8 reserved_15[0xd];
651 u8 gid_table_size[0x10];
189 652
190 u8 reserved_16[0x6]; 653 u8 out_of_seq_cnt[0x1];
654 u8 vport_counters[0x1];
655 u8 reserved_16[0x4];
191 u8 max_qp_cnt[0xa]; 656 u8 max_qp_cnt[0xa];
192 u8 pkey_table_size[0x10]; 657 u8 pkey_table_size[0x10];
193 658
194 u8 eswitch_owner[0x1]; 659 u8 vport_group_manager[0x1];
195 u8 reserved_17[0xa]; 660 u8 vhca_group_manager[0x1];
661 u8 ib_virt[0x1];
662 u8 eth_virt[0x1];
663 u8 reserved_17[0x1];
664 u8 ets[0x1];
665 u8 nic_flow_table[0x1];
666 u8 reserved_18[0x4];
196 u8 local_ca_ack_delay[0x5]; 667 u8 local_ca_ack_delay[0x5];
197 u8 reserved_18[0x8]; 668 u8 reserved_19[0x6];
669 u8 port_type[0x2];
198 u8 num_ports[0x8]; 670 u8 num_ports[0x8];
199 671
200 u8 reserved_19[0x3]; 672 u8 reserved_20[0x3];
201 u8 log_max_msg[0x5]; 673 u8 log_max_msg[0x5];
202 u8 reserved_20[0x18]; 674 u8 reserved_21[0x18];
203 675
204 u8 stat_rate_support[0x10]; 676 u8 stat_rate_support[0x10];
205 u8 reserved_21[0x10]; 677 u8 reserved_22[0xc];
678 u8 cqe_version[0x4];
206 679
207 u8 reserved_22[0x10]; 680 u8 compact_address_vector[0x1];
681 u8 reserved_23[0xe];
682 u8 drain_sigerr[0x1];
208 u8 cmdif_checksum[0x2]; 683 u8 cmdif_checksum[0x2];
209 u8 sigerr_cqe[0x1]; 684 u8 sigerr_cqe[0x1];
210 u8 reserved_23[0x1]; 685 u8 reserved_24[0x1];
211 u8 wq_signature[0x1]; 686 u8 wq_signature[0x1];
212 u8 sctr_data_cqe[0x1]; 687 u8 sctr_data_cqe[0x1];
213 u8 reserved_24[0x1]; 688 u8 reserved_25[0x1];
214 u8 sho[0x1]; 689 u8 sho[0x1];
215 u8 tph[0x1]; 690 u8 tph[0x1];
216 u8 rf[0x1]; 691 u8 rf[0x1];
217 u8 dc[0x1]; 692 u8 dct[0x1];
218 u8 reserved_25[0x2]; 693 u8 reserved_26[0x1];
694 u8 eth_net_offloads[0x1];
219 u8 roce[0x1]; 695 u8 roce[0x1];
220 u8 atomic[0x1]; 696 u8 atomic[0x1];
221 u8 rsz_srq[0x1]; 697 u8 reserved_27[0x1];
222 698
223 u8 cq_oi[0x1]; 699 u8 cq_oi[0x1];
224 u8 cq_resize[0x1]; 700 u8 cq_resize[0x1];
225 u8 cq_moderation[0x1]; 701 u8 cq_moderation[0x1];
226 u8 sniffer_rule_flow[0x1]; 702 u8 reserved_28[0x3];
227 u8 sniffer_rule_vport[0x1]; 703 u8 cq_eq_remap[0x1];
228 u8 sniffer_rule_phy[0x1];
229 u8 reserved_26[0x1];
230 u8 pg[0x1]; 704 u8 pg[0x1];
231 u8 block_lb_mc[0x1]; 705 u8 block_lb_mc[0x1];
232 u8 reserved_27[0x3]; 706 u8 reserved_29[0x1];
707 u8 scqe_break_moderation[0x1];
708 u8 reserved_30[0x1];
233 u8 cd[0x1]; 709 u8 cd[0x1];
234 u8 reserved_28[0x1]; 710 u8 reserved_31[0x1];
235 u8 apm[0x1]; 711 u8 apm[0x1];
236 u8 reserved_29[0x7]; 712 u8 reserved_32[0x7];
237 u8 qkv[0x1]; 713 u8 qkv[0x1];
238 u8 pkv[0x1]; 714 u8 pkv[0x1];
239 u8 reserved_30[0x4]; 715 u8 reserved_33[0x4];
240 u8 xrc[0x1]; 716 u8 xrc[0x1];
241 u8 ud[0x1]; 717 u8 ud[0x1];
242 u8 uc[0x1]; 718 u8 uc[0x1];
243 u8 rc[0x1]; 719 u8 rc[0x1];
244 720
245 u8 reserved_31[0xa]; 721 u8 reserved_34[0xa];
246 u8 uar_sz[0x6]; 722 u8 uar_sz[0x6];
247 u8 reserved_32[0x8]; 723 u8 reserved_35[0x8];
248 u8 log_pg_sz[0x8]; 724 u8 log_pg_sz[0x8];
249 725
250 u8 bf[0x1]; 726 u8 bf[0x1];
251 u8 reserved_33[0xa]; 727 u8 reserved_36[0x1];
728 u8 pad_tx_eth_packet[0x1];
729 u8 reserved_37[0x8];
252 u8 log_bf_reg_size[0x5]; 730 u8 log_bf_reg_size[0x5];
253 u8 reserved_34[0x10]; 731 u8 reserved_38[0x10];
254 732
255 u8 reserved_35[0x10]; 733 u8 reserved_39[0x10];
256 u8 max_wqe_sz_sq[0x10]; 734 u8 max_wqe_sz_sq[0x10];
257 735
258 u8 reserved_36[0x10]; 736 u8 reserved_40[0x10];
259 u8 max_wqe_sz_rq[0x10]; 737 u8 max_wqe_sz_rq[0x10];
260 738
261 u8 reserved_37[0x10]; 739 u8 reserved_41[0x10];
262 u8 max_wqe_sz_sq_dc[0x10]; 740 u8 max_wqe_sz_sq_dc[0x10];
263 741
264 u8 reserved_38[0x7]; 742 u8 reserved_42[0x7];
265 u8 max_qp_mcg[0x19]; 743 u8 max_qp_mcg[0x19];
266 744
267 u8 reserved_39[0x18]; 745 u8 reserved_43[0x18];
268 u8 log_max_mcg[0x8]; 746 u8 log_max_mcg[0x8];
269 747
270 u8 reserved_40[0xb]; 748 u8 reserved_44[0x3];
749 u8 log_max_transport_domain[0x5];
750 u8 reserved_45[0x3];
271 u8 log_max_pd[0x5]; 751 u8 log_max_pd[0x5];
272 u8 reserved_41[0xb]; 752 u8 reserved_46[0xb];
273 u8 log_max_xrcd[0x5]; 753 u8 log_max_xrcd[0x5];
274 754
275 u8 reserved_42[0x20]; 755 u8 reserved_47[0x20];
276 756
277 u8 reserved_43[0x3]; 757 u8 reserved_48[0x3];
278 u8 log_max_rq[0x5]; 758 u8 log_max_rq[0x5];
279 u8 reserved_44[0x3]; 759 u8 reserved_49[0x3];
280 u8 log_max_sq[0x5]; 760 u8 log_max_sq[0x5];
281 u8 reserved_45[0x3]; 761 u8 reserved_50[0x3];
282 u8 log_max_tir[0x5]; 762 u8 log_max_tir[0x5];
283 u8 reserved_46[0x3]; 763 u8 reserved_51[0x3];
284 u8 log_max_tis[0x5]; 764 u8 log_max_tis[0x5];
285 765
286 u8 reserved_47[0x13]; 766 u8 basic_cyclic_rcv_wqe[0x1];
287 u8 log_max_rq_per_tir[0x5]; 767 u8 reserved_52[0x2];
288 u8 reserved_48[0x3]; 768 u8 log_max_rmp[0x5];
769 u8 reserved_53[0x3];
770 u8 log_max_rqt[0x5];
771 u8 reserved_54[0x3];
772 u8 log_max_rqt_size[0x5];
773 u8 reserved_55[0x3];
289 u8 log_max_tis_per_sq[0x5]; 774 u8 log_max_tis_per_sq[0x5];
290 775
291 u8 reserved_49[0xe0]; 776 u8 reserved_56[0x3];
777 u8 log_max_stride_sz_rq[0x5];
778 u8 reserved_57[0x3];
779 u8 log_min_stride_sz_rq[0x5];
780 u8 reserved_58[0x3];
781 u8 log_max_stride_sz_sq[0x5];
782 u8 reserved_59[0x3];
783 u8 log_min_stride_sz_sq[0x5];
292 784
293 u8 reserved_50[0x10]; 785 u8 reserved_60[0x1b];
786 u8 log_max_wq_sz[0x5];
787
788 u8 reserved_61[0xa0];
789
790 u8 reserved_62[0x3];
791 u8 log_max_l2_table[0x5];
792 u8 reserved_63[0x8];
294 u8 log_uar_page_sz[0x10]; 793 u8 log_uar_page_sz[0x10];
295 794
296 u8 reserved_51[0x100]; 795 u8 reserved_64[0x100];
297 796
298 u8 reserved_52[0x1f]; 797 u8 reserved_65[0x1f];
299 u8 cqe_zip[0x1]; 798 u8 cqe_zip[0x1];
300 799
301 u8 cqe_zip_timeout[0x10]; 800 u8 cqe_zip_timeout[0x10];
302 u8 cqe_zip_max_num[0x10]; 801 u8 cqe_zip_max_num[0x10];
303 802
304 u8 reserved_53[0x220]; 803 u8 reserved_66[0x220];
804};
805
806enum {
807 MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1,
808 MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2,
809};
810
811struct mlx5_ifc_dest_format_struct_bits {
812 u8 destination_type[0x8];
813 u8 destination_id[0x18];
814
815 u8 reserved_0[0x20];
816};
817
818struct mlx5_ifc_fte_match_param_bits {
819 struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
820
821 struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
822
823 struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
824
825 u8 reserved_0[0xa00];
826};
827
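As a hedged illustration of how a match-parameter blob laid out against this structure might be built (the field choices are arbitrary, and MLX5_SET()/MLX5_ST_SZ_DW() are the existing mlx5_ifc accessor macros, not additions of this patch):

	u32 match[MLX5_ST_SZ_DW(fte_match_param)] = {};

	/* Match IPv4/TCP traffic to destination port 80 in the outer headers. */
	MLX5_SET(fte_match_param, match, outer_headers.ethertype, ETH_P_IP);
	MLX5_SET(fte_match_param, match, outer_headers.ip_protocol, IPPROTO_TCP);
	MLX5_SET(fte_match_param, match, outer_headers.tcp_dport, 80);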
828enum {
829 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
830 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
831 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
832 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
833 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
834};
835
836struct mlx5_ifc_rx_hash_field_select_bits {
837 u8 l3_prot_type[0x1];
838 u8 l4_prot_type[0x1];
839 u8 selected_fields[0x1e];
840};
841
842enum {
843 MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0,
844 MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1,
845};
846
847enum {
848 MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0,
849 MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1,
850};
851
852struct mlx5_ifc_wq_bits {
853 u8 wq_type[0x4];
854 u8 wq_signature[0x1];
855 u8 end_padding_mode[0x2];
856 u8 cd_slave[0x1];
857 u8 reserved_0[0x18];
858
859 u8 hds_skip_first_sge[0x1];
860 u8 log2_hds_buf_size[0x3];
861 u8 reserved_1[0x7];
862 u8 page_offset[0x5];
863 u8 lwm[0x10];
864
865 u8 reserved_2[0x8];
866 u8 pd[0x18];
867
868 u8 reserved_3[0x8];
869 u8 uar_page[0x18];
870
871 u8 dbr_addr[0x40];
872
873 u8 hw_counter[0x20];
874
875 u8 sw_counter[0x20];
876
877 u8 reserved_4[0xc];
878 u8 log_wq_stride[0x4];
879 u8 reserved_5[0x3];
880 u8 log_wq_pg_sz[0x5];
881 u8 reserved_6[0x3];
882 u8 log_wq_sz[0x5];
883
884 u8 reserved_7[0x4e0];
885
886 struct mlx5_ifc_cmd_pas_bits pas[0];
887};
888
889struct mlx5_ifc_rq_num_bits {
890 u8 reserved_0[0x8];
891 u8 rq_num[0x18];
892};
893
894struct mlx5_ifc_mac_address_layout_bits {
895 u8 reserved_0[0x10];
896 u8 mac_addr_47_32[0x10];
897
898 u8 mac_addr_31_0[0x20];
899};
900
901struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
902 u8 reserved_0[0xa0];
903
904 u8 min_time_between_cnps[0x20];
905
906 u8 reserved_1[0x12];
907 u8 cnp_dscp[0x6];
908 u8 reserved_2[0x5];
909 u8 cnp_802p_prio[0x3];
910
911 u8 reserved_3[0x720];
912};
913
914struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
915 u8 reserved_0[0x60];
916
917 u8 reserved_1[0x4];
918 u8 clamp_tgt_rate[0x1];
919 u8 reserved_2[0x3];
920 u8 clamp_tgt_rate_after_time_inc[0x1];
921 u8 reserved_3[0x17];
922
923 u8 reserved_4[0x20];
924
925 u8 rpg_time_reset[0x20];
926
927 u8 rpg_byte_reset[0x20];
928
929 u8 rpg_threshold[0x20];
930
931 u8 rpg_max_rate[0x20];
932
933 u8 rpg_ai_rate[0x20];
934
935 u8 rpg_hai_rate[0x20];
936
937 u8 rpg_gd[0x20];
938
939 u8 rpg_min_dec_fac[0x20];
940
941 u8 rpg_min_rate[0x20];
942
943 u8 reserved_5[0xe0];
944
945 u8 rate_to_set_on_first_cnp[0x20];
946
947 u8 dce_tcp_g[0x20];
948
949 u8 dce_tcp_rtt[0x20];
950
951 u8 rate_reduce_monitor_period[0x20];
952
953 u8 reserved_6[0x20];
954
955 u8 initial_alpha_value[0x20];
956
957 u8 reserved_7[0x4a0];
958};
959
960struct mlx5_ifc_cong_control_802_1qau_rp_bits {
961 u8 reserved_0[0x80];
962
963 u8 rppp_max_rps[0x20];
964
965 u8 rpg_time_reset[0x20];
966
967 u8 rpg_byte_reset[0x20];
968
969 u8 rpg_threshold[0x20];
970
971 u8 rpg_max_rate[0x20];
972
973 u8 rpg_ai_rate[0x20];
974
975 u8 rpg_hai_rate[0x20];
976
977 u8 rpg_gd[0x20];
978
979 u8 rpg_min_dec_fac[0x20];
980
981 u8 rpg_min_rate[0x20];
982
983 u8 reserved_1[0x640];
984};
985
986enum {
987 MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1,
988 MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2,
989 MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4,
990};
991
992struct mlx5_ifc_resize_field_select_bits {
993 u8 resize_field_select[0x20];
994};
995
996enum {
997 MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
998 MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
999 MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4,
1000 MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8,
1001};
1002
1003struct mlx5_ifc_modify_field_select_bits {
1004 u8 modify_field_select[0x20];
1005};
1006
1007struct mlx5_ifc_field_select_r_roce_np_bits {
1008 u8 field_select_r_roce_np[0x20];
1009};
1010
1011struct mlx5_ifc_field_select_r_roce_rp_bits {
1012 u8 field_select_r_roce_rp[0x20];
1013};
1014
1015enum {
1016 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4,
1017 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8,
1018 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10,
1019 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20,
1020 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40,
1021 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80,
1022 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100,
1023 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200,
1024 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400,
1025 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800,
1026};
1027
1028struct mlx5_ifc_field_select_802_1qau_rp_bits {
1029 u8 field_select_8021qaurp[0x20];
1030};
1031
1032struct mlx5_ifc_phys_layer_cntrs_bits {
1033 u8 time_since_last_clear_high[0x20];
1034
1035 u8 time_since_last_clear_low[0x20];
1036
1037 u8 symbol_errors_high[0x20];
1038
1039 u8 symbol_errors_low[0x20];
1040
1041 u8 sync_headers_errors_high[0x20];
1042
1043 u8 sync_headers_errors_low[0x20];
1044
1045 u8 edpl_bip_errors_lane0_high[0x20];
1046
1047 u8 edpl_bip_errors_lane0_low[0x20];
1048
1049 u8 edpl_bip_errors_lane1_high[0x20];
1050
1051 u8 edpl_bip_errors_lane1_low[0x20];
1052
1053 u8 edpl_bip_errors_lane2_high[0x20];
1054
1055 u8 edpl_bip_errors_lane2_low[0x20];
1056
1057 u8 edpl_bip_errors_lane3_high[0x20];
1058
1059 u8 edpl_bip_errors_lane3_low[0x20];
1060
1061 u8 fc_fec_corrected_blocks_lane0_high[0x20];
1062
1063 u8 fc_fec_corrected_blocks_lane0_low[0x20];
1064
1065 u8 fc_fec_corrected_blocks_lane1_high[0x20];
1066
1067 u8 fc_fec_corrected_blocks_lane1_low[0x20];
1068
1069 u8 fc_fec_corrected_blocks_lane2_high[0x20];
1070
1071 u8 fc_fec_corrected_blocks_lane2_low[0x20];
1072
1073 u8 fc_fec_corrected_blocks_lane3_high[0x20];
1074
1075 u8 fc_fec_corrected_blocks_lane3_low[0x20];
1076
1077 u8 fc_fec_uncorrectable_blocks_lane0_high[0x20];
1078
1079 u8 fc_fec_uncorrectable_blocks_lane0_low[0x20];
1080
1081 u8 fc_fec_uncorrectable_blocks_lane1_high[0x20];
1082
1083 u8 fc_fec_uncorrectable_blocks_lane1_low[0x20];
1084
1085 u8 fc_fec_uncorrectable_blocks_lane2_high[0x20];
1086
1087 u8 fc_fec_uncorrectable_blocks_lane2_low[0x20];
1088
1089 u8 fc_fec_uncorrectable_blocks_lane3_high[0x20];
1090
1091 u8 fc_fec_uncorrectable_blocks_lane3_low[0x20];
1092
1093 u8 rs_fec_corrected_blocks_high[0x20];
1094
1095 u8 rs_fec_corrected_blocks_low[0x20];
1096
1097 u8 rs_fec_uncorrectable_blocks_high[0x20];
1098
1099 u8 rs_fec_uncorrectable_blocks_low[0x20];
1100
1101 u8 rs_fec_no_errors_blocks_high[0x20];
1102
1103 u8 rs_fec_no_errors_blocks_low[0x20];
1104
1105 u8 rs_fec_single_error_blocks_high[0x20];
1106
1107 u8 rs_fec_single_error_blocks_low[0x20];
1108
1109 u8 rs_fec_corrected_symbols_total_high[0x20];
1110
1111 u8 rs_fec_corrected_symbols_total_low[0x20];
1112
1113 u8 rs_fec_corrected_symbols_lane0_high[0x20];
1114
1115 u8 rs_fec_corrected_symbols_lane0_low[0x20];
1116
1117 u8 rs_fec_corrected_symbols_lane1_high[0x20];
1118
1119 u8 rs_fec_corrected_symbols_lane1_low[0x20];
1120
1121 u8 rs_fec_corrected_symbols_lane2_high[0x20];
1122
1123 u8 rs_fec_corrected_symbols_lane2_low[0x20];
1124
1125 u8 rs_fec_corrected_symbols_lane3_high[0x20];
1126
1127 u8 rs_fec_corrected_symbols_lane3_low[0x20];
1128
1129 u8 link_down_events[0x20];
1130
1131 u8 successful_recovery_events[0x20];
1132
1133 u8 reserved_0[0x180];
1134};
1135
1136struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
1137 u8 transmit_queue_high[0x20];
1138
1139 u8 transmit_queue_low[0x20];
1140
1141 u8 reserved_0[0x780];
1142};
1143
1144struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1145 u8 rx_octets_high[0x20];
1146
1147 u8 rx_octets_low[0x20];
1148
1149 u8 reserved_0[0xc0];
1150
1151 u8 rx_frames_high[0x20];
1152
1153 u8 rx_frames_low[0x20];
1154
1155 u8 tx_octets_high[0x20];
1156
1157 u8 tx_octets_low[0x20];
1158
1159 u8 reserved_1[0xc0];
1160
1161 u8 tx_frames_high[0x20];
1162
1163 u8 tx_frames_low[0x20];
1164
1165 u8 rx_pause_high[0x20];
1166
1167 u8 rx_pause_low[0x20];
1168
1169 u8 rx_pause_duration_high[0x20];
1170
1171 u8 rx_pause_duration_low[0x20];
1172
1173 u8 tx_pause_high[0x20];
1174
1175 u8 tx_pause_low[0x20];
1176
1177 u8 tx_pause_duration_high[0x20];
1178
1179 u8 tx_pause_duration_low[0x20];
1180
1181 u8 rx_pause_transition_high[0x20];
1182
1183 u8 rx_pause_transition_low[0x20];
1184
1185 u8 reserved_2[0x400];
1186};
1187
1188struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
1189 u8 port_transmit_wait_high[0x20];
1190
1191 u8 port_transmit_wait_low[0x20];
1192
1193 u8 reserved_0[0x780];
1194};
1195
1196struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
1197 u8 dot3stats_alignment_errors_high[0x20];
1198
1199 u8 dot3stats_alignment_errors_low[0x20];
1200
1201 u8 dot3stats_fcs_errors_high[0x20];
1202
1203 u8 dot3stats_fcs_errors_low[0x20];
1204
1205 u8 dot3stats_single_collision_frames_high[0x20];
1206
1207 u8 dot3stats_single_collision_frames_low[0x20];
1208
1209 u8 dot3stats_multiple_collision_frames_high[0x20];
1210
1211 u8 dot3stats_multiple_collision_frames_low[0x20];
1212
1213 u8 dot3stats_sqe_test_errors_high[0x20];
1214
1215 u8 dot3stats_sqe_test_errors_low[0x20];
1216
1217 u8 dot3stats_deferred_transmissions_high[0x20];
1218
1219 u8 dot3stats_deferred_transmissions_low[0x20];
1220
1221 u8 dot3stats_late_collisions_high[0x20];
1222
1223 u8 dot3stats_late_collisions_low[0x20];
1224
1225 u8 dot3stats_excessive_collisions_high[0x20];
1226
1227 u8 dot3stats_excessive_collisions_low[0x20];
1228
1229 u8 dot3stats_internal_mac_transmit_errors_high[0x20];
1230
1231 u8 dot3stats_internal_mac_transmit_errors_low[0x20];
1232
1233 u8 dot3stats_carrier_sense_errors_high[0x20];
1234
1235 u8 dot3stats_carrier_sense_errors_low[0x20];
1236
1237 u8 dot3stats_frame_too_longs_high[0x20];
1238
1239 u8 dot3stats_frame_too_longs_low[0x20];
1240
1241 u8 dot3stats_internal_mac_receive_errors_high[0x20];
1242
1243 u8 dot3stats_internal_mac_receive_errors_low[0x20];
1244
1245 u8 dot3stats_symbol_errors_high[0x20];
1246
1247 u8 dot3stats_symbol_errors_low[0x20];
1248
1249 u8 dot3control_in_unknown_opcodes_high[0x20];
1250
1251 u8 dot3control_in_unknown_opcodes_low[0x20];
1252
1253 u8 dot3in_pause_frames_high[0x20];
1254
1255 u8 dot3in_pause_frames_low[0x20];
1256
1257 u8 dot3out_pause_frames_high[0x20];
1258
1259 u8 dot3out_pause_frames_low[0x20];
1260
1261 u8 reserved_0[0x3c0];
1262};
1263
1264struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
1265 u8 ether_stats_drop_events_high[0x20];
1266
1267 u8 ether_stats_drop_events_low[0x20];
1268
1269 u8 ether_stats_octets_high[0x20];
1270
1271 u8 ether_stats_octets_low[0x20];
1272
1273 u8 ether_stats_pkts_high[0x20];
1274
1275 u8 ether_stats_pkts_low[0x20];
1276
1277 u8 ether_stats_broadcast_pkts_high[0x20];
1278
1279 u8 ether_stats_broadcast_pkts_low[0x20];
1280
1281 u8 ether_stats_multicast_pkts_high[0x20];
1282
1283 u8 ether_stats_multicast_pkts_low[0x20];
1284
1285 u8 ether_stats_crc_align_errors_high[0x20];
1286
1287 u8 ether_stats_crc_align_errors_low[0x20];
1288
1289 u8 ether_stats_undersize_pkts_high[0x20];
1290
1291 u8 ether_stats_undersize_pkts_low[0x20];
1292
1293 u8 ether_stats_oversize_pkts_high[0x20];
1294
1295 u8 ether_stats_oversize_pkts_low[0x20];
1296
1297 u8 ether_stats_fragments_high[0x20];
1298
1299 u8 ether_stats_fragments_low[0x20];
1300
1301 u8 ether_stats_jabbers_high[0x20];
1302
1303 u8 ether_stats_jabbers_low[0x20];
1304
1305 u8 ether_stats_collisions_high[0x20];
1306
1307 u8 ether_stats_collisions_low[0x20];
1308
1309 u8 ether_stats_pkts64octets_high[0x20];
1310
1311 u8 ether_stats_pkts64octets_low[0x20];
1312
1313 u8 ether_stats_pkts65to127octets_high[0x20];
1314
1315 u8 ether_stats_pkts65to127octets_low[0x20];
1316
1317 u8 ether_stats_pkts128to255octets_high[0x20];
1318
1319 u8 ether_stats_pkts128to255octets_low[0x20];
1320
1321 u8 ether_stats_pkts256to511octets_high[0x20];
1322
1323 u8 ether_stats_pkts256to511octets_low[0x20];
1324
1325 u8 ether_stats_pkts512to1023octets_high[0x20];
1326
1327 u8 ether_stats_pkts512to1023octets_low[0x20];
1328
1329 u8 ether_stats_pkts1024to1518octets_high[0x20];
1330
1331 u8 ether_stats_pkts1024to1518octets_low[0x20];
1332
1333 u8 ether_stats_pkts1519to2047octets_high[0x20];
1334
1335 u8 ether_stats_pkts1519to2047octets_low[0x20];
1336
1337 u8 ether_stats_pkts2048to4095octets_high[0x20];
1338
1339 u8 ether_stats_pkts2048to4095octets_low[0x20];
1340
1341 u8 ether_stats_pkts4096to8191octets_high[0x20];
1342
1343 u8 ether_stats_pkts4096to8191octets_low[0x20];
1344
1345 u8 ether_stats_pkts8192to10239octets_high[0x20];
1346
1347 u8 ether_stats_pkts8192to10239octets_low[0x20];
1348
1349 u8 reserved_0[0x280];
1350};
1351
1352struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
1353 u8 if_in_octets_high[0x20];
1354
1355 u8 if_in_octets_low[0x20];
1356
1357 u8 if_in_ucast_pkts_high[0x20];
1358
1359 u8 if_in_ucast_pkts_low[0x20];
1360
1361 u8 if_in_discards_high[0x20];
1362
1363 u8 if_in_discards_low[0x20];
1364
1365 u8 if_in_errors_high[0x20];
1366
1367 u8 if_in_errors_low[0x20];
1368
1369 u8 if_in_unknown_protos_high[0x20];
1370
1371 u8 if_in_unknown_protos_low[0x20];
1372
1373 u8 if_out_octets_high[0x20];
1374
1375 u8 if_out_octets_low[0x20];
1376
1377 u8 if_out_ucast_pkts_high[0x20];
1378
1379 u8 if_out_ucast_pkts_low[0x20];
1380
1381 u8 if_out_discards_high[0x20];
1382
1383 u8 if_out_discards_low[0x20];
1384
1385 u8 if_out_errors_high[0x20];
1386
1387 u8 if_out_errors_low[0x20];
1388
1389 u8 if_in_multicast_pkts_high[0x20];
1390
1391 u8 if_in_multicast_pkts_low[0x20];
1392
1393 u8 if_in_broadcast_pkts_high[0x20];
1394
1395 u8 if_in_broadcast_pkts_low[0x20];
1396
1397 u8 if_out_multicast_pkts_high[0x20];
1398
1399 u8 if_out_multicast_pkts_low[0x20];
1400
1401 u8 if_out_broadcast_pkts_high[0x20];
1402
1403 u8 if_out_broadcast_pkts_low[0x20];
1404
1405 u8 reserved_0[0x480];
1406};
1407
1408struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
1409 u8 a_frames_transmitted_ok_high[0x20];
1410
1411 u8 a_frames_transmitted_ok_low[0x20];
1412
1413 u8 a_frames_received_ok_high[0x20];
1414
1415 u8 a_frames_received_ok_low[0x20];
1416
1417 u8 a_frame_check_sequence_errors_high[0x20];
1418
1419 u8 a_frame_check_sequence_errors_low[0x20];
1420
1421 u8 a_alignment_errors_high[0x20];
1422
1423 u8 a_alignment_errors_low[0x20];
1424
1425 u8 a_octets_transmitted_ok_high[0x20];
1426
1427 u8 a_octets_transmitted_ok_low[0x20];
1428
1429 u8 a_octets_received_ok_high[0x20];
1430
1431 u8 a_octets_received_ok_low[0x20];
1432
1433 u8 a_multicast_frames_xmitted_ok_high[0x20];
1434
1435 u8 a_multicast_frames_xmitted_ok_low[0x20];
1436
1437 u8 a_broadcast_frames_xmitted_ok_high[0x20];
1438
1439 u8 a_broadcast_frames_xmitted_ok_low[0x20];
1440
1441 u8 a_multicast_frames_received_ok_high[0x20];
1442
1443 u8 a_multicast_frames_received_ok_low[0x20];
1444
1445 u8 a_broadcast_frames_received_ok_high[0x20];
1446
1447 u8 a_broadcast_frames_received_ok_low[0x20];
1448
1449 u8 a_in_range_length_errors_high[0x20];
1450
1451 u8 a_in_range_length_errors_low[0x20];
1452
1453 u8 a_out_of_range_length_field_high[0x20];
1454
1455 u8 a_out_of_range_length_field_low[0x20];
1456
1457 u8 a_frame_too_long_errors_high[0x20];
1458
1459 u8 a_frame_too_long_errors_low[0x20];
1460
1461 u8 a_symbol_error_during_carrier_high[0x20];
1462
1463 u8 a_symbol_error_during_carrier_low[0x20];
1464
1465 u8 a_mac_control_frames_transmitted_high[0x20];
1466
1467 u8 a_mac_control_frames_transmitted_low[0x20];
1468
1469 u8 a_mac_control_frames_received_high[0x20];
1470
1471 u8 a_mac_control_frames_received_low[0x20];
1472
1473 u8 a_unsupported_opcodes_received_high[0x20];
1474
1475 u8 a_unsupported_opcodes_received_low[0x20];
1476
1477 u8 a_pause_mac_ctrl_frames_received_high[0x20];
1478
1479 u8 a_pause_mac_ctrl_frames_received_low[0x20];
1480
1481 u8 a_pause_mac_ctrl_frames_transmitted_high[0x20];
1482
1483 u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
1484
1485 u8 reserved_0[0x300];
1486};
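
A hedged aside, not part of the diff itself: each counter group above splits a 64-bit counter into _high/_low 32-bit halves. One plausible way to recombine a pair, assuming only the MLX5_GET() accessor from include/linux/mlx5/device.h; the helper name is invented for the example:

static u64 example_a_frames_transmitted_ok(void *eth_802_3_cntrs)
{
	/* both halves are plain 0x20-bit fields of the layout defined above */
	u64 hi = MLX5_GET(eth_802_3_cntrs_grp_data_layout, eth_802_3_cntrs,
			  a_frames_transmitted_ok_high);
	u64 lo = MLX5_GET(eth_802_3_cntrs_grp_data_layout, eth_802_3_cntrs,
			  a_frames_transmitted_ok_low);

	return (hi << 32) | lo;
}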
1487
1488struct mlx5_ifc_cmd_inter_comp_event_bits {
1489 u8 command_completion_vector[0x20];
1490
1491 u8 reserved_0[0xc0];
1492};
1493
1494struct mlx5_ifc_stall_vl_event_bits {
1495 u8 reserved_0[0x18];
1496 u8 port_num[0x1];
1497 u8 reserved_1[0x3];
1498 u8 vl[0x4];
1499
1500 u8 reserved_2[0xa0];
1501};
1502
1503struct mlx5_ifc_db_bf_congestion_event_bits {
1504 u8 event_subtype[0x8];
1505 u8 reserved_0[0x8];
1506 u8 congestion_level[0x8];
1507 u8 reserved_1[0x8];
1508
1509 u8 reserved_2[0xa0];
1510};
1511
1512struct mlx5_ifc_gpio_event_bits {
1513 u8 reserved_0[0x60];
1514
1515 u8 gpio_event_hi[0x20];
1516
1517 u8 gpio_event_lo[0x20];
1518
1519 u8 reserved_1[0x40];
1520};
1521
1522struct mlx5_ifc_port_state_change_event_bits {
1523 u8 reserved_0[0x40];
1524
1525 u8 port_num[0x4];
1526 u8 reserved_1[0x1c];
1527
1528 u8 reserved_2[0x80];
1529};
1530
1531struct mlx5_ifc_dropped_packet_logged_bits {
1532 u8 reserved_0[0xe0];
1533};
1534
1535enum {
1536 MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
1537 MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
1538};
1539
1540struct mlx5_ifc_cq_error_bits {
1541 u8 reserved_0[0x8];
1542 u8 cqn[0x18];
1543
1544 u8 reserved_1[0x20];
1545
1546 u8 reserved_2[0x18];
1547 u8 syndrome[0x8];
1548
1549 u8 reserved_3[0x80];
1550};
1551
1552struct mlx5_ifc_rdma_page_fault_event_bits {
1553 u8 bytes_committed[0x20];
1554
1555 u8 r_key[0x20];
1556
1557 u8 reserved_0[0x10];
1558 u8 packet_len[0x10];
1559
1560 u8 rdma_op_len[0x20];
1561
1562 u8 rdma_va[0x40];
1563
1564 u8 reserved_1[0x5];
1565 u8 rdma[0x1];
1566 u8 write[0x1];
1567 u8 requestor[0x1];
1568 u8 qp_number[0x18];
1569};
1570
1571struct mlx5_ifc_wqe_associated_page_fault_event_bits {
1572 u8 bytes_committed[0x20];
1573
1574 u8 reserved_0[0x10];
1575 u8 wqe_index[0x10];
1576
1577 u8 reserved_1[0x10];
1578 u8 len[0x10];
1579
1580 u8 reserved_2[0x60];
1581
1582 u8 reserved_3[0x5];
1583 u8 rdma[0x1];
1584 u8 write_read[0x1];
1585 u8 requestor[0x1];
1586 u8 qpn[0x18];
1587};
1588
1589struct mlx5_ifc_qp_events_bits {
1590 u8 reserved_0[0xa0];
1591
1592 u8 type[0x8];
1593 u8 reserved_1[0x18];
1594
1595 u8 reserved_2[0x8];
1596 u8 qpn_rqn_sqn[0x18];
1597};
1598
1599struct mlx5_ifc_dct_events_bits {
1600 u8 reserved_0[0xc0];
1601
1602 u8 reserved_1[0x8];
1603 u8 dct_number[0x18];
1604};
1605
1606struct mlx5_ifc_comp_event_bits {
1607 u8 reserved_0[0xc0];
1608
1609 u8 reserved_1[0x8];
1610 u8 cq_number[0x18];
1611};
1612
1613enum {
1614 MLX5_QPC_STATE_RST = 0x0,
1615 MLX5_QPC_STATE_INIT = 0x1,
1616 MLX5_QPC_STATE_RTR = 0x2,
1617 MLX5_QPC_STATE_RTS = 0x3,
1618 MLX5_QPC_STATE_SQER = 0x4,
1619 MLX5_QPC_STATE_ERR = 0x6,
1620 MLX5_QPC_STATE_SQD = 0x7,
1621 MLX5_QPC_STATE_SUSPENDED = 0x9,
1622};
1623
1624enum {
1625 MLX5_QPC_ST_RC = 0x0,
1626 MLX5_QPC_ST_UC = 0x1,
1627 MLX5_QPC_ST_UD = 0x2,
1628 MLX5_QPC_ST_XRC = 0x3,
1629 MLX5_QPC_ST_DCI = 0x5,
1630 MLX5_QPC_ST_QP0 = 0x7,
1631 MLX5_QPC_ST_QP1 = 0x8,
1632 MLX5_QPC_ST_RAW_DATAGRAM = 0x9,
1633 MLX5_QPC_ST_REG_UMR = 0xc,
1634};
1635
1636enum {
1637 MLX5_QPC_PM_STATE_ARMED = 0x0,
1638 MLX5_QPC_PM_STATE_REARM = 0x1,
1639 MLX5_QPC_PM_STATE_RESERVED = 0x2,
1640 MLX5_QPC_PM_STATE_MIGRATED = 0x3,
1641};
1642
1643enum {
1644 MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
1645 MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
1646};
1647
1648enum {
1649 MLX5_QPC_MTU_256_BYTES = 0x1,
1650 MLX5_QPC_MTU_512_BYTES = 0x2,
1651 MLX5_QPC_MTU_1K_BYTES = 0x3,
1652 MLX5_QPC_MTU_2K_BYTES = 0x4,
1653 MLX5_QPC_MTU_4K_BYTES = 0x5,
1654 MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7,
1655};
1656
1657enum {
1658 MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1,
1659 MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2,
1660 MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3,
1661 MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4,
1662 MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5,
1663 MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6,
1664 MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7,
1665 MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8,
1666};
1667
1668enum {
1669 MLX5_QPC_CS_REQ_DISABLE = 0x0,
1670 MLX5_QPC_CS_REQ_UP_TO_32B = 0x11,
1671 MLX5_QPC_CS_REQ_UP_TO_64B = 0x22,
1672};
1673
1674enum {
1675 MLX5_QPC_CS_RES_DISABLE = 0x0,
1676 MLX5_QPC_CS_RES_UP_TO_32B = 0x1,
1677 MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
1678};
1679
1680struct mlx5_ifc_qpc_bits {
1681 u8 state[0x4];
1682 u8 reserved_0[0x4];
1683 u8 st[0x8];
1684 u8 reserved_1[0x3];
1685 u8 pm_state[0x2];
1686 u8 reserved_2[0x7];
1687 u8 end_padding_mode[0x2];
1688 u8 reserved_3[0x2];
1689
1690 u8 wq_signature[0x1];
1691 u8 block_lb_mc[0x1];
1692 u8 atomic_like_write_en[0x1];
1693 u8 latency_sensitive[0x1];
1694 u8 reserved_4[0x1];
1695 u8 drain_sigerr[0x1];
1696 u8 reserved_5[0x2];
1697 u8 pd[0x18];
1698
1699 u8 mtu[0x3];
1700 u8 log_msg_max[0x5];
1701 u8 reserved_6[0x1];
1702 u8 log_rq_size[0x4];
1703 u8 log_rq_stride[0x3];
1704 u8 no_sq[0x1];
1705 u8 log_sq_size[0x4];
1706 u8 reserved_7[0x6];
1707 u8 rlky[0x1];
1708 u8 reserved_8[0x4];
1709
1710 u8 counter_set_id[0x8];
1711 u8 uar_page[0x18];
1712
1713 u8 reserved_9[0x8];
1714 u8 user_index[0x18];
1715
1716 u8 reserved_10[0x3];
1717 u8 log_page_size[0x5];
1718 u8 remote_qpn[0x18];
1719
1720 struct mlx5_ifc_ads_bits primary_address_path;
1721
1722 struct mlx5_ifc_ads_bits secondary_address_path;
1723
1724 u8 log_ack_req_freq[0x4];
1725 u8 reserved_11[0x4];
1726 u8 log_sra_max[0x3];
1727 u8 reserved_12[0x2];
1728 u8 retry_count[0x3];
1729 u8 rnr_retry[0x3];
1730 u8 reserved_13[0x1];
1731 u8 fre[0x1];
1732 u8 cur_rnr_retry[0x3];
1733 u8 cur_retry_count[0x3];
1734 u8 reserved_14[0x5];
1735
1736 u8 reserved_15[0x20];
1737
1738 u8 reserved_16[0x8];
1739 u8 next_send_psn[0x18];
1740
1741 u8 reserved_17[0x8];
1742 u8 cqn_snd[0x18];
1743
1744 u8 reserved_18[0x40];
1745
1746 u8 reserved_19[0x8];
1747 u8 last_acked_psn[0x18];
1748
1749 u8 reserved_20[0x8];
1750 u8 ssn[0x18];
1751
1752 u8 reserved_21[0x8];
1753 u8 log_rra_max[0x3];
1754 u8 reserved_22[0x1];
1755 u8 atomic_mode[0x4];
1756 u8 rre[0x1];
1757 u8 rwe[0x1];
1758 u8 rae[0x1];
1759 u8 reserved_23[0x1];
1760 u8 page_offset[0x6];
1761 u8 reserved_24[0x3];
1762 u8 cd_slave_receive[0x1];
1763 u8 cd_slave_send[0x1];
1764 u8 cd_master[0x1];
1765
1766 u8 reserved_25[0x3];
1767 u8 min_rnr_nak[0x5];
1768 u8 next_rcv_psn[0x18];
1769
1770 u8 reserved_26[0x8];
1771 u8 xrcd[0x18];
1772
1773 u8 reserved_27[0x8];
1774 u8 cqn_rcv[0x18];
1775
1776 u8 dbr_addr[0x40];
1777
1778 u8 q_key[0x20];
1779
1780 u8 reserved_28[0x5];
1781 u8 rq_type[0x3];
1782 u8 srqn_rmpn[0x18];
1783
1784 u8 reserved_29[0x8];
1785 u8 rmsn[0x18];
1786
1787 u8 hw_sq_wqebb_counter[0x10];
1788 u8 sw_sq_wqebb_counter[0x10];
1789
1790 u8 hw_rq_counter[0x20];
1791
1792 u8 sw_rq_counter[0x20];
1793
1794 u8 reserved_30[0x20];
1795
1796 u8 reserved_31[0xf];
1797 u8 cgs[0x1];
1798 u8 cs_req[0x8];
1799 u8 cs_res[0x8];
1800
1801 u8 dc_access_key[0x40];
1802
1803 u8 reserved_32[0xc0];
1804};
1805
1806struct mlx5_ifc_roce_addr_layout_bits {
1807 u8 source_l3_address[16][0x8];
1808
1809 u8 reserved_0[0x3];
1810 u8 vlan_valid[0x1];
1811 u8 vlan_id[0xc];
1812 u8 source_mac_47_32[0x10];
1813
1814 u8 source_mac_31_0[0x20];
1815
1816 u8 reserved_1[0x14];
1817 u8 roce_l3_type[0x4];
1818 u8 roce_version[0x8];
1819
1820 u8 reserved_2[0x20];
1821};
1822
1823union mlx5_ifc_hca_cap_union_bits {
1824 struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
1825 struct mlx5_ifc_odp_cap_bits odp_cap;
1826 struct mlx5_ifc_atomic_caps_bits atomic_caps;
1827 struct mlx5_ifc_roce_cap_bits roce_cap;
1828 struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
1829 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
1830 u8 reserved_0[0x8000];
1831};
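
A hedged sketch of how this capability union is typically consumed: the QUERY_HCA_CAP output (redefined further down in this diff) carries one union member, and individual fields can be read with the MLX5_ADDR_OF()/MLX5_GET() accessors from include/linux/mlx5/device.h. The helper name and the chosen field are illustrative assumptions only:

static u8 example_read_log_max_qp(void *out)
{
	/* point at the capability union inside the QUERY_HCA_CAP output mailbox */
	void *cap = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	/* log_max_qp is a field of mlx5_ifc_cmd_hca_cap_bits */
	return MLX5_GET(cmd_hca_cap, cap, log_max_qp);
}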
1832
1833enum {
1834 MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
1835 MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
1836 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
1837};
1838
1839struct mlx5_ifc_flow_context_bits {
1840 u8 reserved_0[0x20];
1841
1842 u8 group_id[0x20];
1843
1844 u8 reserved_1[0x8];
1845 u8 flow_tag[0x18];
1846
1847 u8 reserved_2[0x10];
1848 u8 action[0x10];
1849
1850 u8 reserved_3[0x8];
1851 u8 destination_list_size[0x18];
1852
1853 u8 reserved_4[0x160];
1854
1855 struct mlx5_ifc_fte_match_param_bits match_value;
1856
1857 u8 reserved_5[0x600];
1858
1859 struct mlx5_ifc_dest_format_struct_bits destination[0];
1860};
1861
1862enum {
1863 MLX5_XRC_SRQC_STATE_GOOD = 0x0,
1864 MLX5_XRC_SRQC_STATE_ERROR = 0x1,
1865};
1866
1867struct mlx5_ifc_xrc_srqc_bits {
1868 u8 state[0x4];
1869 u8 log_xrc_srq_size[0x4];
1870 u8 reserved_0[0x18];
1871
1872 u8 wq_signature[0x1];
1873 u8 cont_srq[0x1];
1874 u8 reserved_1[0x1];
1875 u8 rlky[0x1];
1876 u8 basic_cyclic_rcv_wqe[0x1];
1877 u8 log_rq_stride[0x3];
1878 u8 xrcd[0x18];
1879
1880 u8 page_offset[0x6];
1881 u8 reserved_2[0x2];
1882 u8 cqn[0x18];
1883
1884 u8 reserved_3[0x20];
1885
1886 u8 user_index_equal_xrc_srqn[0x1];
1887 u8 reserved_4[0x1];
1888 u8 log_page_size[0x6];
1889 u8 user_index[0x18];
1890
1891 u8 reserved_5[0x20];
1892
1893 u8 reserved_6[0x8];
1894 u8 pd[0x18];
1895
1896 u8 lwm[0x10];
1897 u8 wqe_cnt[0x10];
1898
1899 u8 reserved_7[0x40];
1900
1901 u8 db_record_addr_h[0x20];
1902
1903 u8 db_record_addr_l[0x1e];
1904 u8 reserved_8[0x2];
1905
1906 u8 reserved_9[0x80];
1907};
1908
1909struct mlx5_ifc_traffic_counter_bits {
1910 u8 packets[0x40];
1911
1912 u8 octets[0x40];
1913};
1914
1915struct mlx5_ifc_tisc_bits {
1916 u8 reserved_0[0xc];
1917 u8 prio[0x4];
1918 u8 reserved_1[0x10];
1919
1920 u8 reserved_2[0x100];
1921
1922 u8 reserved_3[0x8];
1923 u8 transport_domain[0x18];
1924
1925 u8 reserved_4[0x3c0];
1926};
1927
1928enum {
1929 MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
1930 MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
1931};
1932
1933enum {
1934 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
1935 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
1936};
1937
1938enum {
1939 MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
1940 MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
1941 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
1942};
1943
1944enum {
1945 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
1946 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
1947};
1948
1949struct mlx5_ifc_tirc_bits {
1950 u8 reserved_0[0x20];
1951
1952 u8 disp_type[0x4];
1953 u8 reserved_1[0x1c];
1954
1955 u8 reserved_2[0x40];
1956
1957 u8 reserved_3[0x4];
1958 u8 lro_timeout_period_usecs[0x10];
1959 u8 lro_enable_mask[0x4];
1960 u8 lro_max_ip_payload_size[0x8];
1961
1962 u8 reserved_4[0x40];
1963
1964 u8 reserved_5[0x8];
1965 u8 inline_rqn[0x18];
1966
1967 u8 rx_hash_symmetric[0x1];
1968 u8 reserved_6[0x1];
1969 u8 tunneled_offload_en[0x1];
1970 u8 reserved_7[0x5];
1971 u8 indirect_table[0x18];
1972
1973 u8 rx_hash_fn[0x4];
1974 u8 reserved_8[0x2];
1975 u8 self_lb_block[0x2];
1976 u8 transport_domain[0x18];
1977
1978 u8 rx_hash_toeplitz_key[10][0x20];
1979
1980 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
1981
1982 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
1983
1984 u8 reserved_9[0x4c0];
1985};
1986
1987enum {
1988 MLX5_SRQC_STATE_GOOD = 0x0,
1989 MLX5_SRQC_STATE_ERROR = 0x1,
1990};
1991
1992struct mlx5_ifc_srqc_bits {
1993 u8 state[0x4];
1994 u8 log_srq_size[0x4];
1995 u8 reserved_0[0x18];
1996
1997 u8 wq_signature[0x1];
1998 u8 cont_srq[0x1];
1999 u8 reserved_1[0x1];
2000 u8 rlky[0x1];
2001 u8 reserved_2[0x1];
2002 u8 log_rq_stride[0x3];
2003 u8 xrcd[0x18];
2004
2005 u8 page_offset[0x6];
2006 u8 reserved_3[0x2];
2007 u8 cqn[0x18];
2008
2009 u8 reserved_4[0x20];
2010
2011 u8 reserved_5[0x2];
2012 u8 log_page_size[0x6];
2013 u8 reserved_6[0x18];
2014
2015 u8 reserved_7[0x20];
2016
2017 u8 reserved_8[0x8];
2018 u8 pd[0x18];
2019
2020 u8 lwm[0x10];
2021 u8 wqe_cnt[0x10];
2022
2023 u8 reserved_9[0x40];
2024
2025 u8 dbr_addr[0x40];
2026
2027 u8 reserved_10[0x80];
2028};
2029
2030enum {
2031 MLX5_SQC_STATE_RST = 0x0,
2032 MLX5_SQC_STATE_RDY = 0x1,
2033 MLX5_SQC_STATE_ERR = 0x3,
2034};
2035
2036struct mlx5_ifc_sqc_bits {
2037 u8 rlky[0x1];
2038 u8 cd_master[0x1];
2039 u8 fre[0x1];
2040 u8 flush_in_error_en[0x1];
2041 u8 reserved_0[0x4];
2042 u8 state[0x4];
2043 u8 reserved_1[0x14];
2044
2045 u8 reserved_2[0x8];
2046 u8 user_index[0x18];
2047
2048 u8 reserved_3[0x8];
2049 u8 cqn[0x18];
2050
2051 u8 reserved_4[0xa0];
2052
2053 u8 tis_lst_sz[0x10];
2054 u8 reserved_5[0x10];
2055
2056 u8 reserved_6[0x40];
2057
2058 u8 reserved_7[0x8];
2059 u8 tis_num_0[0x18];
2060
2061 struct mlx5_ifc_wq_bits wq;
2062};
2063
2064struct mlx5_ifc_rqtc_bits {
2065 u8 reserved_0[0xa0];
2066
2067 u8 reserved_1[0x10];
2068 u8 rqt_max_size[0x10];
2069
2070 u8 reserved_2[0x10];
2071 u8 rqt_actual_size[0x10];
2072
2073 u8 reserved_3[0x6a0];
2074
2075 struct mlx5_ifc_rq_num_bits rq_num[0];
2076};
2077
2078enum {
2079 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
2080 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
2081};
2082
2083enum {
2084 MLX5_RQC_STATE_RST = 0x0,
2085 MLX5_RQC_STATE_RDY = 0x1,
2086 MLX5_RQC_STATE_ERR = 0x3,
2087};
2088
2089struct mlx5_ifc_rqc_bits {
2090 u8 rlky[0x1];
2091 u8 reserved_0[0x2];
2092 u8 vsd[0x1];
2093 u8 mem_rq_type[0x4];
2094 u8 state[0x4];
2095 u8 reserved_1[0x1];
2096 u8 flush_in_error_en[0x1];
2097 u8 reserved_2[0x12];
2098
2099 u8 reserved_3[0x8];
2100 u8 user_index[0x18];
2101
2102 u8 reserved_4[0x8];
2103 u8 cqn[0x18];
2104
2105 u8 counter_set_id[0x8];
2106 u8 reserved_5[0x18];
2107
2108 u8 reserved_6[0x8];
2109 u8 rmpn[0x18];
2110
2111 u8 reserved_7[0xe0];
2112
2113 struct mlx5_ifc_wq_bits wq;
2114};
2115
2116enum {
2117 MLX5_RMPC_STATE_RDY = 0x1,
2118 MLX5_RMPC_STATE_ERR = 0x3,
2119};
2120
2121struct mlx5_ifc_rmpc_bits {
2122 u8 reserved_0[0x8];
2123 u8 state[0x4];
2124 u8 reserved_1[0x14];
2125
2126 u8 basic_cyclic_rcv_wqe[0x1];
2127 u8 reserved_2[0x1f];
2128
2129 u8 reserved_3[0x140];
2130
2131 struct mlx5_ifc_wq_bits wq;
2132};
2133
2134enum {
2135 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
2136};
2137
2138struct mlx5_ifc_nic_vport_context_bits {
2139 u8 reserved_0[0x1f];
2140 u8 roce_en[0x1];
2141
2142 u8 reserved_1[0x760];
2143
2144 u8 reserved_2[0x5];
2145 u8 allowed_list_type[0x3];
2146 u8 reserved_3[0xc];
2147 u8 allowed_list_size[0xc];
2148
2149 struct mlx5_ifc_mac_address_layout_bits permanent_address;
2150
2151 u8 reserved_4[0x20];
2152
2153 u8 current_uc_mac_address[0][0x40];
2154};
2155
2156enum {
2157 MLX5_MKC_ACCESS_MODE_PA = 0x0,
2158 MLX5_MKC_ACCESS_MODE_MTT = 0x1,
2159 MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
2160};
2161
2162struct mlx5_ifc_mkc_bits {
2163 u8 reserved_0[0x1];
2164 u8 free[0x1];
2165 u8 reserved_1[0xd];
2166 u8 small_fence_on_rdma_read_response[0x1];
2167 u8 umr_en[0x1];
2168 u8 a[0x1];
2169 u8 rw[0x1];
2170 u8 rr[0x1];
2171 u8 lw[0x1];
2172 u8 lr[0x1];
2173 u8 access_mode[0x2];
2174 u8 reserved_2[0x8];
2175
2176 u8 qpn[0x18];
2177 u8 mkey_7_0[0x8];
2178
2179 u8 reserved_3[0x20];
2180
2181 u8 length64[0x1];
2182 u8 bsf_en[0x1];
2183 u8 sync_umr[0x1];
2184 u8 reserved_4[0x2];
2185 u8 expected_sigerr_count[0x1];
2186 u8 reserved_5[0x1];
2187 u8 en_rinval[0x1];
2188 u8 pd[0x18];
2189
2190 u8 start_addr[0x40];
2191
2192 u8 len[0x40];
2193
2194 u8 bsf_octword_size[0x20];
2195
2196 u8 reserved_6[0x80];
2197
2198 u8 translations_octword_size[0x20];
2199
2200 u8 reserved_7[0x1b];
2201 u8 log_page_size[0x5];
2202
2203 u8 reserved_8[0x20];
2204};
2205
2206struct mlx5_ifc_pkey_bits {
2207 u8 reserved_0[0x10];
2208 u8 pkey[0x10];
2209};
2210
2211struct mlx5_ifc_array128_auto_bits {
2212 u8 array128_auto[16][0x8];
2213};
2214
2215struct mlx5_ifc_hca_vport_context_bits {
2216 u8 field_select[0x20];
2217
2218 u8 reserved_0[0xe0];
2219
2220 u8 sm_virt_aware[0x1];
2221 u8 has_smi[0x1];
2222 u8 has_raw[0x1];
2223 u8 grh_required[0x1];
2224 u8 reserved_1[0xc];
2225 u8 port_physical_state[0x4];
2226 u8 vport_state_policy[0x4];
2227 u8 port_state[0x4];
2228 u8 vport_state[0x4];
2229
2230 u8 reserved_2[0x20];
2231
2232 u8 system_image_guid[0x40];
2233
2234 u8 port_guid[0x40];
2235
2236 u8 node_guid[0x40];
2237
2238 u8 cap_mask1[0x20];
2239
2240 u8 cap_mask1_field_select[0x20];
2241
2242 u8 cap_mask2[0x20];
2243
2244 u8 cap_mask2_field_select[0x20];
2245
2246 u8 reserved_3[0x80];
2247
2248 u8 lid[0x10];
2249 u8 reserved_4[0x4];
2250 u8 init_type_reply[0x4];
2251 u8 lmc[0x3];
2252 u8 subnet_timeout[0x5];
2253
2254 u8 sm_lid[0x10];
2255 u8 sm_sl[0x4];
2256 u8 reserved_5[0xc];
2257
2258 u8 qkey_violation_counter[0x10];
2259 u8 pkey_violation_counter[0x10];
2260
2261 u8 reserved_6[0xca0];
2262};
2263
2264enum {
2265 MLX5_EQC_STATUS_OK = 0x0,
2266 MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
2267};
2268
2269enum {
2270 MLX5_EQC_ST_ARMED = 0x9,
2271 MLX5_EQC_ST_FIRED = 0xa,
2272};
2273
2274struct mlx5_ifc_eqc_bits {
2275 u8 status[0x4];
2276 u8 reserved_0[0x9];
2277 u8 ec[0x1];
2278 u8 oi[0x1];
2279 u8 reserved_1[0x5];
2280 u8 st[0x4];
2281 u8 reserved_2[0x8];
2282
2283 u8 reserved_3[0x20];
2284
2285 u8 reserved_4[0x14];
2286 u8 page_offset[0x6];
2287 u8 reserved_5[0x6];
2288
2289 u8 reserved_6[0x3];
2290 u8 log_eq_size[0x5];
2291 u8 uar_page[0x18];
2292
2293 u8 reserved_7[0x20];
2294
2295 u8 reserved_8[0x18];
2296 u8 intr[0x8];
2297
2298 u8 reserved_9[0x3];
2299 u8 log_page_size[0x5];
2300 u8 reserved_10[0x18];
2301
2302 u8 reserved_11[0x60];
2303
2304 u8 reserved_12[0x8];
2305 u8 consumer_counter[0x18];
2306
2307 u8 reserved_13[0x8];
2308 u8 producer_counter[0x18];
2309
2310 u8 reserved_14[0x80];
2311};
2312
2313enum {
2314 MLX5_DCTC_STATE_ACTIVE = 0x0,
2315 MLX5_DCTC_STATE_DRAINING = 0x1,
2316 MLX5_DCTC_STATE_DRAINED = 0x2,
2317};
2318
2319enum {
2320 MLX5_DCTC_CS_RES_DISABLE = 0x0,
2321 MLX5_DCTC_CS_RES_NA = 0x1,
2322 MLX5_DCTC_CS_RES_UP_TO_64B = 0x2,
2323};
2324
2325enum {
2326 MLX5_DCTC_MTU_256_BYTES = 0x1,
2327 MLX5_DCTC_MTU_512_BYTES = 0x2,
2328 MLX5_DCTC_MTU_1K_BYTES = 0x3,
2329 MLX5_DCTC_MTU_2K_BYTES = 0x4,
2330 MLX5_DCTC_MTU_4K_BYTES = 0x5,
2331};
2332
2333struct mlx5_ifc_dctc_bits {
2334 u8 reserved_0[0x4];
2335 u8 state[0x4];
2336 u8 reserved_1[0x18];
2337
2338 u8 reserved_2[0x8];
2339 u8 user_index[0x18];
2340
2341 u8 reserved_3[0x8];
2342 u8 cqn[0x18];
2343
2344 u8 counter_set_id[0x8];
2345 u8 atomic_mode[0x4];
2346 u8 rre[0x1];
2347 u8 rwe[0x1];
2348 u8 rae[0x1];
2349 u8 atomic_like_write_en[0x1];
2350 u8 latency_sensitive[0x1];
2351 u8 rlky[0x1];
2352 u8 free_ar[0x1];
2353 u8 reserved_4[0xd];
2354
2355 u8 reserved_5[0x8];
2356 u8 cs_res[0x8];
2357 u8 reserved_6[0x3];
2358 u8 min_rnr_nak[0x5];
2359 u8 reserved_7[0x8];
2360
2361 u8 reserved_8[0x8];
2362 u8 srqn[0x18];
2363
2364 u8 reserved_9[0x8];
2365 u8 pd[0x18];
2366
2367 u8 tclass[0x8];
2368 u8 reserved_10[0x4];
2369 u8 flow_label[0x14];
2370
2371 u8 dc_access_key[0x40];
2372
2373 u8 reserved_11[0x5];
2374 u8 mtu[0x3];
2375 u8 port[0x8];
2376 u8 pkey_index[0x10];
2377
2378 u8 reserved_12[0x8];
2379 u8 my_addr_index[0x8];
2380 u8 reserved_13[0x8];
2381 u8 hop_limit[0x8];
2382
2383 u8 dc_access_key_violation_count[0x20];
2384
2385 u8 reserved_14[0x14];
2386 u8 dei_cfi[0x1];
2387 u8 eth_prio[0x3];
2388 u8 ecn[0x2];
2389 u8 dscp[0x6];
2390
2391 u8 reserved_15[0x40];
2392};
2393
2394enum {
2395 MLX5_CQC_STATUS_OK = 0x0,
2396 MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9,
2397 MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa,
2398};
2399
2400enum {
2401 MLX5_CQC_CQE_SZ_64_BYTES = 0x0,
2402 MLX5_CQC_CQE_SZ_128_BYTES = 0x1,
2403};
2404
2405enum {
2406 MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6,
2407 MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9,
2408 MLX5_CQC_ST_FIRED = 0xa,
2409};
2410
2411struct mlx5_ifc_cqc_bits {
2412 u8 status[0x4];
2413 u8 reserved_0[0x4];
2414 u8 cqe_sz[0x3];
2415 u8 cc[0x1];
2416 u8 reserved_1[0x1];
2417 u8 scqe_break_moderation_en[0x1];
2418 u8 oi[0x1];
2419 u8 reserved_2[0x2];
2420 u8 cqe_zip_en[0x1];
2421 u8 mini_cqe_res_format[0x2];
2422 u8 st[0x4];
2423 u8 reserved_3[0x8];
2424
2425 u8 reserved_4[0x20];
2426
2427 u8 reserved_5[0x14];
2428 u8 page_offset[0x6];
2429 u8 reserved_6[0x6];
2430
2431 u8 reserved_7[0x3];
2432 u8 log_cq_size[0x5];
2433 u8 uar_page[0x18];
2434
2435 u8 reserved_8[0x4];
2436 u8 cq_period[0xc];
2437 u8 cq_max_count[0x10];
2438
2439 u8 reserved_9[0x18];
2440 u8 c_eqn[0x8];
2441
2442 u8 reserved_10[0x3];
2443 u8 log_page_size[0x5];
2444 u8 reserved_11[0x18];
2445
2446 u8 reserved_12[0x20];
2447
2448 u8 reserved_13[0x8];
2449 u8 last_notified_index[0x18];
2450
2451 u8 reserved_14[0x8];
2452 u8 last_solicit_index[0x18];
2453
2454 u8 reserved_15[0x8];
2455 u8 consumer_counter[0x18];
2456
2457 u8 reserved_16[0x8];
2458 u8 producer_counter[0x18];
2459
2460 u8 reserved_17[0x40];
2461
2462 u8 dbr_addr[0x40];
2463};
2464
2465union mlx5_ifc_cong_control_roce_ecn_auto_bits {
2466 struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
2467 struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
2468 struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
2469 u8 reserved_0[0x800];
2470};
2471
2472struct mlx5_ifc_query_adapter_param_block_bits {
2473 u8 reserved_0[0xc0];
2474
2475 u8 reserved_1[0x8];
2476 u8 ieee_vendor_id[0x18];
2477
2478 u8 reserved_2[0x10];
2479 u8 vsd_vendor_id[0x10];
2480
2481 u8 vsd[208][0x8];
2482
2483 u8 vsd_contd_psid[16][0x8];
2484};
2485
2486union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
2487 struct mlx5_ifc_modify_field_select_bits modify_field_select;
2488 struct mlx5_ifc_resize_field_select_bits resize_field_select;
2489 u8 reserved_0[0x20];
2490};
2491
2492union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
2493 struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
2494 struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
2495 struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
2496 u8 reserved_0[0x20];
2497};
2498
2499union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
2500 struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
2501 struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
2502 struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
2503 struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
2504 struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
2505 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
2506 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
2507 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
2508 u8 reserved_0[0x7c0];
2509};
2510
2511union mlx5_ifc_event_auto_bits {
2512 struct mlx5_ifc_comp_event_bits comp_event;
2513 struct mlx5_ifc_dct_events_bits dct_events;
2514 struct mlx5_ifc_qp_events_bits qp_events;
2515 struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
2516 struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
2517 struct mlx5_ifc_cq_error_bits cq_error;
2518 struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
2519 struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
2520 struct mlx5_ifc_gpio_event_bits gpio_event;
2521 struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
2522 struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
2523 struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
2524 u8 reserved_0[0xe0];
2525};
2526
2527struct mlx5_ifc_health_buffer_bits {
2528 u8 reserved_0[0x100];
2529
2530 u8 assert_existptr[0x20];
2531
2532 u8 assert_callra[0x20];
2533
2534 u8 reserved_1[0x40];
2535
2536 u8 fw_version[0x20];
2537
2538 u8 hw_id[0x20];
2539
2540 u8 reserved_2[0x20];
2541
2542 u8 irisc_index[0x8];
2543 u8 synd[0x8];
2544 u8 ext_synd[0x10];
2545};
2546
2547struct mlx5_ifc_register_loopback_control_bits {
2548 u8 no_lb[0x1];
2549 u8 reserved_0[0x7];
2550 u8 port[0x8];
2551 u8 reserved_1[0x10];
2552
2553 u8 reserved_2[0x60];
2554};
2555
2556struct mlx5_ifc_teardown_hca_out_bits {
2557 u8 status[0x8];
2558 u8 reserved_0[0x18];
2559
2560 u8 syndrome[0x20];
2561
2562 u8 reserved_1[0x40];
2563};
2564
2565enum {
2566 MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
2567 MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1,
2568};
2569
2570struct mlx5_ifc_teardown_hca_in_bits {
2571 u8 opcode[0x10];
2572 u8 reserved_0[0x10];
2573
2574 u8 reserved_1[0x10];
2575 u8 op_mod[0x10];
2576
2577 u8 reserved_2[0x10];
2578 u8 profile[0x10];
2579
2580 u8 reserved_3[0x20];
2581};
2582
2583struct mlx5_ifc_sqerr2rts_qp_out_bits {
2584 u8 status[0x8];
2585 u8 reserved_0[0x18];
2586
2587 u8 syndrome[0x20];
2588
2589 u8 reserved_1[0x40];
2590};
2591
2592struct mlx5_ifc_sqerr2rts_qp_in_bits {
2593 u8 opcode[0x10];
2594 u8 reserved_0[0x10];
2595
2596 u8 reserved_1[0x10];
2597 u8 op_mod[0x10];
2598
2599 u8 reserved_2[0x8];
2600 u8 qpn[0x18];
2601
2602 u8 reserved_3[0x20];
2603
2604 u8 opt_param_mask[0x20];
2605
2606 u8 reserved_4[0x20];
2607
2608 struct mlx5_ifc_qpc_bits qpc;
2609
2610 u8 reserved_5[0x80];
2611};
2612
2613struct mlx5_ifc_sqd2rts_qp_out_bits {
2614 u8 status[0x8];
2615 u8 reserved_0[0x18];
2616
2617 u8 syndrome[0x20];
2618
2619 u8 reserved_1[0x40];
2620};
2621
2622struct mlx5_ifc_sqd2rts_qp_in_bits {
2623 u8 opcode[0x10];
2624 u8 reserved_0[0x10];
2625
2626 u8 reserved_1[0x10];
2627 u8 op_mod[0x10];
2628
2629 u8 reserved_2[0x8];
2630 u8 qpn[0x18];
2631
2632 u8 reserved_3[0x20];
2633
2634 u8 opt_param_mask[0x20];
2635
2636 u8 reserved_4[0x20];
2637
2638 struct mlx5_ifc_qpc_bits qpc;
2639
2640 u8 reserved_5[0x80];
2641};
2642
2643struct mlx5_ifc_set_roce_address_out_bits {
2644 u8 status[0x8];
2645 u8 reserved_0[0x18];
2646
2647 u8 syndrome[0x20];
2648
2649 u8 reserved_1[0x40];
2650};
2651
2652struct mlx5_ifc_set_roce_address_in_bits {
2653 u8 opcode[0x10];
2654 u8 reserved_0[0x10];
2655
2656 u8 reserved_1[0x10];
2657 u8 op_mod[0x10];
2658
2659 u8 roce_address_index[0x10];
2660 u8 reserved_2[0x10];
2661
2662 u8 reserved_3[0x20];
2663
2664 struct mlx5_ifc_roce_addr_layout_bits roce_address;
2665};
2666
2667struct mlx5_ifc_set_mad_demux_out_bits {
2668 u8 status[0x8];
2669 u8 reserved_0[0x18];
2670
2671 u8 syndrome[0x20];
2672
2673 u8 reserved_1[0x40];
2674};
2675
2676enum {
2677 MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0,
2678 MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2,
2679};
2680
2681struct mlx5_ifc_set_mad_demux_in_bits {
2682 u8 opcode[0x10];
2683 u8 reserved_0[0x10];
2684
2685 u8 reserved_1[0x10];
2686 u8 op_mod[0x10];
2687
2688 u8 reserved_2[0x20];
2689
2690 u8 reserved_3[0x6];
2691 u8 demux_mode[0x2];
2692 u8 reserved_4[0x18];
2693};
2694
2695struct mlx5_ifc_set_l2_table_entry_out_bits {
2696 u8 status[0x8];
2697 u8 reserved_0[0x18];
2698
2699 u8 syndrome[0x20];
2700
2701 u8 reserved_1[0x40];
2702};
2703
2704struct mlx5_ifc_set_l2_table_entry_in_bits {
2705 u8 opcode[0x10];
2706 u8 reserved_0[0x10];
2707
2708 u8 reserved_1[0x10];
2709 u8 op_mod[0x10];
2710
2711 u8 reserved_2[0x60];
2712
2713 u8 reserved_3[0x8];
2714 u8 table_index[0x18];
2715
2716 u8 reserved_4[0x20];
2717
2718 u8 reserved_5[0x13];
2719 u8 vlan_valid[0x1];
2720 u8 vlan[0xc];
2721
2722 struct mlx5_ifc_mac_address_layout_bits mac_address;
2723
2724 u8 reserved_6[0xc0];
2725};
2726
2727struct mlx5_ifc_set_issi_out_bits {
2728 u8 status[0x8];
2729 u8 reserved_0[0x18];
2730
2731 u8 syndrome[0x20];
2732
2733 u8 reserved_1[0x40];
2734};
2735
2736struct mlx5_ifc_set_issi_in_bits {
2737 u8 opcode[0x10];
2738 u8 reserved_0[0x10];
2739
2740 u8 reserved_1[0x10];
2741 u8 op_mod[0x10];
2742
2743 u8 reserved_2[0x10];
2744 u8 current_issi[0x10];
2745
2746 u8 reserved_3[0x20];
2747};
2748
2749struct mlx5_ifc_set_hca_cap_out_bits {
2750 u8 status[0x8];
2751 u8 reserved_0[0x18];
2752
2753 u8 syndrome[0x20];
2754
2755 u8 reserved_1[0x40];
305 };  |  2756 };
306     |  2757
307 struct mlx5_ifc_set_hca_cap_in_bits {  |  2758 struct mlx5_ifc_set_hca_cap_in_bits {
@@ -313,10 +2764,653 @@ struct mlx5_ifc_set_hca_cap_in_bits {
313     |  2764
314 u8 reserved_2[0x40];  |  2765 u8 reserved_2[0x40];
315     |  2766
316 struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;  |  2767 union mlx5_ifc_hca_cap_union_bits capability;
317 };  |  2768 };
318     |  2769
319 struct mlx5_ifc_query_hca_cap_in_bits {  |  2770 struct mlx5_ifc_set_fte_out_bits {
2771 u8 status[0x8];
2772 u8 reserved_0[0x18];
2773
2774 u8 syndrome[0x20];
2775
2776 u8 reserved_1[0x40];
2777};
2778
2779struct mlx5_ifc_set_fte_in_bits {
2780 u8 opcode[0x10];
2781 u8 reserved_0[0x10];
2782
2783 u8 reserved_1[0x10];
2784 u8 op_mod[0x10];
2785
2786 u8 reserved_2[0x40];
2787
2788 u8 table_type[0x8];
2789 u8 reserved_3[0x18];
2790
2791 u8 reserved_4[0x8];
2792 u8 table_id[0x18];
2793
2794 u8 reserved_5[0x40];
2795
2796 u8 flow_index[0x20];
2797
2798 u8 reserved_6[0xe0];
2799
2800 struct mlx5_ifc_flow_context_bits flow_context;
2801};
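
A non-authoritative sketch of filling this inbox together with the mlx5_ifc_flow_context_bits defined earlier, assuming the MLX5_SET()/MLX5_ADDR_OF() accessors; the helper name and parameters are assumptions, and a real forwarding entry would also fill destination_list_size and the destination[] array:

static void example_fill_set_fte(void *in, u8 table_type, u32 table_id,
				 u32 flow_index, u32 group_id, u32 flow_tag)
{
	void *fc = MLX5_ADDR_OF(set_fte_in, in, flow_context);

	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);
	MLX5_SET(set_fte_in, in, flow_index, flow_index);

	MLX5_SET(flow_context, fc, group_id, group_id);
	MLX5_SET(flow_context, fc, flow_tag, flow_tag);
	MLX5_SET(flow_context, fc, action, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
}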
2802
2803struct mlx5_ifc_rts2rts_qp_out_bits {
2804 u8 status[0x8];
2805 u8 reserved_0[0x18];
2806
2807 u8 syndrome[0x20];
2808
2809 u8 reserved_1[0x40];
2810};
2811
2812struct mlx5_ifc_rts2rts_qp_in_bits {
2813 u8 opcode[0x10];
2814 u8 reserved_0[0x10];
2815
2816 u8 reserved_1[0x10];
2817 u8 op_mod[0x10];
2818
2819 u8 reserved_2[0x8];
2820 u8 qpn[0x18];
2821
2822 u8 reserved_3[0x20];
2823
2824 u8 opt_param_mask[0x20];
2825
2826 u8 reserved_4[0x20];
2827
2828 struct mlx5_ifc_qpc_bits qpc;
2829
2830 u8 reserved_5[0x80];
2831};
2832
2833struct mlx5_ifc_rtr2rts_qp_out_bits {
2834 u8 status[0x8];
2835 u8 reserved_0[0x18];
2836
2837 u8 syndrome[0x20];
2838
2839 u8 reserved_1[0x40];
2840};
2841
2842struct mlx5_ifc_rtr2rts_qp_in_bits {
2843 u8 opcode[0x10];
2844 u8 reserved_0[0x10];
2845
2846 u8 reserved_1[0x10];
2847 u8 op_mod[0x10];
2848
2849 u8 reserved_2[0x8];
2850 u8 qpn[0x18];
2851
2852 u8 reserved_3[0x20];
2853
2854 u8 opt_param_mask[0x20];
2855
2856 u8 reserved_4[0x20];
2857
2858 struct mlx5_ifc_qpc_bits qpc;
2859
2860 u8 reserved_5[0x80];
2861};
2862
2863struct mlx5_ifc_rst2init_qp_out_bits {
2864 u8 status[0x8];
2865 u8 reserved_0[0x18];
2866
2867 u8 syndrome[0x20];
2868
2869 u8 reserved_1[0x40];
2870};
2871
2872struct mlx5_ifc_rst2init_qp_in_bits {
2873 u8 opcode[0x10];
2874 u8 reserved_0[0x10];
2875
2876 u8 reserved_1[0x10];
2877 u8 op_mod[0x10];
2878
2879 u8 reserved_2[0x8];
2880 u8 qpn[0x18];
2881
2882 u8 reserved_3[0x20];
2883
2884 u8 opt_param_mask[0x20];
2885
2886 u8 reserved_4[0x20];
2887
2888 struct mlx5_ifc_qpc_bits qpc;
2889
2890 u8 reserved_5[0x80];
2891};
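
Likewise, a hedged illustration (not the driver's actual RST2INIT path) of programming a few mlx5_ifc_qpc_bits fields through this inbox with MLX5_SET()/MLX5_ADDR_OF(); the helper name and the field selection are examples only:

static void example_fill_rst2init(void *in, u32 qpn, u32 pdn, u32 cqn)
{
	void *qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

	MLX5_SET(rst2init_qp_in, in, qpn, qpn);

	/* a few of the qpc fields defined earlier in this diff */
	MLX5_SET(qpc, qpc, pd, pdn);
	MLX5_SET(qpc, qpc, cqn_snd, cqn);
	MLX5_SET(qpc, qpc, cqn_rcv, cqn);
	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
}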
2892
2893struct mlx5_ifc_query_xrc_srq_out_bits {
2894 u8 status[0x8];
2895 u8 reserved_0[0x18];
2896
2897 u8 syndrome[0x20];
2898
2899 u8 reserved_1[0x40];
2900
2901 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
2902
2903 u8 reserved_2[0x600];
2904
2905 u8 pas[0][0x40];
2906};
2907
2908struct mlx5_ifc_query_xrc_srq_in_bits {
2909 u8 opcode[0x10];
2910 u8 reserved_0[0x10];
2911
2912 u8 reserved_1[0x10];
2913 u8 op_mod[0x10];
2914
2915 u8 reserved_2[0x8];
2916 u8 xrc_srqn[0x18];
2917
2918 u8 reserved_3[0x20];
2919};
2920
2921enum {
2922 MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0,
2923 MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1,
2924};
2925
2926struct mlx5_ifc_query_vport_state_out_bits {
2927 u8 status[0x8];
2928 u8 reserved_0[0x18];
2929
2930 u8 syndrome[0x20];
2931
2932 u8 reserved_1[0x20];
2933
2934 u8 reserved_2[0x18];
2935 u8 admin_state[0x4];
2936 u8 state[0x4];
2937};
2938
2939enum {
2940 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
2941};
2942
2943struct mlx5_ifc_query_vport_state_in_bits {
2944 u8 opcode[0x10];
2945 u8 reserved_0[0x10];
2946
2947 u8 reserved_1[0x10];
2948 u8 op_mod[0x10];
2949
2950 u8 other_vport[0x1];
2951 u8 reserved_2[0xf];
2952 u8 vport_number[0x10];
2953
2954 u8 reserved_3[0x20];
2955};
2956
2957struct mlx5_ifc_query_vport_counter_out_bits {
2958 u8 status[0x8];
2959 u8 reserved_0[0x18];
2960
2961 u8 syndrome[0x20];
2962
2963 u8 reserved_1[0x40];
2964
2965 struct mlx5_ifc_traffic_counter_bits received_errors;
2966
2967 struct mlx5_ifc_traffic_counter_bits transmit_errors;
2968
2969 struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
2970
2971 struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
2972
2973 struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
2974
2975 struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
2976
2977 struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
2978
2979 struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
2980
2981 struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
2982
2983 struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
2984
2985 struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
2986
2987 struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
2988
2989 u8 reserved_2[0xa00];
2990};
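
A hedged sketch of pulling one of the 64-bit traffic counters out of this output layout; MLX5_GET64() from include/linux/mlx5/device.h reads a naturally aligned 0x40-bit field and nested field names resolve through the generated structs. The helper name is an assumption:

static u64 example_rx_unicast_packets(void *out)
{
	/* received_eth_unicast is a mlx5_ifc_traffic_counter_bits member */
	return MLX5_GET64(query_vport_counter_out, out,
			  received_eth_unicast.packets);
}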
2991
2992enum {
2993 MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0,
2994};
2995
2996struct mlx5_ifc_query_vport_counter_in_bits {
2997 u8 opcode[0x10];
2998 u8 reserved_0[0x10];
2999
3000 u8 reserved_1[0x10];
3001 u8 op_mod[0x10];
3002
3003 u8 other_vport[0x1];
3004 u8 reserved_2[0xf];
3005 u8 vport_number[0x10];
3006
3007 u8 reserved_3[0x60];
3008
3009 u8 clear[0x1];
3010 u8 reserved_4[0x1f];
3011
3012 u8 reserved_5[0x20];
3013};
3014
3015struct mlx5_ifc_query_tis_out_bits {
3016 u8 status[0x8];
3017 u8 reserved_0[0x18];
3018
3019 u8 syndrome[0x20];
3020
3021 u8 reserved_1[0x40];
3022
3023 struct mlx5_ifc_tisc_bits tis_context;
3024};
3025
3026struct mlx5_ifc_query_tis_in_bits {
3027 u8 opcode[0x10];
3028 u8 reserved_0[0x10];
3029
3030 u8 reserved_1[0x10];
3031 u8 op_mod[0x10];
3032
3033 u8 reserved_2[0x8];
3034 u8 tisn[0x18];
3035
3036 u8 reserved_3[0x20];
3037};
3038
3039struct mlx5_ifc_query_tir_out_bits {
3040 u8 status[0x8];
3041 u8 reserved_0[0x18];
3042
3043 u8 syndrome[0x20];
3044
3045 u8 reserved_1[0xc0];
3046
3047 struct mlx5_ifc_tirc_bits tir_context;
3048};
3049
3050struct mlx5_ifc_query_tir_in_bits {
3051 u8 opcode[0x10];
3052 u8 reserved_0[0x10];
3053
3054 u8 reserved_1[0x10];
3055 u8 op_mod[0x10];
3056
3057 u8 reserved_2[0x8];
3058 u8 tirn[0x18];
3059
3060 u8 reserved_3[0x20];
3061};
3062
3063struct mlx5_ifc_query_srq_out_bits {
3064 u8 status[0x8];
3065 u8 reserved_0[0x18];
3066
3067 u8 syndrome[0x20];
3068
3069 u8 reserved_1[0x40];
3070
3071 struct mlx5_ifc_srqc_bits srq_context_entry;
3072
3073 u8 reserved_2[0x600];
3074
3075 u8 pas[0][0x40];
3076};
3077
3078struct mlx5_ifc_query_srq_in_bits {
3079 u8 opcode[0x10];
3080 u8 reserved_0[0x10];
3081
3082 u8 reserved_1[0x10];
3083 u8 op_mod[0x10];
3084
3085 u8 reserved_2[0x8];
3086 u8 srqn[0x18];
3087
3088 u8 reserved_3[0x20];
3089};
3090
3091struct mlx5_ifc_query_sq_out_bits {
3092 u8 status[0x8];
3093 u8 reserved_0[0x18];
3094
3095 u8 syndrome[0x20];
3096
3097 u8 reserved_1[0xc0];
3098
3099 struct mlx5_ifc_sqc_bits sq_context;
3100};
3101
3102struct mlx5_ifc_query_sq_in_bits {
3103 u8 opcode[0x10];
3104 u8 reserved_0[0x10];
3105
3106 u8 reserved_1[0x10];
3107 u8 op_mod[0x10];
3108
3109 u8 reserved_2[0x8];
3110 u8 sqn[0x18];
3111
3112 u8 reserved_3[0x20];
3113};
3114
3115struct mlx5_ifc_query_special_contexts_out_bits {
3116 u8 status[0x8];
3117 u8 reserved_0[0x18];
3118
3119 u8 syndrome[0x20];
3120
3121 u8 reserved_1[0x20];
3122
3123 u8 resd_lkey[0x20];
3124};
3125
3126struct mlx5_ifc_query_special_contexts_in_bits {
3127 u8 opcode[0x10];
3128 u8 reserved_0[0x10];
3129
3130 u8 reserved_1[0x10];
3131 u8 op_mod[0x10];
3132
3133 u8 reserved_2[0x40];
3134};
3135
3136struct mlx5_ifc_query_rqt_out_bits {
3137 u8 status[0x8];
3138 u8 reserved_0[0x18];
3139
3140 u8 syndrome[0x20];
3141
3142 u8 reserved_1[0xc0];
3143
3144 struct mlx5_ifc_rqtc_bits rqt_context;
3145};
3146
3147struct mlx5_ifc_query_rqt_in_bits {
3148 u8 opcode[0x10];
3149 u8 reserved_0[0x10];
3150
3151 u8 reserved_1[0x10];
3152 u8 op_mod[0x10];
3153
3154 u8 reserved_2[0x8];
3155 u8 rqtn[0x18];
3156
3157 u8 reserved_3[0x20];
3158};
3159
3160struct mlx5_ifc_query_rq_out_bits {
3161 u8 status[0x8];
3162 u8 reserved_0[0x18];
3163
3164 u8 syndrome[0x20];
3165
3166 u8 reserved_1[0xc0];
3167
3168 struct mlx5_ifc_rqc_bits rq_context;
3169};
3170
3171struct mlx5_ifc_query_rq_in_bits {
3172 u8 opcode[0x10];
3173 u8 reserved_0[0x10];
3174
3175 u8 reserved_1[0x10];
3176 u8 op_mod[0x10];
3177
3178 u8 reserved_2[0x8];
3179 u8 rqn[0x18];
3180
3181 u8 reserved_3[0x20];
3182};
3183
3184struct mlx5_ifc_query_roce_address_out_bits {
3185 u8 status[0x8];
3186 u8 reserved_0[0x18];
3187
3188 u8 syndrome[0x20];
3189
3190 u8 reserved_1[0x40];
3191
3192 struct mlx5_ifc_roce_addr_layout_bits roce_address;
3193};
3194
3195struct mlx5_ifc_query_roce_address_in_bits {
3196 u8 opcode[0x10];
3197 u8 reserved_0[0x10];
3198
3199 u8 reserved_1[0x10];
3200 u8 op_mod[0x10];
3201
3202 u8 roce_address_index[0x10];
3203 u8 reserved_2[0x10];
3204
3205 u8 reserved_3[0x20];
3206};
3207
3208struct mlx5_ifc_query_rmp_out_bits {
3209 u8 status[0x8];
3210 u8 reserved_0[0x18];
3211
3212 u8 syndrome[0x20];
3213
3214 u8 reserved_1[0xc0];
3215
3216 struct mlx5_ifc_rmpc_bits rmp_context;
3217};
3218
3219struct mlx5_ifc_query_rmp_in_bits {
3220 u8 opcode[0x10];
3221 u8 reserved_0[0x10];
3222
3223 u8 reserved_1[0x10];
3224 u8 op_mod[0x10];
3225
3226 u8 reserved_2[0x8];
3227 u8 rmpn[0x18];
3228
3229 u8 reserved_3[0x20];
3230};
3231
3232struct mlx5_ifc_query_qp_out_bits {
3233 u8 status[0x8];
3234 u8 reserved_0[0x18];
3235
3236 u8 syndrome[0x20];
3237
3238 u8 reserved_1[0x40];
3239
3240 u8 opt_param_mask[0x20];
3241
3242 u8 reserved_2[0x20];
3243
3244 struct mlx5_ifc_qpc_bits qpc;
3245
3246 u8 reserved_3[0x80];
3247
3248 u8 pas[0][0x40];
3249};
3250
3251struct mlx5_ifc_query_qp_in_bits {
3252 u8 opcode[0x10];
3253 u8 reserved_0[0x10];
3254
3255 u8 reserved_1[0x10];
3256 u8 op_mod[0x10];
3257
3258 u8 reserved_2[0x8];
3259 u8 qpn[0x18];
3260
3261 u8 reserved_3[0x20];
3262};
3263
3264struct mlx5_ifc_query_q_counter_out_bits {
3265 u8 status[0x8];
3266 u8 reserved_0[0x18];
3267
3268 u8 syndrome[0x20];
3269
3270 u8 reserved_1[0x40];
3271
3272 u8 rx_write_requests[0x20];
3273
3274 u8 reserved_2[0x20];
3275
3276 u8 rx_read_requests[0x20];
3277
3278 u8 reserved_3[0x20];
3279
3280 u8 rx_atomic_requests[0x20];
3281
3282 u8 reserved_4[0x20];
3283
3284 u8 rx_dct_connect[0x20];
3285
3286 u8 reserved_5[0x20];
3287
3288 u8 out_of_buffer[0x20];
3289
3290 u8 reserved_6[0x20];
3291
3292 u8 out_of_sequence[0x20];
3293
3294 u8 reserved_7[0x620];
3295};
3296
3297struct mlx5_ifc_query_q_counter_in_bits {
3298 u8 opcode[0x10];
3299 u8 reserved_0[0x10];
3300
3301 u8 reserved_1[0x10];
3302 u8 op_mod[0x10];
3303
3304 u8 reserved_2[0x80];
3305
3306 u8 clear[0x1];
3307 u8 reserved_3[0x1f];
3308
3309 u8 reserved_4[0x18];
3310 u8 counter_set_id[0x8];
3311};
3312
3313struct mlx5_ifc_query_pages_out_bits {
3314 u8 status[0x8];
3315 u8 reserved_0[0x18];
3316
3317 u8 syndrome[0x20];
3318
3319 u8 reserved_1[0x10];
3320 u8 function_id[0x10];
3321
3322 u8 num_pages[0x20];
3323};
3324
3325enum {
3326 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1,
3327 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2,
3328 MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3,
3329};
3330
3331struct mlx5_ifc_query_pages_in_bits {
3332 u8 opcode[0x10];
3333 u8 reserved_0[0x10];
3334
3335 u8 reserved_1[0x10];
3336 u8 op_mod[0x10];
3337
3338 u8 reserved_2[0x10];
3339 u8 function_id[0x10];
3340
3341 u8 reserved_3[0x20];
3342};
3343
3344struct mlx5_ifc_query_nic_vport_context_out_bits {
3345 u8 status[0x8];
3346 u8 reserved_0[0x18];
3347
3348 u8 syndrome[0x20];
3349
3350 u8 reserved_1[0x40];
3351
3352 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
3353};
3354
3355struct mlx5_ifc_query_nic_vport_context_in_bits {
3356 u8 opcode[0x10];
3357 u8 reserved_0[0x10];
3358
3359 u8 reserved_1[0x10];
3360 u8 op_mod[0x10];
3361
3362 u8 other_vport[0x1];
3363 u8 reserved_2[0xf];
3364 u8 vport_number[0x10];
3365
3366 u8 reserved_3[0x5];
3367 u8 allowed_list_type[0x3];
3368 u8 reserved_4[0x18];
3369};
3370
3371struct mlx5_ifc_query_mkey_out_bits {
3372 u8 status[0x8];
3373 u8 reserved_0[0x18];
3374
3375 u8 syndrome[0x20];
3376
3377 u8 reserved_1[0x40];
3378
3379 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
3380
3381 u8 reserved_2[0x600];
3382
3383 u8 bsf0_klm0_pas_mtt0_1[16][0x8];
3384
3385 u8 bsf1_klm1_pas_mtt2_3[16][0x8];
3386};
3387
3388struct mlx5_ifc_query_mkey_in_bits {
3389 u8 opcode[0x10];
3390 u8 reserved_0[0x10];
3391
3392 u8 reserved_1[0x10];
3393 u8 op_mod[0x10];
3394
3395 u8 reserved_2[0x8];
3396 u8 mkey_index[0x18];
3397
3398 u8 pg_access[0x1];
3399 u8 reserved_3[0x1f];
3400};
3401
3402struct mlx5_ifc_query_mad_demux_out_bits {
3403 u8 status[0x8];
3404 u8 reserved_0[0x18];
3405
3406 u8 syndrome[0x20];
3407
3408 u8 reserved_1[0x40];
3409
3410 u8 mad_dumux_parameters_block[0x20];
3411};
3412
3413struct mlx5_ifc_query_mad_demux_in_bits {
320 u8 opcode[0x10];  |  3414 u8 opcode[0x10];
321 u8 reserved_0[0x10];  |  3415 u8 reserved_0[0x10];
322     |  3416
@@ -326,6 +3420,146 @@ struct mlx5_ifc_query_hca_cap_in_bits {
326 u8 reserved_2[0x40];  |  3420 u8 reserved_2[0x40];
327 };  |  3421 };
328     |  3422
3423struct mlx5_ifc_query_l2_table_entry_out_bits {
3424 u8 status[0x8];
3425 u8 reserved_0[0x18];
3426
3427 u8 syndrome[0x20];
3428
3429 u8 reserved_1[0xa0];
3430
3431 u8 reserved_2[0x13];
3432 u8 vlan_valid[0x1];
3433 u8 vlan[0xc];
3434
3435 struct mlx5_ifc_mac_address_layout_bits mac_address;
3436
3437 u8 reserved_3[0xc0];
3438};
3439
3440struct mlx5_ifc_query_l2_table_entry_in_bits {
3441 u8 opcode[0x10];
3442 u8 reserved_0[0x10];
3443
3444 u8 reserved_1[0x10];
3445 u8 op_mod[0x10];
3446
3447 u8 reserved_2[0x60];
3448
3449 u8 reserved_3[0x8];
3450 u8 table_index[0x18];
3451
3452 u8 reserved_4[0x140];
3453};
3454
3455struct mlx5_ifc_query_issi_out_bits {
3456 u8 status[0x8];
3457 u8 reserved_0[0x18];
3458
3459 u8 syndrome[0x20];
3460
3461 u8 reserved_1[0x10];
3462 u8 current_issi[0x10];
3463
3464 u8 reserved_2[0xa0];
3465
3466 u8 supported_issi_reserved[76][0x8];
3467 u8 supported_issi_dw0[0x20];
3468};
3469
3470struct mlx5_ifc_query_issi_in_bits {
3471 u8 opcode[0x10];
3472 u8 reserved_0[0x10];
3473
3474 u8 reserved_1[0x10];
3475 u8 op_mod[0x10];
3476
3477 u8 reserved_2[0x40];
3478};
3479
3480struct mlx5_ifc_query_hca_vport_pkey_out_bits {
3481 u8 status[0x8];
3482 u8 reserved_0[0x18];
3483
3484 u8 syndrome[0x20];
3485
3486 u8 reserved_1[0x40];
3487
3488 struct mlx5_ifc_pkey_bits pkey[0];
3489};
3490
3491struct mlx5_ifc_query_hca_vport_pkey_in_bits {
3492 u8 opcode[0x10];
3493 u8 reserved_0[0x10];
3494
3495 u8 reserved_1[0x10];
3496 u8 op_mod[0x10];
3497
3498 u8 other_vport[0x1];
3499 u8 reserved_2[0xb];
3500 u8 port_num[0x4];
3501 u8 vport_number[0x10];
3502
3503 u8 reserved_3[0x10];
3504 u8 pkey_index[0x10];
3505};
3506
3507struct mlx5_ifc_query_hca_vport_gid_out_bits {
3508 u8 status[0x8];
3509 u8 reserved_0[0x18];
3510
3511 u8 syndrome[0x20];
3512
3513 u8 reserved_1[0x20];
3514
3515 u8 gids_num[0x10];
3516 u8 reserved_2[0x10];
3517
3518 struct mlx5_ifc_array128_auto_bits gid[0];
3519};
3520
3521struct mlx5_ifc_query_hca_vport_gid_in_bits {
3522 u8 opcode[0x10];
3523 u8 reserved_0[0x10];
3524
3525 u8 reserved_1[0x10];
3526 u8 op_mod[0x10];
3527
3528 u8 other_vport[0x1];
3529 u8 reserved_2[0xb];
3530 u8 port_num[0x4];
3531 u8 vport_number[0x10];
3532
3533 u8 reserved_3[0x10];
3534 u8 gid_index[0x10];
3535};
3536
3537struct mlx5_ifc_query_hca_vport_context_out_bits {
3538 u8 status[0x8];
3539 u8 reserved_0[0x18];
3540
3541 u8 syndrome[0x20];
3542
3543 u8 reserved_1[0x40];
3544
3545 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
3546};
3547
3548struct mlx5_ifc_query_hca_vport_context_in_bits {
3549 u8 opcode[0x10];
3550 u8 reserved_0[0x10];
3551
3552 u8 reserved_1[0x10];
3553 u8 op_mod[0x10];
3554
3555 u8 other_vport[0x1];
3556 u8 reserved_2[0xb];
3557 u8 port_num[0x4];
3558 u8 vport_number[0x10];
3559
3560 u8 reserved_3[0x20];
3561};
3562
329 struct mlx5_ifc_query_hca_cap_out_bits {  |  3563 struct mlx5_ifc_query_hca_cap_out_bits {
330 u8 status[0x8];  |  3564 u8 status[0x8];
331 u8 reserved_0[0x18];  |  3565 u8 reserved_0[0x18];
@@ -334,16 +3568,3216 @@ struct mlx5_ifc_query_hca_cap_out_bits {
334     |  3568
335 u8 reserved_1[0x40];  |  3569 u8 reserved_1[0x40];
336     |  3570
337 u8 capability_struct[256][0x8];  |  3571 union mlx5_ifc_hca_cap_union_bits capability;
338 };  |  3572 };
339     |  3573
340 struct mlx5_ifc_set_hca_cap_out_bits {  |  3574 struct mlx5_ifc_query_hca_cap_in_bits {
3575 u8 opcode[0x10];
3576 u8 reserved_0[0x10];
3577
3578 u8 reserved_1[0x10];
3579 u8 op_mod[0x10];
3580
3581 u8 reserved_2[0x40];
3582};
3583
3584struct mlx5_ifc_query_flow_table_out_bits {
3585 u8 status[0x8];
3586 u8 reserved_0[0x18];
3587
3588 u8 syndrome[0x20];
3589
3590 u8 reserved_1[0x80];
3591
3592 u8 reserved_2[0x8];
3593 u8 level[0x8];
3594 u8 reserved_3[0x8];
3595 u8 log_size[0x8];
3596
3597 u8 reserved_4[0x120];
3598};
3599
3600struct mlx5_ifc_query_flow_table_in_bits {
3601 u8 opcode[0x10];
3602 u8 reserved_0[0x10];
3603
3604 u8 reserved_1[0x10];
3605 u8 op_mod[0x10];
3606
3607 u8 reserved_2[0x40];
3608
3609 u8 table_type[0x8];
3610 u8 reserved_3[0x18];
3611
3612 u8 reserved_4[0x8];
3613 u8 table_id[0x18];
3614
3615 u8 reserved_5[0x140];
3616};
3617
3618struct mlx5_ifc_query_fte_out_bits {
3619 u8 status[0x8];
3620 u8 reserved_0[0x18];
3621
3622 u8 syndrome[0x20];
3623
3624 u8 reserved_1[0x1c0];
3625
3626 struct mlx5_ifc_flow_context_bits flow_context;
3627};
3628
3629struct mlx5_ifc_query_fte_in_bits {
3630 u8 opcode[0x10];
3631 u8 reserved_0[0x10];
3632
3633 u8 reserved_1[0x10];
3634 u8 op_mod[0x10];
3635
3636 u8 reserved_2[0x40];
3637
3638 u8 table_type[0x8];
3639 u8 reserved_3[0x18];
3640
3641 u8 reserved_4[0x8];
3642 u8 table_id[0x18];
3643
3644 u8 reserved_5[0x40];
3645
3646 u8 flow_index[0x20];
3647
3648 u8 reserved_6[0xe0];
3649};
3650
3651enum {
3652 MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
3653 MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
3654 MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
3655};
3656
3657struct mlx5_ifc_query_flow_group_out_bits {
3658 u8 status[0x8];
3659 u8 reserved_0[0x18];
3660
3661 u8 syndrome[0x20];
3662
3663 u8 reserved_1[0xa0];
3664
3665 u8 start_flow_index[0x20];
3666
3667 u8 reserved_2[0x20];
3668
3669 u8 end_flow_index[0x20];
3670
3671 u8 reserved_3[0xa0];
3672
3673 u8 reserved_4[0x18];
3674 u8 match_criteria_enable[0x8];
3675
3676 struct mlx5_ifc_fte_match_param_bits match_criteria;
3677
3678 u8 reserved_5[0xe00];
3679};
3680
3681struct mlx5_ifc_query_flow_group_in_bits {
3682 u8 opcode[0x10];
3683 u8 reserved_0[0x10];
3684
3685 u8 reserved_1[0x10];
3686 u8 op_mod[0x10];
3687
3688 u8 reserved_2[0x40];
3689
3690 u8 table_type[0x8];
3691 u8 reserved_3[0x18];
3692
3693 u8 reserved_4[0x8];
3694 u8 table_id[0x18];
3695
3696 u8 group_id[0x20];
3697
3698 u8 reserved_5[0x120];
3699};
3700
3701struct mlx5_ifc_query_eq_out_bits {
3702 u8 status[0x8];
3703 u8 reserved_0[0x18];
3704
3705 u8 syndrome[0x20];
3706
3707 u8 reserved_1[0x40];
3708
3709 struct mlx5_ifc_eqc_bits eq_context_entry;
3710
3711 u8 reserved_2[0x40];
3712
3713 u8 event_bitmask[0x40];
3714
3715 u8 reserved_3[0x580];
3716
3717 u8 pas[0][0x40];
3718};
3719
3720struct mlx5_ifc_query_eq_in_bits {
3721 u8 opcode[0x10];
3722 u8 reserved_0[0x10];
3723
3724 u8 reserved_1[0x10];
3725 u8 op_mod[0x10];
3726
3727 u8 reserved_2[0x18];
3728 u8 eq_number[0x8];
3729
3730 u8 reserved_3[0x20];
3731};
3732
3733struct mlx5_ifc_query_dct_out_bits {
3734 u8 status[0x8];
3735 u8 reserved_0[0x18];
3736
3737 u8 syndrome[0x20];
3738
3739 u8 reserved_1[0x40];
3740
3741 struct mlx5_ifc_dctc_bits dct_context_entry;
3742
3743 u8 reserved_2[0x180];
3744};
3745
3746struct mlx5_ifc_query_dct_in_bits {
3747 u8 opcode[0x10];
3748 u8 reserved_0[0x10];
3749
3750 u8 reserved_1[0x10];
3751 u8 op_mod[0x10];
3752
3753 u8 reserved_2[0x8];
3754 u8 dctn[0x18];
3755
3756 u8 reserved_3[0x20];
3757};
3758
3759struct mlx5_ifc_query_cq_out_bits {
341 u8 status[0x8];  |  3760 u8 status[0x8];
342 u8 reserved_0[0x18];  |  3761 u8 reserved_0[0x18];
343     |  3762
344 u8 syndrome[0x20];  |  3763 u8 syndrome[0x20];
345     |  3764
346 u8 reserved_1[0x40];  |  3765 u8 reserved_1[0x40];
3766
3767 struct mlx5_ifc_cqc_bits cq_context;
3768
3769 u8 reserved_2[0x600];
3770
3771 u8 pas[0][0x40];
3772};
3773
3774struct mlx5_ifc_query_cq_in_bits {
3775 u8 opcode[0x10];
3776 u8 reserved_0[0x10];
3777
3778 u8 reserved_1[0x10];
3779 u8 op_mod[0x10];
3780
3781 u8 reserved_2[0x8];
3782 u8 cqn[0x18];
3783
3784 u8 reserved_3[0x20];
3785};
3786
3787struct mlx5_ifc_query_cong_status_out_bits {
3788 u8 status[0x8];
3789 u8 reserved_0[0x18];
3790
3791 u8 syndrome[0x20];
3792
3793 u8 reserved_1[0x20];
3794
3795 u8 enable[0x1];
3796 u8 tag_enable[0x1];
3797 u8 reserved_2[0x1e];
3798};
3799
3800struct mlx5_ifc_query_cong_status_in_bits {
3801 u8 opcode[0x10];
3802 u8 reserved_0[0x10];
3803
3804 u8 reserved_1[0x10];
3805 u8 op_mod[0x10];
3806
3807 u8 reserved_2[0x18];
3808 u8 priority[0x4];
3809 u8 cong_protocol[0x4];
3810
3811 u8 reserved_3[0x20];
3812};
3813
3814struct mlx5_ifc_query_cong_statistics_out_bits {
3815 u8 status[0x8];
3816 u8 reserved_0[0x18];
3817
3818 u8 syndrome[0x20];
3819
3820 u8 reserved_1[0x40];
3821
3822 u8 cur_flows[0x20];
3823
3824 u8 sum_flows[0x20];
3825
3826 u8 cnp_ignored_high[0x20];
3827
3828 u8 cnp_ignored_low[0x20];
3829
3830 u8 cnp_handled_high[0x20];
3831
3832 u8 cnp_handled_low[0x20];
3833
3834 u8 reserved_2[0x100];
3835
3836 u8 time_stamp_high[0x20];
3837
3838 u8 time_stamp_low[0x20];
3839
3840 u8 accumulators_period[0x20];
3841
3842 u8 ecn_marked_roce_packets_high[0x20];
3843
3844 u8 ecn_marked_roce_packets_low[0x20];
3845
3846 u8 cnps_sent_high[0x20];
3847
3848 u8 cnps_sent_low[0x20];
3849
3850 u8 reserved_3[0x560];
3851};
3852
3853struct mlx5_ifc_query_cong_statistics_in_bits {
3854 u8 opcode[0x10];
3855 u8 reserved_0[0x10];
3856
3857 u8 reserved_1[0x10];
3858 u8 op_mod[0x10];
3859
3860 u8 clear[0x1];
3861 u8 reserved_2[0x1f];
3862
3863 u8 reserved_3[0x20];
3864};
3865
3866struct mlx5_ifc_query_cong_params_out_bits {
3867 u8 status[0x8];
3868 u8 reserved_0[0x18];
3869
3870 u8 syndrome[0x20];
3871
3872 u8 reserved_1[0x40];
3873
3874 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
3875};
3876
3877struct mlx5_ifc_query_cong_params_in_bits {
3878 u8 opcode[0x10];
3879 u8 reserved_0[0x10];
3880
3881 u8 reserved_1[0x10];
3882 u8 op_mod[0x10];
3883
3884 u8 reserved_2[0x1c];
3885 u8 cong_protocol[0x4];
3886
3887 u8 reserved_3[0x20];
3888};
3889
3890struct mlx5_ifc_query_adapter_out_bits {
3891 u8 status[0x8];
3892 u8 reserved_0[0x18];
3893
3894 u8 syndrome[0x20];
3895
3896 u8 reserved_1[0x40];
3897
3898 struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
3899};
3900
3901struct mlx5_ifc_query_adapter_in_bits {
3902 u8 opcode[0x10];
3903 u8 reserved_0[0x10];
3904
3905 u8 reserved_1[0x10];
3906 u8 op_mod[0x10];
3907
3908 u8 reserved_2[0x40];
3909};
3910
3911struct mlx5_ifc_qp_2rst_out_bits {
3912 u8 status[0x8];
3913 u8 reserved_0[0x18];
3914
3915 u8 syndrome[0x20];
3916
3917 u8 reserved_1[0x40];
3918};
3919
3920struct mlx5_ifc_qp_2rst_in_bits {
3921 u8 opcode[0x10];
3922 u8 reserved_0[0x10];
3923
3924 u8 reserved_1[0x10];
3925 u8 op_mod[0x10];
3926
3927 u8 reserved_2[0x8];
3928 u8 qpn[0x18];
3929
3930 u8 reserved_3[0x20];
3931};
3932
3933struct mlx5_ifc_qp_2err_out_bits {
3934 u8 status[0x8];
3935 u8 reserved_0[0x18];
3936
3937 u8 syndrome[0x20];
3938
3939 u8 reserved_1[0x40];
3940};
3941
3942struct mlx5_ifc_qp_2err_in_bits {
3943 u8 opcode[0x10];
3944 u8 reserved_0[0x10];
3945
3946 u8 reserved_1[0x10];
3947 u8 op_mod[0x10];
3948
3949 u8 reserved_2[0x8];
3950 u8 qpn[0x18];
3951
3952 u8 reserved_3[0x20];
3953};
3954
3955struct mlx5_ifc_page_fault_resume_out_bits {
3956 u8 status[0x8];
3957 u8 reserved_0[0x18];
3958
3959 u8 syndrome[0x20];
3960
3961 u8 reserved_1[0x40];
3962};
3963
3964struct mlx5_ifc_page_fault_resume_in_bits {
3965 u8 opcode[0x10];
3966 u8 reserved_0[0x10];
3967
3968 u8 reserved_1[0x10];
3969 u8 op_mod[0x10];
3970
3971 u8 error[0x1];
3972 u8 reserved_2[0x4];
3973 u8 rdma[0x1];
3974 u8 read_write[0x1];
3975 u8 req_res[0x1];
3976 u8 qpn[0x18];
3977
3978 u8 reserved_3[0x20];
3979};
3980
3981struct mlx5_ifc_nop_out_bits {
3982 u8 status[0x8];
3983 u8 reserved_0[0x18];
3984
3985 u8 syndrome[0x20];
3986
3987 u8 reserved_1[0x40];
3988};
3989
3990struct mlx5_ifc_nop_in_bits {
3991 u8 opcode[0x10];
3992 u8 reserved_0[0x10];
3993
3994 u8 reserved_1[0x10];
3995 u8 op_mod[0x10];
3996
3997 u8 reserved_2[0x40];
3998};
3999
4000struct mlx5_ifc_modify_vport_state_out_bits {
4001 u8 status[0x8];
4002 u8 reserved_0[0x18];
4003
4004 u8 syndrome[0x20];
4005
4006 u8 reserved_1[0x40];
4007};
4008
4009struct mlx5_ifc_modify_vport_state_in_bits {
4010 u8 opcode[0x10];
4011 u8 reserved_0[0x10];
4012
4013 u8 reserved_1[0x10];
4014 u8 op_mod[0x10];
4015
4016 u8 other_vport[0x1];
4017 u8 reserved_2[0xf];
4018 u8 vport_number[0x10];
4019
4020 u8 reserved_3[0x18];
4021 u8 admin_state[0x4];
4022 u8 reserved_4[0x4];
4023};
4024
4025struct mlx5_ifc_modify_tis_out_bits {
4026 u8 status[0x8];
4027 u8 reserved_0[0x18];
4028
4029 u8 syndrome[0x20];
4030
4031 u8 reserved_1[0x40];
4032};
4033
4034struct mlx5_ifc_modify_tis_in_bits {
4035 u8 opcode[0x10];
4036 u8 reserved_0[0x10];
4037
4038 u8 reserved_1[0x10];
4039 u8 op_mod[0x10];
4040
4041 u8 reserved_2[0x8];
4042 u8 tisn[0x18];
4043
4044 u8 reserved_3[0x20];
4045
4046 u8 modify_bitmask[0x40];
4047
4048 u8 reserved_4[0x40];
4049
4050 struct mlx5_ifc_tisc_bits ctx;
4051};
4052
4053struct mlx5_ifc_modify_tir_out_bits {
4054 u8 status[0x8];
4055 u8 reserved_0[0x18];
4056
4057 u8 syndrome[0x20];
4058
4059 u8 reserved_1[0x40];
4060};
4061
4062struct mlx5_ifc_modify_tir_in_bits {
4063 u8 opcode[0x10];
4064 u8 reserved_0[0x10];
4065
4066 u8 reserved_1[0x10];
4067 u8 op_mod[0x10];
4068
4069 u8 reserved_2[0x8];
4070 u8 tirn[0x18];
4071
4072 u8 reserved_3[0x20];
4073
4074 u8 modify_bitmask[0x40];
4075
4076 u8 reserved_4[0x40];
4077
4078 struct mlx5_ifc_tirc_bits ctx;
4079};
4080
4081struct mlx5_ifc_modify_sq_out_bits {
4082 u8 status[0x8];
4083 u8 reserved_0[0x18];
4084
4085 u8 syndrome[0x20];
4086
4087 u8 reserved_1[0x40];
4088};
4089
4090struct mlx5_ifc_modify_sq_in_bits {
4091 u8 opcode[0x10];
4092 u8 reserved_0[0x10];
4093
4094 u8 reserved_1[0x10];
4095 u8 op_mod[0x10];
4096
4097 u8 sq_state[0x4];
4098 u8 reserved_2[0x4];
4099 u8 sqn[0x18];
4100
4101 u8 reserved_3[0x20];
4102
4103 u8 modify_bitmask[0x40];
4104
4105 u8 reserved_4[0x40];
4106
4107 struct mlx5_ifc_sqc_bits ctx;
4108};
4109
4110struct mlx5_ifc_modify_rqt_out_bits {
4111 u8 status[0x8];
4112 u8 reserved_0[0x18];
4113
4114 u8 syndrome[0x20];
4115
4116 u8 reserved_1[0x40];
4117};
4118
4119struct mlx5_ifc_modify_rqt_in_bits {
4120 u8 opcode[0x10];
4121 u8 reserved_0[0x10];
4122
4123 u8 reserved_1[0x10];
4124 u8 op_mod[0x10];
4125
4126 u8 reserved_2[0x8];
4127 u8 rqtn[0x18];
4128
4129 u8 reserved_3[0x20];
4130
4131 u8 modify_bitmask[0x40];
4132
4133 u8 reserved_4[0x40];
4134
4135 struct mlx5_ifc_rqtc_bits ctx;
4136};
4137
4138struct mlx5_ifc_modify_rq_out_bits {
4139 u8 status[0x8];
4140 u8 reserved_0[0x18];
4141
4142 u8 syndrome[0x20];
4143
4144 u8 reserved_1[0x40];
4145};
4146
4147struct mlx5_ifc_modify_rq_in_bits {
4148 u8 opcode[0x10];
4149 u8 reserved_0[0x10];
4150
4151 u8 reserved_1[0x10];
4152 u8 op_mod[0x10];
4153
4154 u8 rq_state[0x4];
4155 u8 reserved_2[0x4];
4156 u8 rqn[0x18];
4157
4158 u8 reserved_3[0x20];
4159
4160 u8 modify_bitmask[0x40];
4161
4162 u8 reserved_4[0x40];
4163
4164 struct mlx5_ifc_rqc_bits ctx;
4165};
4166
4167struct mlx5_ifc_modify_rmp_out_bits {
4168 u8 status[0x8];
4169 u8 reserved_0[0x18];
4170
4171 u8 syndrome[0x20];
4172
4173 u8 reserved_1[0x40];
4174};
4175
4176struct mlx5_ifc_rmp_bitmask_bits {
4177 u8 reserved[0x20];
4178
4179 u8 reserved1[0x1f];
4180 u8 lwm[0x1];
4181};
4182
4183struct mlx5_ifc_modify_rmp_in_bits {
4184 u8 opcode[0x10];
4185 u8 reserved_0[0x10];
4186
4187 u8 reserved_1[0x10];
4188 u8 op_mod[0x10];
4189
4190 u8 rmp_state[0x4];
4191 u8 reserved_2[0x4];
4192 u8 rmpn[0x18];
4193
4194 u8 reserved_3[0x20];
4195
4196 struct mlx5_ifc_rmp_bitmask_bits bitmask;
4197
4198 u8 reserved_4[0x40];
4199
4200 struct mlx5_ifc_rmpc_bits ctx;
4201};
4202
4203struct mlx5_ifc_modify_nic_vport_context_out_bits {
4204 u8 status[0x8];
4205 u8 reserved_0[0x18];
4206
4207 u8 syndrome[0x20];
4208
4209 u8 reserved_1[0x40];
4210};
4211
4212struct mlx5_ifc_modify_nic_vport_field_select_bits {
4213 u8 reserved_0[0x1c];
4214 u8 permanent_address[0x1];
4215 u8 addresses_list[0x1];
4216 u8 roce_en[0x1];
4217 u8 reserved_1[0x1];
4218};
4219
4220struct mlx5_ifc_modify_nic_vport_context_in_bits {
4221 u8 opcode[0x10];
4222 u8 reserved_0[0x10];
4223
4224 u8 reserved_1[0x10];
4225 u8 op_mod[0x10];
4226
4227 u8 other_vport[0x1];
4228 u8 reserved_2[0xf];
4229 u8 vport_number[0x10];
4230
4231 struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
4232
4233 u8 reserved_3[0x780];
4234
4235 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
4236};
4237
4238struct mlx5_ifc_modify_hca_vport_context_out_bits {
4239 u8 status[0x8];
4240 u8 reserved_0[0x18];
4241
4242 u8 syndrome[0x20];
4243
4244 u8 reserved_1[0x40];
4245};
4246
4247struct mlx5_ifc_modify_hca_vport_context_in_bits {
4248 u8 opcode[0x10];
4249 u8 reserved_0[0x10];
4250
4251 u8 reserved_1[0x10];
4252 u8 op_mod[0x10];
4253
4254 u8 other_vport[0x1];
4255 u8 reserved_2[0xb];
4256 u8 port_num[0x4];
4257 u8 vport_number[0x10];
4258
4259 u8 reserved_3[0x20];
4260
4261 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
4262};
4263
4264struct mlx5_ifc_modify_cq_out_bits {
4265 u8 status[0x8];
4266 u8 reserved_0[0x18];
4267
4268 u8 syndrome[0x20];
4269
4270 u8 reserved_1[0x40];
4271};
4272
4273enum {
4274 MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0,
4275 MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1,
4276};
4277
4278struct mlx5_ifc_modify_cq_in_bits {
4279 u8 opcode[0x10];
4280 u8 reserved_0[0x10];
4281
4282 u8 reserved_1[0x10];
4283 u8 op_mod[0x10];
4284
4285 u8 reserved_2[0x8];
4286 u8 cqn[0x18];
4287
4288 union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
4289
4290 struct mlx5_ifc_cqc_bits cq_context;
4291
4292 u8 reserved_3[0x600];
4293
4294 u8 pas[0][0x40];
4295};
4296
4297struct mlx5_ifc_modify_cong_status_out_bits {
4298 u8 status[0x8];
4299 u8 reserved_0[0x18];
4300
4301 u8 syndrome[0x20];
4302
4303 u8 reserved_1[0x40];
4304};
4305
4306struct mlx5_ifc_modify_cong_status_in_bits {
4307 u8 opcode[0x10];
4308 u8 reserved_0[0x10];
4309
4310 u8 reserved_1[0x10];
4311 u8 op_mod[0x10];
4312
4313 u8 reserved_2[0x18];
4314 u8 priority[0x4];
4315 u8 cong_protocol[0x4];
4316
4317 u8 enable[0x1];
4318 u8 tag_enable[0x1];
4319 u8 reserved_3[0x1e];
4320};
4321
4322struct mlx5_ifc_modify_cong_params_out_bits {
4323 u8 status[0x8];
4324 u8 reserved_0[0x18];
4325
4326 u8 syndrome[0x20];
4327
4328 u8 reserved_1[0x40];
4329};
4330
4331struct mlx5_ifc_modify_cong_params_in_bits {
4332 u8 opcode[0x10];
4333 u8 reserved_0[0x10];
4334
4335 u8 reserved_1[0x10];
4336 u8 op_mod[0x10];
4337
4338 u8 reserved_2[0x1c];
4339 u8 cong_protocol[0x4];
4340
4341 union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
4342
4343 u8 reserved_3[0x80];
4344
4345 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
4346};
4347
4348struct mlx5_ifc_manage_pages_out_bits {
4349 u8 status[0x8];
4350 u8 reserved_0[0x18];
4351
4352 u8 syndrome[0x20];
4353
4354 u8 output_num_entries[0x20];
4355
4356 u8 reserved_1[0x20];
4357
4358 u8 pas[0][0x40];
4359};
4360
4361enum {
4362 MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0,
4363 MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1,
4364 MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2,
4365};
4366
4367struct mlx5_ifc_manage_pages_in_bits {
4368 u8 opcode[0x10];
4369 u8 reserved_0[0x10];
4370
4371 u8 reserved_1[0x10];
4372 u8 op_mod[0x10];
4373
4374 u8 reserved_2[0x10];
4375 u8 function_id[0x10];
4376
4377 u8 input_num_entries[0x20];
4378
4379 u8 pas[0][0x40];
4380};
4381
4382struct mlx5_ifc_mad_ifc_out_bits {
4383 u8 status[0x8];
4384 u8 reserved_0[0x18];
4385
4386 u8 syndrome[0x20];
4387
4388 u8 reserved_1[0x40];
4389
4390 u8 response_mad_packet[256][0x8];
4391};
4392
4393struct mlx5_ifc_mad_ifc_in_bits {
4394 u8 opcode[0x10];
4395 u8 reserved_0[0x10];
4396
4397 u8 reserved_1[0x10];
4398 u8 op_mod[0x10];
4399
4400 u8 remote_lid[0x10];
4401 u8 reserved_2[0x8];
4402 u8 port[0x8];
4403
4404 u8 reserved_3[0x20];
4405
4406 u8 mad[256][0x8];
4407};
4408
4409struct mlx5_ifc_init_hca_out_bits {
4410 u8 status[0x8];
4411 u8 reserved_0[0x18];
4412
4413 u8 syndrome[0x20];
4414
4415 u8 reserved_1[0x40];
4416};
4417
4418struct mlx5_ifc_init_hca_in_bits {
4419 u8 opcode[0x10];
4420 u8 reserved_0[0x10];
4421
4422 u8 reserved_1[0x10];
4423 u8 op_mod[0x10];
4424
4425 u8 reserved_2[0x40];
4426};
4427
4428struct mlx5_ifc_init2rtr_qp_out_bits {
4429 u8 status[0x8];
4430 u8 reserved_0[0x18];
4431
4432 u8 syndrome[0x20];
4433
4434 u8 reserved_1[0x40];
4435};
4436
4437struct mlx5_ifc_init2rtr_qp_in_bits {
4438 u8 opcode[0x10];
4439 u8 reserved_0[0x10];
4440
4441 u8 reserved_1[0x10];
4442 u8 op_mod[0x10];
4443
4444 u8 reserved_2[0x8];
4445 u8 qpn[0x18];
4446
4447 u8 reserved_3[0x20];
4448
4449 u8 opt_param_mask[0x20];
4450
4451 u8 reserved_4[0x20];
4452
4453 struct mlx5_ifc_qpc_bits qpc;
4454
4455 u8 reserved_5[0x80];
4456};
4457
4458struct mlx5_ifc_init2init_qp_out_bits {
4459 u8 status[0x8];
4460 u8 reserved_0[0x18];
4461
4462 u8 syndrome[0x20];
4463
4464 u8 reserved_1[0x40];
4465};
4466
4467struct mlx5_ifc_init2init_qp_in_bits {
4468 u8 opcode[0x10];
4469 u8 reserved_0[0x10];
4470
4471 u8 reserved_1[0x10];
4472 u8 op_mod[0x10];
4473
4474 u8 reserved_2[0x8];
4475 u8 qpn[0x18];
4476
4477 u8 reserved_3[0x20];
4478
4479 u8 opt_param_mask[0x20];
4480
4481 u8 reserved_4[0x20];
4482
4483 struct mlx5_ifc_qpc_bits qpc;
4484
4485 u8 reserved_5[0x80];
4486};
4487
4488struct mlx5_ifc_get_dropped_packet_log_out_bits {
4489 u8 status[0x8];
4490 u8 reserved_0[0x18];
4491
4492 u8 syndrome[0x20];
4493
4494 u8 reserved_1[0x40];
4495
4496 u8 packet_headers_log[128][0x8];
4497
4498 u8 packet_syndrome[64][0x8];
4499};
4500
4501struct mlx5_ifc_get_dropped_packet_log_in_bits {
4502 u8 opcode[0x10];
4503 u8 reserved_0[0x10];
4504
4505 u8 reserved_1[0x10];
4506 u8 op_mod[0x10];
4507
4508 u8 reserved_2[0x40];
4509};
4510
4511struct mlx5_ifc_gen_eqe_in_bits {
4512 u8 opcode[0x10];
4513 u8 reserved_0[0x10];
4514
4515 u8 reserved_1[0x10];
4516 u8 op_mod[0x10];
4517
4518 u8 reserved_2[0x18];
4519 u8 eq_number[0x8];
4520
4521 u8 reserved_3[0x20];
4522
4523 u8 eqe[64][0x8];
4524};
4525
4526struct mlx5_ifc_gen_eq_out_bits {
4527 u8 status[0x8];
4528 u8 reserved_0[0x18];
4529
4530 u8 syndrome[0x20];
4531
4532 u8 reserved_1[0x40];
4533};
4534
4535struct mlx5_ifc_enable_hca_out_bits {
4536 u8 status[0x8];
4537 u8 reserved_0[0x18];
4538
4539 u8 syndrome[0x20];
4540
4541 u8 reserved_1[0x20];
4542};
4543
4544struct mlx5_ifc_enable_hca_in_bits {
4545 u8 opcode[0x10];
4546 u8 reserved_0[0x10];
4547
4548 u8 reserved_1[0x10];
4549 u8 op_mod[0x10];
4550
4551 u8 reserved_2[0x10];
4552 u8 function_id[0x10];
4553
4554 u8 reserved_3[0x20];
4555};
4556
4557struct mlx5_ifc_drain_dct_out_bits {
4558 u8 status[0x8];
4559 u8 reserved_0[0x18];
4560
4561 u8 syndrome[0x20];
4562
4563 u8 reserved_1[0x40];
4564};
4565
4566struct mlx5_ifc_drain_dct_in_bits {
4567 u8 opcode[0x10];
4568 u8 reserved_0[0x10];
4569
4570 u8 reserved_1[0x10];
4571 u8 op_mod[0x10];
4572
4573 u8 reserved_2[0x8];
4574 u8 dctn[0x18];
4575
4576 u8 reserved_3[0x20];
4577};
4578
4579struct mlx5_ifc_disable_hca_out_bits {
4580 u8 status[0x8];
4581 u8 reserved_0[0x18];
4582
4583 u8 syndrome[0x20];
4584
4585 u8 reserved_1[0x20];
4586};
4587
4588struct mlx5_ifc_disable_hca_in_bits {
4589 u8 opcode[0x10];
4590 u8 reserved_0[0x10];
4591
4592 u8 reserved_1[0x10];
4593 u8 op_mod[0x10];
4594
4595 u8 reserved_2[0x10];
4596 u8 function_id[0x10];
4597
4598 u8 reserved_3[0x20];
4599};
4600
4601struct mlx5_ifc_detach_from_mcg_out_bits {
4602 u8 status[0x8];
4603 u8 reserved_0[0x18];
4604
4605 u8 syndrome[0x20];
4606
4607 u8 reserved_1[0x40];
4608};
4609
4610struct mlx5_ifc_detach_from_mcg_in_bits {
4611 u8 opcode[0x10];
4612 u8 reserved_0[0x10];
4613
4614 u8 reserved_1[0x10];
4615 u8 op_mod[0x10];
4616
4617 u8 reserved_2[0x8];
4618 u8 qpn[0x18];
4619
4620 u8 reserved_3[0x20];
4621
4622 u8 multicast_gid[16][0x8];
4623};
4624
4625struct mlx5_ifc_destroy_xrc_srq_out_bits {
4626 u8 status[0x8];
4627 u8 reserved_0[0x18];
4628
4629 u8 syndrome[0x20];
4630
4631 u8 reserved_1[0x40];
4632};
4633
4634struct mlx5_ifc_destroy_xrc_srq_in_bits {
4635 u8 opcode[0x10];
4636 u8 reserved_0[0x10];
4637
4638 u8 reserved_1[0x10];
4639 u8 op_mod[0x10];
4640
4641 u8 reserved_2[0x8];
4642 u8 xrc_srqn[0x18];
4643
4644 u8 reserved_3[0x20];
4645};
4646
4647struct mlx5_ifc_destroy_tis_out_bits {
4648 u8 status[0x8];
4649 u8 reserved_0[0x18];
4650
4651 u8 syndrome[0x20];
4652
4653 u8 reserved_1[0x40];
4654};
4655
4656struct mlx5_ifc_destroy_tis_in_bits {
4657 u8 opcode[0x10];
4658 u8 reserved_0[0x10];
4659
4660 u8 reserved_1[0x10];
4661 u8 op_mod[0x10];
4662
4663 u8 reserved_2[0x8];
4664 u8 tisn[0x18];
4665
4666 u8 reserved_3[0x20];
4667};
4668
4669struct mlx5_ifc_destroy_tir_out_bits {
4670 u8 status[0x8];
4671 u8 reserved_0[0x18];
4672
4673 u8 syndrome[0x20];
4674
4675 u8 reserved_1[0x40];
4676};
4677
4678struct mlx5_ifc_destroy_tir_in_bits {
4679 u8 opcode[0x10];
4680 u8 reserved_0[0x10];
4681
4682 u8 reserved_1[0x10];
4683 u8 op_mod[0x10];
4684
4685 u8 reserved_2[0x8];
4686 u8 tirn[0x18];
4687
4688 u8 reserved_3[0x20];
4689};
4690
4691struct mlx5_ifc_destroy_srq_out_bits {
4692 u8 status[0x8];
4693 u8 reserved_0[0x18];
4694
4695 u8 syndrome[0x20];
4696
4697 u8 reserved_1[0x40];
4698};
4699
4700struct mlx5_ifc_destroy_srq_in_bits {
4701 u8 opcode[0x10];
4702 u8 reserved_0[0x10];
4703
4704 u8 reserved_1[0x10];
4705 u8 op_mod[0x10];
4706
4707 u8 reserved_2[0x8];
4708 u8 srqn[0x18];
4709
4710 u8 reserved_3[0x20];
4711};
4712
4713struct mlx5_ifc_destroy_sq_out_bits {
4714 u8 status[0x8];
4715 u8 reserved_0[0x18];
4716
4717 u8 syndrome[0x20];
4718
4719 u8 reserved_1[0x40];
4720};
4721
4722struct mlx5_ifc_destroy_sq_in_bits {
4723 u8 opcode[0x10];
4724 u8 reserved_0[0x10];
4725
4726 u8 reserved_1[0x10];
4727 u8 op_mod[0x10];
4728
4729 u8 reserved_2[0x8];
4730 u8 sqn[0x18];
4731
4732 u8 reserved_3[0x20];
4733};
4734
4735struct mlx5_ifc_destroy_rqt_out_bits {
4736 u8 status[0x8];
4737 u8 reserved_0[0x18];
4738
4739 u8 syndrome[0x20];
4740
4741 u8 reserved_1[0x40];
4742};
4743
4744struct mlx5_ifc_destroy_rqt_in_bits {
4745 u8 opcode[0x10];
4746 u8 reserved_0[0x10];
4747
4748 u8 reserved_1[0x10];
4749 u8 op_mod[0x10];
4750
4751 u8 reserved_2[0x8];
4752 u8 rqtn[0x18];
4753
4754 u8 reserved_3[0x20];
4755};
4756
4757struct mlx5_ifc_destroy_rq_out_bits {
4758 u8 status[0x8];
4759 u8 reserved_0[0x18];
4760
4761 u8 syndrome[0x20];
4762
4763 u8 reserved_1[0x40];
4764};
4765
4766struct mlx5_ifc_destroy_rq_in_bits {
4767 u8 opcode[0x10];
4768 u8 reserved_0[0x10];
4769
4770 u8 reserved_1[0x10];
4771 u8 op_mod[0x10];
4772
4773 u8 reserved_2[0x8];
4774 u8 rqn[0x18];
4775
4776 u8 reserved_3[0x20];
4777};
4778
4779struct mlx5_ifc_destroy_rmp_out_bits {
4780 u8 status[0x8];
4781 u8 reserved_0[0x18];
4782
4783 u8 syndrome[0x20];
4784
4785 u8 reserved_1[0x40];
4786};
4787
4788struct mlx5_ifc_destroy_rmp_in_bits {
4789 u8 opcode[0x10];
4790 u8 reserved_0[0x10];
4791
4792 u8 reserved_1[0x10];
4793 u8 op_mod[0x10];
4794
4795 u8 reserved_2[0x8];
4796 u8 rmpn[0x18];
4797
4798 u8 reserved_3[0x20];
4799};
4800
4801struct mlx5_ifc_destroy_qp_out_bits {
4802 u8 status[0x8];
4803 u8 reserved_0[0x18];
4804
4805 u8 syndrome[0x20];
4806
4807 u8 reserved_1[0x40];
4808};
4809
4810struct mlx5_ifc_destroy_qp_in_bits {
4811 u8 opcode[0x10];
4812 u8 reserved_0[0x10];
4813
4814 u8 reserved_1[0x10];
4815 u8 op_mod[0x10];
4816
4817 u8 reserved_2[0x8];
4818 u8 qpn[0x18];
4819
4820 u8 reserved_3[0x20];
4821};
4822
4823struct mlx5_ifc_destroy_psv_out_bits {
4824 u8 status[0x8];
4825 u8 reserved_0[0x18];
4826
4827 u8 syndrome[0x20];
4828
4829 u8 reserved_1[0x40];
4830};
4831
4832struct mlx5_ifc_destroy_psv_in_bits {
4833 u8 opcode[0x10];
4834 u8 reserved_0[0x10];
4835
4836 u8 reserved_1[0x10];
4837 u8 op_mod[0x10];
4838
4839 u8 reserved_2[0x8];
4840 u8 psvn[0x18];
4841
4842 u8 reserved_3[0x20];
4843};
4844
4845struct mlx5_ifc_destroy_mkey_out_bits {
4846 u8 status[0x8];
4847 u8 reserved_0[0x18];
4848
4849 u8 syndrome[0x20];
4850
4851 u8 reserved_1[0x40];
4852};
4853
4854struct mlx5_ifc_destroy_mkey_in_bits {
4855 u8 opcode[0x10];
4856 u8 reserved_0[0x10];
4857
4858 u8 reserved_1[0x10];
4859 u8 op_mod[0x10];
4860
4861 u8 reserved_2[0x8];
4862 u8 mkey_index[0x18];
4863
4864 u8 reserved_3[0x20];
4865};
4866
4867struct mlx5_ifc_destroy_flow_table_out_bits {
4868 u8 status[0x8];
4869 u8 reserved_0[0x18];
4870
4871 u8 syndrome[0x20];
4872
4873 u8 reserved_1[0x40];
4874};
4875
4876struct mlx5_ifc_destroy_flow_table_in_bits {
4877 u8 opcode[0x10];
4878 u8 reserved_0[0x10];
4879
4880 u8 reserved_1[0x10];
4881 u8 op_mod[0x10];
4882
4883 u8 reserved_2[0x40];
4884
4885 u8 table_type[0x8];
4886 u8 reserved_3[0x18];
4887
4888 u8 reserved_4[0x8];
4889 u8 table_id[0x18];
4890
4891 u8 reserved_5[0x140];
4892};
4893
4894struct mlx5_ifc_destroy_flow_group_out_bits {
4895 u8 status[0x8];
4896 u8 reserved_0[0x18];
4897
4898 u8 syndrome[0x20];
4899
4900 u8 reserved_1[0x40];
4901};
4902
4903struct mlx5_ifc_destroy_flow_group_in_bits {
4904 u8 opcode[0x10];
4905 u8 reserved_0[0x10];
4906
4907 u8 reserved_1[0x10];
4908 u8 op_mod[0x10];
4909
4910 u8 reserved_2[0x40];
4911
4912 u8 table_type[0x8];
4913 u8 reserved_3[0x18];
4914
4915 u8 reserved_4[0x8];
4916 u8 table_id[0x18];
4917
4918 u8 group_id[0x20];
4919
4920 u8 reserved_5[0x120];
4921};
4922
4923struct mlx5_ifc_destroy_eq_out_bits {
4924 u8 status[0x8];
4925 u8 reserved_0[0x18];
4926
4927 u8 syndrome[0x20];
4928
4929 u8 reserved_1[0x40];
4930};
4931
4932struct mlx5_ifc_destroy_eq_in_bits {
4933 u8 opcode[0x10];
4934 u8 reserved_0[0x10];
4935
4936 u8 reserved_1[0x10];
4937 u8 op_mod[0x10];
4938
4939 u8 reserved_2[0x18];
4940 u8 eq_number[0x8];
4941
4942 u8 reserved_3[0x20];
4943};
4944
4945struct mlx5_ifc_destroy_dct_out_bits {
4946 u8 status[0x8];
4947 u8 reserved_0[0x18];
4948
4949 u8 syndrome[0x20];
4950
4951 u8 reserved_1[0x40];
4952};
4953
4954struct mlx5_ifc_destroy_dct_in_bits {
4955 u8 opcode[0x10];
4956 u8 reserved_0[0x10];
4957
4958 u8 reserved_1[0x10];
4959 u8 op_mod[0x10];
4960
4961 u8 reserved_2[0x8];
4962 u8 dctn[0x18];
4963
4964 u8 reserved_3[0x20];
4965};
4966
4967struct mlx5_ifc_destroy_cq_out_bits {
4968 u8 status[0x8];
4969 u8 reserved_0[0x18];
4970
4971 u8 syndrome[0x20];
4972
4973 u8 reserved_1[0x40];
4974};
4975
4976struct mlx5_ifc_destroy_cq_in_bits {
4977 u8 opcode[0x10];
4978 u8 reserved_0[0x10];
4979
4980 u8 reserved_1[0x10];
4981 u8 op_mod[0x10];
4982
4983 u8 reserved_2[0x8];
4984 u8 cqn[0x18];
4985
4986 u8 reserved_3[0x20];
4987};
4988
4989struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
4990 u8 status[0x8];
4991 u8 reserved_0[0x18];
4992
4993 u8 syndrome[0x20];
4994
4995 u8 reserved_1[0x40];
4996};
4997
4998struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
4999 u8 opcode[0x10];
5000 u8 reserved_0[0x10];
5001
5002 u8 reserved_1[0x10];
5003 u8 op_mod[0x10];
5004
5005 u8 reserved_2[0x20];
5006
5007 u8 reserved_3[0x10];
5008 u8 vxlan_udp_port[0x10];
5009};
5010
5011struct mlx5_ifc_delete_l2_table_entry_out_bits {
5012 u8 status[0x8];
5013 u8 reserved_0[0x18];
5014
5015 u8 syndrome[0x20];
5016
5017 u8 reserved_1[0x40];
5018};
5019
5020struct mlx5_ifc_delete_l2_table_entry_in_bits {
5021 u8 opcode[0x10];
5022 u8 reserved_0[0x10];
5023
5024 u8 reserved_1[0x10];
5025 u8 op_mod[0x10];
5026
5027 u8 reserved_2[0x60];
5028
5029 u8 reserved_3[0x8];
5030 u8 table_index[0x18];
5031
5032 u8 reserved_4[0x140];
5033};
5034
5035struct mlx5_ifc_delete_fte_out_bits {
5036 u8 status[0x8];
5037 u8 reserved_0[0x18];
5038
5039 u8 syndrome[0x20];
5040
5041 u8 reserved_1[0x40];
5042};
5043
5044struct mlx5_ifc_delete_fte_in_bits {
5045 u8 opcode[0x10];
5046 u8 reserved_0[0x10];
5047
5048 u8 reserved_1[0x10];
5049 u8 op_mod[0x10];
5050
5051 u8 reserved_2[0x40];
5052
5053 u8 table_type[0x8];
5054 u8 reserved_3[0x18];
5055
5056 u8 reserved_4[0x8];
5057 u8 table_id[0x18];
5058
5059 u8 reserved_5[0x40];
5060
5061 u8 flow_index[0x20];
5062
5063 u8 reserved_6[0xe0];
5064};
5065
5066struct mlx5_ifc_dealloc_xrcd_out_bits {
5067 u8 status[0x8];
5068 u8 reserved_0[0x18];
5069
5070 u8 syndrome[0x20];
5071
5072 u8 reserved_1[0x40];
5073};
5074
5075struct mlx5_ifc_dealloc_xrcd_in_bits {
5076 u8 opcode[0x10];
5077 u8 reserved_0[0x10];
5078
5079 u8 reserved_1[0x10];
5080 u8 op_mod[0x10];
5081
5082 u8 reserved_2[0x8];
5083 u8 xrcd[0x18];
5084
5085 u8 reserved_3[0x20];
5086};
5087
5088struct mlx5_ifc_dealloc_uar_out_bits {
5089 u8 status[0x8];
5090 u8 reserved_0[0x18];
5091
5092 u8 syndrome[0x20];
5093
5094 u8 reserved_1[0x40];
5095};
5096
5097struct mlx5_ifc_dealloc_uar_in_bits {
5098 u8 opcode[0x10];
5099 u8 reserved_0[0x10];
5100
5101 u8 reserved_1[0x10];
5102 u8 op_mod[0x10];
5103
5104 u8 reserved_2[0x8];
5105 u8 uar[0x18];
5106
5107 u8 reserved_3[0x20];
5108};
5109
5110struct mlx5_ifc_dealloc_transport_domain_out_bits {
5111 u8 status[0x8];
5112 u8 reserved_0[0x18];
5113
5114 u8 syndrome[0x20];
5115
5116 u8 reserved_1[0x40];
5117};
5118
5119struct mlx5_ifc_dealloc_transport_domain_in_bits {
5120 u8 opcode[0x10];
5121 u8 reserved_0[0x10];
5122
5123 u8 reserved_1[0x10];
5124 u8 op_mod[0x10];
5125
5126 u8 reserved_2[0x8];
5127 u8 transport_domain[0x18];
5128
5129 u8 reserved_3[0x20];
5130};
5131
5132struct mlx5_ifc_dealloc_q_counter_out_bits {
5133 u8 status[0x8];
5134 u8 reserved_0[0x18];
5135
5136 u8 syndrome[0x20];
5137
5138 u8 reserved_1[0x40];
5139};
5140
5141struct mlx5_ifc_dealloc_q_counter_in_bits {
5142 u8 opcode[0x10];
5143 u8 reserved_0[0x10];
5144
5145 u8 reserved_1[0x10];
5146 u8 op_mod[0x10];
5147
5148 u8 reserved_2[0x18];
5149 u8 counter_set_id[0x8];
5150
5151 u8 reserved_3[0x20];
5152};
5153
5154struct mlx5_ifc_dealloc_pd_out_bits {
5155 u8 status[0x8];
5156 u8 reserved_0[0x18];
5157
5158 u8 syndrome[0x20];
5159
5160 u8 reserved_1[0x40];
5161};
5162
5163struct mlx5_ifc_dealloc_pd_in_bits {
5164 u8 opcode[0x10];
5165 u8 reserved_0[0x10];
5166
5167 u8 reserved_1[0x10];
5168 u8 op_mod[0x10];
5169
5170 u8 reserved_2[0x8];
5171 u8 pd[0x18];
5172
5173 u8 reserved_3[0x20];
5174};
5175
5176struct mlx5_ifc_create_xrc_srq_out_bits {
5177 u8 status[0x8];
5178 u8 reserved_0[0x18];
5179
5180 u8 syndrome[0x20];
5181
5182 u8 reserved_1[0x8];
5183 u8 xrc_srqn[0x18];
5184
5185 u8 reserved_2[0x20];
5186};
5187
5188struct mlx5_ifc_create_xrc_srq_in_bits {
5189 u8 opcode[0x10];
5190 u8 reserved_0[0x10];
5191
5192 u8 reserved_1[0x10];
5193 u8 op_mod[0x10];
5194
5195 u8 reserved_2[0x40];
5196
5197 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
5198
5199 u8 reserved_3[0x600];
5200
5201 u8 pas[0][0x40];
5202};
5203
5204struct mlx5_ifc_create_tis_out_bits {
5205 u8 status[0x8];
5206 u8 reserved_0[0x18];
5207
5208 u8 syndrome[0x20];
5209
5210 u8 reserved_1[0x8];
5211 u8 tisn[0x18];
5212
5213 u8 reserved_2[0x20];
5214};
5215
5216struct mlx5_ifc_create_tis_in_bits {
5217 u8 opcode[0x10];
5218 u8 reserved_0[0x10];
5219
5220 u8 reserved_1[0x10];
5221 u8 op_mod[0x10];
5222
5223 u8 reserved_2[0xc0];
5224
5225 struct mlx5_ifc_tisc_bits ctx;
5226};
5227
5228struct mlx5_ifc_create_tir_out_bits {
5229 u8 status[0x8];
5230 u8 reserved_0[0x18];
5231
5232 u8 syndrome[0x20];
5233
5234 u8 reserved_1[0x8];
5235 u8 tirn[0x18];
5236
5237 u8 reserved_2[0x20];
5238};
5239
5240struct mlx5_ifc_create_tir_in_bits {
5241 u8 opcode[0x10];
5242 u8 reserved_0[0x10];
5243
5244 u8 reserved_1[0x10];
5245 u8 op_mod[0x10];
5246
5247 u8 reserved_2[0xc0];
5248
5249 struct mlx5_ifc_tirc_bits ctx;
5250};
5251
5252struct mlx5_ifc_create_srq_out_bits {
5253 u8 status[0x8];
5254 u8 reserved_0[0x18];
5255
5256 u8 syndrome[0x20];
5257
5258 u8 reserved_1[0x8];
5259 u8 srqn[0x18];
5260
5261 u8 reserved_2[0x20];
5262};
5263
5264struct mlx5_ifc_create_srq_in_bits {
5265 u8 opcode[0x10];
5266 u8 reserved_0[0x10];
5267
5268 u8 reserved_1[0x10];
5269 u8 op_mod[0x10];
5270
5271 u8 reserved_2[0x40];
5272
5273 struct mlx5_ifc_srqc_bits srq_context_entry;
5274
5275 u8 reserved_3[0x600];
5276
5277 u8 pas[0][0x40];
5278};
5279
5280struct mlx5_ifc_create_sq_out_bits {
5281 u8 status[0x8];
5282 u8 reserved_0[0x18];
5283
5284 u8 syndrome[0x20];
5285
5286 u8 reserved_1[0x8];
5287 u8 sqn[0x18];
5288
5289 u8 reserved_2[0x20];
5290};
5291
5292struct mlx5_ifc_create_sq_in_bits {
5293 u8 opcode[0x10];
5294 u8 reserved_0[0x10];
5295
5296 u8 reserved_1[0x10];
5297 u8 op_mod[0x10];
5298
5299 u8 reserved_2[0xc0];
5300
5301 struct mlx5_ifc_sqc_bits ctx;
5302};
5303
5304struct mlx5_ifc_create_rqt_out_bits {
5305 u8 status[0x8];
5306 u8 reserved_0[0x18];
5307
5308 u8 syndrome[0x20];
5309
5310 u8 reserved_1[0x8];
5311 u8 rqtn[0x18];
5312
5313 u8 reserved_2[0x20];
5314};
5315
5316struct mlx5_ifc_create_rqt_in_bits {
5317 u8 opcode[0x10];
5318 u8 reserved_0[0x10];
5319
5320 u8 reserved_1[0x10];
5321 u8 op_mod[0x10];
5322
5323 u8 reserved_2[0xc0];
5324
5325 struct mlx5_ifc_rqtc_bits rqt_context;
5326};
5327
5328struct mlx5_ifc_create_rq_out_bits {
5329 u8 status[0x8];
5330 u8 reserved_0[0x18];
5331
5332 u8 syndrome[0x20];
5333
5334 u8 reserved_1[0x8];
5335 u8 rqn[0x18];
5336
5337 u8 reserved_2[0x20];
5338};
5339
5340struct mlx5_ifc_create_rq_in_bits {
5341 u8 opcode[0x10];
5342 u8 reserved_0[0x10];
5343
5344 u8 reserved_1[0x10];
5345 u8 op_mod[0x10];
5346
5347 u8 reserved_2[0xc0];
5348
5349 struct mlx5_ifc_rqc_bits ctx;
5350};
5351
5352struct mlx5_ifc_create_rmp_out_bits {
5353 u8 status[0x8];
5354 u8 reserved_0[0x18];
5355
5356 u8 syndrome[0x20];
5357
5358 u8 reserved_1[0x8];
5359 u8 rmpn[0x18];
5360
5361 u8 reserved_2[0x20];
5362};
5363
5364struct mlx5_ifc_create_rmp_in_bits {
5365 u8 opcode[0x10];
5366 u8 reserved_0[0x10];
5367
5368 u8 reserved_1[0x10];
5369 u8 op_mod[0x10];
5370
5371 u8 reserved_2[0xc0];
5372
5373 struct mlx5_ifc_rmpc_bits ctx;
5374};
5375
5376struct mlx5_ifc_create_qp_out_bits {
5377 u8 status[0x8];
5378 u8 reserved_0[0x18];
5379
5380 u8 syndrome[0x20];
5381
5382 u8 reserved_1[0x8];
5383 u8 qpn[0x18];
5384
5385 u8 reserved_2[0x20];
5386};
5387
5388struct mlx5_ifc_create_qp_in_bits {
5389 u8 opcode[0x10];
5390 u8 reserved_0[0x10];
5391
5392 u8 reserved_1[0x10];
5393 u8 op_mod[0x10];
5394
5395 u8 reserved_2[0x40];
5396
5397 u8 opt_param_mask[0x20];
5398
5399 u8 reserved_3[0x20];
5400
5401 struct mlx5_ifc_qpc_bits qpc;
5402
5403 u8 reserved_4[0x80];
5404
5405 u8 pas[0][0x40];
5406};
5407
5408struct mlx5_ifc_create_psv_out_bits {
5409 u8 status[0x8];
5410 u8 reserved_0[0x18];
5411
5412 u8 syndrome[0x20];
5413
5414 u8 reserved_1[0x40];
5415
5416 u8 reserved_2[0x8];
5417 u8 psv0_index[0x18];
5418
5419 u8 reserved_3[0x8];
5420 u8 psv1_index[0x18];
5421
5422 u8 reserved_4[0x8];
5423 u8 psv2_index[0x18];
5424
5425 u8 reserved_5[0x8];
5426 u8 psv3_index[0x18];
5427};
5428
5429struct mlx5_ifc_create_psv_in_bits {
5430 u8 opcode[0x10];
5431 u8 reserved_0[0x10];
5432
5433 u8 reserved_1[0x10];
5434 u8 op_mod[0x10];
5435
5436 u8 num_psv[0x4];
5437 u8 reserved_2[0x4];
5438 u8 pd[0x18];
5439
5440 u8 reserved_3[0x20];
5441};
5442
5443struct mlx5_ifc_create_mkey_out_bits {
5444 u8 status[0x8];
5445 u8 reserved_0[0x18];
5446
5447 u8 syndrome[0x20];
5448
5449 u8 reserved_1[0x8];
5450 u8 mkey_index[0x18];
5451
5452 u8 reserved_2[0x20];
5453};
5454
5455struct mlx5_ifc_create_mkey_in_bits {
5456 u8 opcode[0x10];
5457 u8 reserved_0[0x10];
5458
5459 u8 reserved_1[0x10];
5460 u8 op_mod[0x10];
5461
5462 u8 reserved_2[0x20];
5463
5464 u8 pg_access[0x1];
5465 u8 reserved_3[0x1f];
5466
5467 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
5468
5469 u8 reserved_4[0x80];
5470
5471 u8 translations_octword_actual_size[0x20];
5472
5473 u8 reserved_5[0x560];
5474
5475 u8 klm_pas_mtt[0][0x20];
5476};
5477
5478struct mlx5_ifc_create_flow_table_out_bits {
5479 u8 status[0x8];
5480 u8 reserved_0[0x18];
5481
5482 u8 syndrome[0x20];
5483
5484 u8 reserved_1[0x8];
5485 u8 table_id[0x18];
5486
5487 u8 reserved_2[0x20];
5488};
5489
5490struct mlx5_ifc_create_flow_table_in_bits {
5491 u8 opcode[0x10];
5492 u8 reserved_0[0x10];
5493
5494 u8 reserved_1[0x10];
5495 u8 op_mod[0x10];
5496
5497 u8 reserved_2[0x40];
5498
5499 u8 table_type[0x8];
5500 u8 reserved_3[0x18];
5501
5502 u8 reserved_4[0x20];
5503
5504 u8 reserved_5[0x8];
5505 u8 level[0x8];
5506 u8 reserved_6[0x8];
5507 u8 log_size[0x8];
5508
5509 u8 reserved_7[0x120];
5510};
5511
5512struct mlx5_ifc_create_flow_group_out_bits {
5513 u8 status[0x8];
5514 u8 reserved_0[0x18];
5515
5516 u8 syndrome[0x20];
5517
5518 u8 reserved_1[0x8];
5519 u8 group_id[0x18];
5520
5521 u8 reserved_2[0x20];
5522};
5523
5524enum {
5525 MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
5526 MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
5527 MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
5528};
5529
5530struct mlx5_ifc_create_flow_group_in_bits {
5531 u8 opcode[0x10];
5532 u8 reserved_0[0x10];
5533
5534 u8 reserved_1[0x10];
5535 u8 op_mod[0x10];
5536
5537 u8 reserved_2[0x40];
5538
5539 u8 table_type[0x8];
5540 u8 reserved_3[0x18];
5541
5542 u8 reserved_4[0x8];
5543 u8 table_id[0x18];
5544
5545 u8 reserved_5[0x20];
5546
5547 u8 start_flow_index[0x20];
5548
5549 u8 reserved_6[0x20];
5550
5551 u8 end_flow_index[0x20];
5552
5553 u8 reserved_7[0xa0];
5554
5555 u8 reserved_8[0x18];
5556 u8 match_criteria_enable[0x8];
5557
5558 struct mlx5_ifc_fte_match_param_bits match_criteria;
5559
5560 u8 reserved_9[0xe00];
5561};
5562
5563struct mlx5_ifc_create_eq_out_bits {
5564 u8 status[0x8];
5565 u8 reserved_0[0x18];
5566
5567 u8 syndrome[0x20];
5568
5569 u8 reserved_1[0x18];
5570 u8 eq_number[0x8];
5571
5572 u8 reserved_2[0x20];
5573};
5574
5575struct mlx5_ifc_create_eq_in_bits {
5576 u8 opcode[0x10];
5577 u8 reserved_0[0x10];
5578
5579 u8 reserved_1[0x10];
5580 u8 op_mod[0x10];
5581
5582 u8 reserved_2[0x40];
5583
5584 struct mlx5_ifc_eqc_bits eq_context_entry;
5585
5586 u8 reserved_3[0x40];
5587
5588 u8 event_bitmask[0x40];
5589
5590 u8 reserved_4[0x580];
5591
5592 u8 pas[0][0x40];
5593};
5594
5595struct mlx5_ifc_create_dct_out_bits {
5596 u8 status[0x8];
5597 u8 reserved_0[0x18];
5598
5599 u8 syndrome[0x20];
5600
5601 u8 reserved_1[0x8];
5602 u8 dctn[0x18];
5603
5604 u8 reserved_2[0x20];
5605};
5606
5607struct mlx5_ifc_create_dct_in_bits {
5608 u8 opcode[0x10];
5609 u8 reserved_0[0x10];
5610
5611 u8 reserved_1[0x10];
5612 u8 op_mod[0x10];
5613
5614 u8 reserved_2[0x40];
5615
5616 struct mlx5_ifc_dctc_bits dct_context_entry;
5617
5618 u8 reserved_3[0x180];
5619};
5620
5621struct mlx5_ifc_create_cq_out_bits {
5622 u8 status[0x8];
5623 u8 reserved_0[0x18];
5624
5625 u8 syndrome[0x20];
5626
5627 u8 reserved_1[0x8];
5628 u8 cqn[0x18];
5629
5630 u8 reserved_2[0x20];
5631};
5632
5633struct mlx5_ifc_create_cq_in_bits {
5634 u8 opcode[0x10];
5635 u8 reserved_0[0x10];
5636
5637 u8 reserved_1[0x10];
5638 u8 op_mod[0x10];
5639
5640 u8 reserved_2[0x40];
5641
5642 struct mlx5_ifc_cqc_bits cq_context;
5643
5644 u8 reserved_3[0x600];
5645
5646 u8 pas[0][0x40];
5647};
5648
5649struct mlx5_ifc_config_int_moderation_out_bits {
5650 u8 status[0x8];
5651 u8 reserved_0[0x18];
5652
5653 u8 syndrome[0x20];
5654
5655 u8 reserved_1[0x4];
5656 u8 min_delay[0xc];
5657 u8 int_vector[0x10];
5658
5659 u8 reserved_2[0x20];
5660};
5661
5662enum {
5663 MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0,
5664 MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1,
5665};
5666
5667struct mlx5_ifc_config_int_moderation_in_bits {
5668 u8 opcode[0x10];
5669 u8 reserved_0[0x10];
5670
5671 u8 reserved_1[0x10];
5672 u8 op_mod[0x10];
5673
5674 u8 reserved_2[0x4];
5675 u8 min_delay[0xc];
5676 u8 int_vector[0x10];
5677
5678 u8 reserved_3[0x20];
5679};
5680
5681struct mlx5_ifc_attach_to_mcg_out_bits {
5682 u8 status[0x8];
5683 u8 reserved_0[0x18];
5684
5685 u8 syndrome[0x20];
5686
5687 u8 reserved_1[0x40];
5688};
5689
5690struct mlx5_ifc_attach_to_mcg_in_bits {
5691 u8 opcode[0x10];
5692 u8 reserved_0[0x10];
5693
5694 u8 reserved_1[0x10];
5695 u8 op_mod[0x10];
5696
5697 u8 reserved_2[0x8];
5698 u8 qpn[0x18];
5699
5700 u8 reserved_3[0x20];
5701
5702 u8 multicast_gid[16][0x8];
5703};
5704
5705struct mlx5_ifc_arm_xrc_srq_out_bits {
5706 u8 status[0x8];
5707 u8 reserved_0[0x18];
5708
5709 u8 syndrome[0x20];
5710
5711 u8 reserved_1[0x40];
5712};
5713
5714enum {
5715 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1,
5716};
5717
5718struct mlx5_ifc_arm_xrc_srq_in_bits {
5719 u8 opcode[0x10];
5720 u8 reserved_0[0x10];
5721
5722 u8 reserved_1[0x10];
5723 u8 op_mod[0x10];
5724
5725 u8 reserved_2[0x8];
5726 u8 xrc_srqn[0x18];
5727
5728 u8 reserved_3[0x10];
5729 u8 lwm[0x10];
5730};
5731
5732struct mlx5_ifc_arm_rq_out_bits {
5733 u8 status[0x8];
5734 u8 reserved_0[0x18];
5735
5736 u8 syndrome[0x20];
5737
5738 u8 reserved_1[0x40];
5739};
5740
5741enum {
5742 MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1,
5743};
5744
5745struct mlx5_ifc_arm_rq_in_bits {
5746 u8 opcode[0x10];
5747 u8 reserved_0[0x10];
5748
5749 u8 reserved_1[0x10];
5750 u8 op_mod[0x10];
5751
5752 u8 reserved_2[0x8];
5753 u8 srq_number[0x18];
5754
5755 u8 reserved_3[0x10];
5756 u8 lwm[0x10];
5757};
5758
5759struct mlx5_ifc_arm_dct_out_bits {
5760 u8 status[0x8];
5761 u8 reserved_0[0x18];
5762
5763 u8 syndrome[0x20];
5764
5765 u8 reserved_1[0x40];
5766};
5767
5768struct mlx5_ifc_arm_dct_in_bits {
5769 u8 opcode[0x10];
5770 u8 reserved_0[0x10];
5771
5772 u8 reserved_1[0x10];
5773 u8 op_mod[0x10];
5774
5775 u8 reserved_2[0x8];
5776 u8 dct_number[0x18];
5777
5778 u8 reserved_3[0x20];
5779};
5780
5781struct mlx5_ifc_alloc_xrcd_out_bits {
5782 u8 status[0x8];
5783 u8 reserved_0[0x18];
5784
5785 u8 syndrome[0x20];
5786
5787 u8 reserved_1[0x8];
5788 u8 xrcd[0x18];
5789
5790 u8 reserved_2[0x20];
5791};
5792
5793struct mlx5_ifc_alloc_xrcd_in_bits {
5794 u8 opcode[0x10];
5795 u8 reserved_0[0x10];
5796
5797 u8 reserved_1[0x10];
5798 u8 op_mod[0x10];
5799
5800 u8 reserved_2[0x40];
5801};
5802
5803struct mlx5_ifc_alloc_uar_out_bits {
5804 u8 status[0x8];
5805 u8 reserved_0[0x18];
5806
5807 u8 syndrome[0x20];
5808
5809 u8 reserved_1[0x8];
5810 u8 uar[0x18];
5811
5812 u8 reserved_2[0x20];
5813};
5814
5815struct mlx5_ifc_alloc_uar_in_bits {
5816 u8 opcode[0x10];
5817 u8 reserved_0[0x10];
5818
5819 u8 reserved_1[0x10];
5820 u8 op_mod[0x10];
5821
5822 u8 reserved_2[0x40];
5823};
5824
5825struct mlx5_ifc_alloc_transport_domain_out_bits {
5826 u8 status[0x8];
5827 u8 reserved_0[0x18];
5828
5829 u8 syndrome[0x20];
5830
5831 u8 reserved_1[0x8];
5832 u8 transport_domain[0x18];
5833
5834 u8 reserved_2[0x20];
5835};
5836
5837struct mlx5_ifc_alloc_transport_domain_in_bits {
5838 u8 opcode[0x10];
5839 u8 reserved_0[0x10];
5840
5841 u8 reserved_1[0x10];
5842 u8 op_mod[0x10];
5843
5844 u8 reserved_2[0x40];
5845};
5846
5847struct mlx5_ifc_alloc_q_counter_out_bits {
5848 u8 status[0x8];
5849 u8 reserved_0[0x18];
5850
5851 u8 syndrome[0x20];
5852
5853 u8 reserved_1[0x18];
5854 u8 counter_set_id[0x8];
5855
5856 u8 reserved_2[0x20];
5857};
5858
5859struct mlx5_ifc_alloc_q_counter_in_bits {
5860 u8 opcode[0x10];
5861 u8 reserved_0[0x10];
5862
5863 u8 reserved_1[0x10];
5864 u8 op_mod[0x10];
5865
5866 u8 reserved_2[0x40];
5867};
5868
5869struct mlx5_ifc_alloc_pd_out_bits {
5870 u8 status[0x8];
5871 u8 reserved_0[0x18];
5872
5873 u8 syndrome[0x20];
5874
5875 u8 reserved_1[0x8];
5876 u8 pd[0x18];
5877
5878 u8 reserved_2[0x20];
5879};
5880
5881struct mlx5_ifc_alloc_pd_in_bits {
5882 u8 opcode[0x10];
5883 u8 reserved_0[0x10];
5884
5885 u8 reserved_1[0x10];
5886 u8 op_mod[0x10];
5887
5888 u8 reserved_2[0x40];
5889};
5890
5891struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
5892 u8 status[0x8];
5893 u8 reserved_0[0x18];
5894
5895 u8 syndrome[0x20];
5896
5897 u8 reserved_1[0x40];
5898};
5899
5900struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
5901 u8 opcode[0x10];
5902 u8 reserved_0[0x10];
5903
5904 u8 reserved_1[0x10];
5905 u8 op_mod[0x10];
5906
5907 u8 reserved_2[0x20];
5908
5909 u8 reserved_3[0x10];
5910 u8 vxlan_udp_port[0x10];
5911};
5912
5913struct mlx5_ifc_access_register_out_bits {
5914 u8 status[0x8];
5915 u8 reserved_0[0x18];
5916
5917 u8 syndrome[0x20];
5918
5919 u8 reserved_1[0x40];
5920
5921 u8 register_data[0][0x20];
5922};
5923
5924enum {
5925 MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0,
5926 MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1,
5927};
5928
5929struct mlx5_ifc_access_register_in_bits {
5930 u8 opcode[0x10];
5931 u8 reserved_0[0x10];
5932
5933 u8 reserved_1[0x10];
5934 u8 op_mod[0x10];
5935
5936 u8 reserved_2[0x10];
5937 u8 register_id[0x10];
5938
5939 u8 argument[0x20];
5940
5941 u8 register_data[0][0x20];
5942};
5943
5944struct mlx5_ifc_sltp_reg_bits {
5945 u8 status[0x4];
5946 u8 version[0x4];
5947 u8 local_port[0x8];
5948 u8 pnat[0x2];
5949 u8 reserved_0[0x2];
5950 u8 lane[0x4];
5951 u8 reserved_1[0x8];
5952
5953 u8 reserved_2[0x20];
5954
5955 u8 reserved_3[0x7];
5956 u8 polarity[0x1];
5957 u8 ob_tap0[0x8];
5958 u8 ob_tap1[0x8];
5959 u8 ob_tap2[0x8];
5960
5961 u8 reserved_4[0xc];
5962 u8 ob_preemp_mode[0x4];
5963 u8 ob_reg[0x8];
5964 u8 ob_bias[0x8];
5965
5966 u8 reserved_5[0x20];
5967};
5968
5969struct mlx5_ifc_slrg_reg_bits {
5970 u8 status[0x4];
5971 u8 version[0x4];
5972 u8 local_port[0x8];
5973 u8 pnat[0x2];
5974 u8 reserved_0[0x2];
5975 u8 lane[0x4];
5976 u8 reserved_1[0x8];
5977
5978 u8 time_to_link_up[0x10];
5979 u8 reserved_2[0xc];
5980 u8 grade_lane_speed[0x4];
5981
5982 u8 grade_version[0x8];
5983 u8 grade[0x18];
5984
5985 u8 reserved_3[0x4];
5986 u8 height_grade_type[0x4];
5987 u8 height_grade[0x18];
5988
5989 u8 height_dz[0x10];
5990 u8 height_dv[0x10];
5991
5992 u8 reserved_4[0x10];
5993 u8 height_sigma[0x10];
5994
5995 u8 reserved_5[0x20];
5996
5997 u8 reserved_6[0x4];
5998 u8 phase_grade_type[0x4];
5999 u8 phase_grade[0x18];
6000
6001 u8 reserved_7[0x8];
6002 u8 phase_eo_pos[0x8];
6003 u8 reserved_8[0x8];
6004 u8 phase_eo_neg[0x8];
6005
6006 u8 ffe_set_tested[0x10];
6007 u8 test_errors_per_lane[0x10];
6008};
6009
6010struct mlx5_ifc_pvlc_reg_bits {
6011 u8 reserved_0[0x8];
6012 u8 local_port[0x8];
6013 u8 reserved_1[0x10];
6014
6015 u8 reserved_2[0x1c];
6016 u8 vl_hw_cap[0x4];
6017
6018 u8 reserved_3[0x1c];
6019 u8 vl_admin[0x4];
6020
6021 u8 reserved_4[0x1c];
6022 u8 vl_operational[0x4];
6023};
6024
6025struct mlx5_ifc_pude_reg_bits {
6026 u8 swid[0x8];
6027 u8 local_port[0x8];
6028 u8 reserved_0[0x4];
6029 u8 admin_status[0x4];
6030 u8 reserved_1[0x4];
6031 u8 oper_status[0x4];
6032
6033 u8 reserved_2[0x60];
6034};
6035
6036struct mlx5_ifc_ptys_reg_bits {
6037 u8 reserved_0[0x8];
6038 u8 local_port[0x8];
6039 u8 reserved_1[0xd];
6040 u8 proto_mask[0x3];
6041
6042 u8 reserved_2[0x40];
6043
6044 u8 eth_proto_capability[0x20];
6045
6046 u8 ib_link_width_capability[0x10];
6047 u8 ib_proto_capability[0x10];
6048
6049 u8 reserved_3[0x20];
6050
6051 u8 eth_proto_admin[0x20];
6052
6053 u8 ib_link_width_admin[0x10];
6054 u8 ib_proto_admin[0x10];
6055
6056 u8 reserved_4[0x20];
6057
6058 u8 eth_proto_oper[0x20];
6059
6060 u8 ib_link_width_oper[0x10];
6061 u8 ib_proto_oper[0x10];
6062
6063 u8 reserved_5[0x20];
6064
6065 u8 eth_proto_lp_advertise[0x20];
6066
6067 u8 reserved_6[0x60];
6068};
6069
6070struct mlx5_ifc_ptas_reg_bits {
6071 u8 reserved_0[0x20];
6072
6073 u8 algorithm_options[0x10];
6074 u8 reserved_1[0x4];
6075 u8 repetitions_mode[0x4];
6076 u8 num_of_repetitions[0x8];
6077
6078 u8 grade_version[0x8];
6079 u8 height_grade_type[0x4];
6080 u8 phase_grade_type[0x4];
6081 u8 height_grade_weight[0x8];
6082 u8 phase_grade_weight[0x8];
6083
6084 u8 gisim_measure_bits[0x10];
6085 u8 adaptive_tap_measure_bits[0x10];
6086
6087 u8 ber_bath_high_error_threshold[0x10];
6088 u8 ber_bath_mid_error_threshold[0x10];
6089
6090 u8 ber_bath_low_error_threshold[0x10];
6091 u8 one_ratio_high_threshold[0x10];
6092
6093 u8 one_ratio_high_mid_threshold[0x10];
6094 u8 one_ratio_low_mid_threshold[0x10];
6095
6096 u8 one_ratio_low_threshold[0x10];
6097 u8 ndeo_error_threshold[0x10];
6098
6099 u8 mixer_offset_step_size[0x10];
6100 u8 reserved_2[0x8];
6101 u8 mix90_phase_for_voltage_bath[0x8];
6102
6103 u8 mixer_offset_start[0x10];
6104 u8 mixer_offset_end[0x10];
6105
6106 u8 reserved_3[0x15];
6107 u8 ber_test_time[0xb];
6108};
6109
6110struct mlx5_ifc_pspa_reg_bits {
6111 u8 swid[0x8];
6112 u8 local_port[0x8];
6113 u8 sub_port[0x8];
6114 u8 reserved_0[0x8];
6115
6116 u8 reserved_1[0x20];
6117};
6118
6119struct mlx5_ifc_pqdr_reg_bits {
6120 u8 reserved_0[0x8];
6121 u8 local_port[0x8];
6122 u8 reserved_1[0x5];
6123 u8 prio[0x3];
6124 u8 reserved_2[0x6];
6125 u8 mode[0x2];
6126
6127 u8 reserved_3[0x20];
6128
6129 u8 reserved_4[0x10];
6130 u8 min_threshold[0x10];
6131
6132 u8 reserved_5[0x10];
6133 u8 max_threshold[0x10];
6134
6135 u8 reserved_6[0x10];
6136 u8 mark_probability_denominator[0x10];
6137
6138 u8 reserved_7[0x60];
6139};
6140
6141struct mlx5_ifc_ppsc_reg_bits {
6142 u8 reserved_0[0x8];
6143 u8 local_port[0x8];
6144 u8 reserved_1[0x10];
6145
6146 u8 reserved_2[0x60];
6147
6148 u8 reserved_3[0x1c];
6149 u8 wrps_admin[0x4];
6150
6151 u8 reserved_4[0x1c];
6152 u8 wrps_status[0x4];
6153
6154 u8 reserved_5[0x8];
6155 u8 up_threshold[0x8];
6156 u8 reserved_6[0x8];
6157 u8 down_threshold[0x8];
6158
6159 u8 reserved_7[0x20];
6160
6161 u8 reserved_8[0x1c];
6162 u8 srps_admin[0x4];
6163
6164 u8 reserved_9[0x1c];
6165 u8 srps_status[0x4];
6166
6167 u8 reserved_10[0x40];
6168};
6169
6170struct mlx5_ifc_pplr_reg_bits {
6171 u8 reserved_0[0x8];
6172 u8 local_port[0x8];
6173 u8 reserved_1[0x10];
6174
6175 u8 reserved_2[0x8];
6176 u8 lb_cap[0x8];
6177 u8 reserved_3[0x8];
6178 u8 lb_en[0x8];
6179};
6180
6181struct mlx5_ifc_pplm_reg_bits {
6182 u8 reserved_0[0x8];
6183 u8 local_port[0x8];
6184 u8 reserved_1[0x10];
6185
6186 u8 reserved_2[0x20];
6187
6188 u8 port_profile_mode[0x8];
6189 u8 static_port_profile[0x8];
6190 u8 active_port_profile[0x8];
6191 u8 reserved_3[0x8];
6192
6193 u8 retransmission_active[0x8];
6194 u8 fec_mode_active[0x18];
6195
6196 u8 reserved_4[0x20];
6197};
6198
6199struct mlx5_ifc_ppcnt_reg_bits {
6200 u8 swid[0x8];
6201 u8 local_port[0x8];
6202 u8 pnat[0x2];
6203 u8 reserved_0[0x8];
6204 u8 grp[0x6];
6205
6206 u8 clr[0x1];
6207 u8 reserved_1[0x1c];
6208 u8 prio_tc[0x3];
6209
6210 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
6211};
6212
6213struct mlx5_ifc_ppad_reg_bits {
6214 u8 reserved_0[0x3];
6215 u8 single_mac[0x1];
6216 u8 reserved_1[0x4];
6217 u8 local_port[0x8];
6218 u8 mac_47_32[0x10];
6219
6220 u8 mac_31_0[0x20];
6221
6222 u8 reserved_2[0x40];
6223};
6224
6225struct mlx5_ifc_pmtu_reg_bits {
6226 u8 reserved_0[0x8];
6227 u8 local_port[0x8];
6228 u8 reserved_1[0x10];
6229
6230 u8 max_mtu[0x10];
6231 u8 reserved_2[0x10];
6232
6233 u8 admin_mtu[0x10];
6234 u8 reserved_3[0x10];
6235
6236 u8 oper_mtu[0x10];
6237 u8 reserved_4[0x10];
6238};
6239
6240struct mlx5_ifc_pmpr_reg_bits {
6241 u8 reserved_0[0x8];
6242 u8 module[0x8];
6243 u8 reserved_1[0x10];
6244
6245 u8 reserved_2[0x18];
6246 u8 attenuation_5g[0x8];
6247
6248 u8 reserved_3[0x18];
6249 u8 attenuation_7g[0x8];
6250
6251 u8 reserved_4[0x18];
6252 u8 attenuation_12g[0x8];
6253};
6254
6255struct mlx5_ifc_pmpe_reg_bits {
6256 u8 reserved_0[0x8];
6257 u8 module[0x8];
6258 u8 reserved_1[0xc];
6259 u8 module_status[0x4];
6260
6261 u8 reserved_2[0x60];
6262};
6263
6264struct mlx5_ifc_pmpc_reg_bits {
6265 u8 module_state_updated[32][0x8];
6266};
6267
6268struct mlx5_ifc_pmlpn_reg_bits {
6269 u8 reserved_0[0x4];
6270 u8 mlpn_status[0x4];
6271 u8 local_port[0x8];
6272 u8 reserved_1[0x10];
6273
6274 u8 e[0x1];
6275 u8 reserved_2[0x1f];
6276};
6277
6278struct mlx5_ifc_pmlp_reg_bits {
6279 u8 rxtx[0x1];
6280 u8 reserved_0[0x7];
6281 u8 local_port[0x8];
6282 u8 reserved_1[0x8];
6283 u8 width[0x8];
6284
6285 u8 lane0_module_mapping[0x20];
6286
6287 u8 lane1_module_mapping[0x20];
6288
6289 u8 lane2_module_mapping[0x20];
6290
6291 u8 lane3_module_mapping[0x20];
6292
6293 u8 reserved_2[0x160];
6294};
6295
6296struct mlx5_ifc_pmaos_reg_bits {
6297 u8 reserved_0[0x8];
6298 u8 module[0x8];
6299 u8 reserved_1[0x4];
6300 u8 admin_status[0x4];
6301 u8 reserved_2[0x4];
6302 u8 oper_status[0x4];
6303
6304 u8 ase[0x1];
6305 u8 ee[0x1];
6306 u8 reserved_3[0x1c];
6307 u8 e[0x2];
6308
6309 u8 reserved_4[0x40];
6310};
6311
6312struct mlx5_ifc_plpc_reg_bits {
6313 u8 reserved_0[0x4];
6314 u8 profile_id[0xc];
6315 u8 reserved_1[0x4];
6316 u8 proto_mask[0x4];
6317 u8 reserved_2[0x8];
6318
6319 u8 reserved_3[0x10];
6320 u8 lane_speed[0x10];
6321
6322 u8 reserved_4[0x17];
6323 u8 lpbf[0x1];
6324 u8 fec_mode_policy[0x8];
6325
6326 u8 retransmission_capability[0x8];
6327 u8 fec_mode_capability[0x18];
6328
6329 u8 retransmission_support_admin[0x8];
6330 u8 fec_mode_support_admin[0x18];
6331
6332 u8 retransmission_request_admin[0x8];
6333 u8 fec_mode_request_admin[0x18];
6334
6335 u8 reserved_5[0x80];
6336};
6337
6338struct mlx5_ifc_plib_reg_bits {
6339 u8 reserved_0[0x8];
6340 u8 local_port[0x8];
6341 u8 reserved_1[0x8];
6342 u8 ib_port[0x8];
6343
6344 u8 reserved_2[0x60];
6345};
6346
6347struct mlx5_ifc_plbf_reg_bits {
6348 u8 reserved_0[0x8];
6349 u8 local_port[0x8];
6350 u8 reserved_1[0xd];
6351 u8 lbf_mode[0x3];
6352
6353 u8 reserved_2[0x20];
6354};
6355
6356struct mlx5_ifc_pipg_reg_bits {
6357 u8 reserved_0[0x8];
6358 u8 local_port[0x8];
6359 u8 reserved_1[0x10];
6360
6361 u8 dic[0x1];
6362 u8 reserved_2[0x19];
6363 u8 ipg[0x4];
6364 u8 reserved_3[0x2];
6365};
6366
6367struct mlx5_ifc_pifr_reg_bits {
6368 u8 reserved_0[0x8];
6369 u8 local_port[0x8];
6370 u8 reserved_1[0x10];
6371
6372 u8 reserved_2[0xe0];
6373
6374 u8 port_filter[8][0x20];
6375
6376 u8 port_filter_update_en[8][0x20];
6377};
6378
6379struct mlx5_ifc_pfcc_reg_bits {
6380 u8 reserved_0[0x8];
6381 u8 local_port[0x8];
6382 u8 reserved_1[0x10];
6383
6384 u8 ppan[0x4];
6385 u8 reserved_2[0x4];
6386 u8 prio_mask_tx[0x8];
6387 u8 reserved_3[0x8];
6388 u8 prio_mask_rx[0x8];
6389
6390 u8 pptx[0x1];
6391 u8 aptx[0x1];
6392 u8 reserved_4[0x6];
6393 u8 pfctx[0x8];
6394 u8 reserved_5[0x10];
6395
6396 u8 pprx[0x1];
6397 u8 aprx[0x1];
6398 u8 reserved_6[0x6];
6399 u8 pfcrx[0x8];
6400 u8 reserved_7[0x10];
6401
6402 u8 reserved_8[0x80];
6403};
6404
6405struct mlx5_ifc_pelc_reg_bits {
6406 u8 op[0x4];
6407 u8 reserved_0[0x4];
6408 u8 local_port[0x8];
6409 u8 reserved_1[0x10];
6410
6411 u8 op_admin[0x8];
6412 u8 op_capability[0x8];
6413 u8 op_request[0x8];
6414 u8 op_active[0x8];
6415
6416 u8 admin[0x40];
6417
6418 u8 capability[0x40];
6419
6420 u8 request[0x40];
6421
6422 u8 active[0x40];
6423
6424 u8 reserved_2[0x80];
6425};
6426
6427struct mlx5_ifc_peir_reg_bits {
6428 u8 reserved_0[0x8];
6429 u8 local_port[0x8];
6430 u8 reserved_1[0x10];
6431
6432 u8 reserved_2[0xc];
6433 u8 error_count[0x4];
6434 u8 reserved_3[0x10];
6435
6436 u8 reserved_4[0xc];
6437 u8 lane[0x4];
6438 u8 reserved_5[0x8];
6439 u8 error_type[0x8];
6440};
6441
6442struct mlx5_ifc_pcap_reg_bits {
6443 u8 reserved_0[0x8];
6444 u8 local_port[0x8];
6445 u8 reserved_1[0x10];
6446
6447 u8 port_capability_mask[4][0x20];
6448};
6449
6450struct mlx5_ifc_paos_reg_bits {
6451 u8 swid[0x8];
6452 u8 local_port[0x8];
6453 u8 reserved_0[0x4];
6454 u8 admin_status[0x4];
6455 u8 reserved_1[0x4];
6456 u8 oper_status[0x4];
6457
6458 u8 ase[0x1];
6459 u8 ee[0x1];
6460 u8 reserved_2[0x1c];
6461 u8 e[0x2];
6462
6463 u8 reserved_3[0x40];
6464};
6465
6466struct mlx5_ifc_pamp_reg_bits {
6467 u8 reserved_0[0x8];
6468 u8 opamp_group[0x8];
6469 u8 reserved_1[0xc];
6470 u8 opamp_group_type[0x4];
6471
6472 u8 start_index[0x10];
6473 u8 reserved_2[0x4];
6474 u8 num_of_indices[0xc];
6475
6476 u8 index_data[18][0x10];
6477};
6478
6479struct mlx5_ifc_lane_2_module_mapping_bits {
6480 u8 reserved_0[0x6];
6481 u8 rx_lane[0x2];
6482 u8 reserved_1[0x6];
6483 u8 tx_lane[0x2];
6484 u8 reserved_2[0x8];
6485 u8 module[0x8];
6486};
6487
6488struct mlx5_ifc_bufferx_reg_bits {
6489 u8 reserved_0[0x6];
6490 u8 lossy[0x1];
6491 u8 epsb[0x1];
6492 u8 reserved_1[0xc];
6493 u8 size[0xc];
6494
6495 u8 xoff_threshold[0x10];
6496 u8 xon_threshold[0x10];
6497};
6498
6499struct mlx5_ifc_set_node_in_bits {
6500 u8 node_description[64][0x8];
6501};
6502
6503struct mlx5_ifc_register_power_settings_bits {
6504 u8 reserved_0[0x18];
6505 u8 power_settings_level[0x8];
6506
6507 u8 reserved_1[0x60];
6508};
6509
6510struct mlx5_ifc_register_host_endianness_bits {
6511 u8 he[0x1];
6512 u8 reserved_0[0x1f];
6513
6514 u8 reserved_1[0x60];
6515};
6516
6517struct mlx5_ifc_umr_pointer_desc_argument_bits {
6518 u8 reserved_0[0x20];
6519
6520 u8 mkey[0x20];
6521
6522 u8 addressh_63_32[0x20];
6523
6524 u8 addressl_31_0[0x20];
6525};
6526
6527struct mlx5_ifc_ud_adrs_vector_bits {
6528 u8 dc_key[0x40];
6529
6530 u8 ext[0x1];
6531 u8 reserved_0[0x7];
6532 u8 destination_qp_dct[0x18];
6533
6534 u8 static_rate[0x4];
6535 u8 sl_eth_prio[0x4];
6536 u8 fl[0x1];
6537 u8 mlid[0x7];
6538 u8 rlid_udp_sport[0x10];
6539
6540 u8 reserved_1[0x20];
6541
6542 u8 rmac_47_16[0x20];
6543
6544 u8 rmac_15_0[0x10];
6545 u8 tclass[0x8];
6546 u8 hop_limit[0x8];
6547
6548 u8 reserved_2[0x1];
6549 u8 grh[0x1];
6550 u8 reserved_3[0x2];
6551 u8 src_addr_index[0x8];
6552 u8 flow_label[0x14];
6553
6554 u8 rgid_rip[16][0x8];
6555};
6556
6557struct mlx5_ifc_pages_req_event_bits {
6558 u8 reserved_0[0x10];
6559 u8 function_id[0x10];
6560
6561 u8 num_pages[0x20];
6562
6563 u8 reserved_1[0xa0];
6564};
6565
6566struct mlx5_ifc_eqe_bits {
6567 u8 reserved_0[0x8];
6568 u8 event_type[0x8];
6569 u8 reserved_1[0x8];
6570 u8 event_sub_type[0x8];
6571
6572 u8 reserved_2[0xe0];
6573
6574 union mlx5_ifc_event_auto_bits event_data;
6575
6576 u8 reserved_3[0x10];
6577 u8 signature[0x8];
6578 u8 reserved_4[0x7];
6579 u8 owner[0x1];
6580};
6581
6582enum {
6583 MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7,
6584};
6585
6586struct mlx5_ifc_cmd_queue_entry_bits {
6587 u8 type[0x8];
6588 u8 reserved_0[0x18];
6589
6590 u8 input_length[0x20];
6591
6592 u8 input_mailbox_pointer_63_32[0x20];
6593
6594 u8 input_mailbox_pointer_31_9[0x17];
6595 u8 reserved_1[0x9];
6596
6597 u8 command_input_inline_data[16][0x8];
6598
6599 u8 command_output_inline_data[16][0x8];
6600
6601 u8 output_mailbox_pointer_63_32[0x20];
6602
6603 u8 output_mailbox_pointer_31_9[0x17];
6604 u8 reserved_2[0x9];
6605
6606 u8 output_length[0x20];
6607
6608 u8 token[0x8];
6609 u8 signature[0x8];
6610 u8 reserved_3[0x8];
6611 u8 status[0x7];
6612 u8 ownership[0x1];
6613};
6614
6615struct mlx5_ifc_cmd_out_bits {
6616 u8 status[0x8];
6617 u8 reserved_0[0x18];
6618
6619 u8 syndrome[0x20];
6620
6621 u8 command_output[0x20];
6622};
6623
6624struct mlx5_ifc_cmd_in_bits {
6625 u8 opcode[0x10];
6626 u8 reserved_0[0x10];
6627
6628 u8 reserved_1[0x10];
6629 u8 op_mod[0x10];
6630
6631 u8 command[0][0x20];
6632};
6633
6634struct mlx5_ifc_cmd_if_box_bits {
6635 u8 mailbox_data[512][0x8];
6636
6637 u8 reserved_0[0x180];
6638
6639 u8 next_pointer_63_32[0x20];
6640
6641 u8 next_pointer_31_10[0x16];
6642 u8 reserved_1[0xa];
6643
6644 u8 block_number[0x20];
6645
6646 u8 reserved_2[0x8];
6647 u8 token[0x8];
6648 u8 ctrl_signature[0x8];
6649 u8 signature[0x8];
6650};
6651
6652struct mlx5_ifc_mtt_bits {
6653 u8 ptag_63_32[0x20];
6654
6655 u8 ptag_31_8[0x18];
6656 u8 reserved_0[0x6];
6657 u8 wr_en[0x1];
6658 u8 rd_en[0x1];
6659};
6660
6661enum {
6662 MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
6663 MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
6664 MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
6665};
6666
6667enum {
6668 MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0,
6669 MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1,
6670 MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2,
6671};
6672
6673enum {
6674 MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1,
6675 MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7,
6676 MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8,
6677 MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9,
6678 MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa,
6679 MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb,
6680 MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc,
6681 MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd,
6682 MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
6683 MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
6684 MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
6685};
6686
6687struct mlx5_ifc_initial_seg_bits {
6688 u8 fw_rev_minor[0x10];
6689 u8 fw_rev_major[0x10];
6690
6691 u8 cmd_interface_rev[0x10];
6692 u8 fw_rev_subminor[0x10];
6693
6694 u8 reserved_0[0x40];
6695
6696 u8 cmdq_phy_addr_63_32[0x20];
6697
6698 u8 cmdq_phy_addr_31_12[0x14];
6699 u8 reserved_1[0x2];
6700 u8 nic_interface[0x2];
6701 u8 log_cmdq_size[0x4];
6702 u8 log_cmdq_stride[0x4];
6703
6704 u8 command_doorbell_vector[0x20];
6705
6706 u8 reserved_2[0xf00];
6707
6708 u8 initializing[0x1];
6709 u8 reserved_3[0x4];
6710 u8 nic_interface_supported[0x3];
6711 u8 reserved_4[0x18];
6712
6713 struct mlx5_ifc_health_buffer_bits health_buffer;
6714
6715 u8 no_dram_nic_offset[0x20];
6716
6717 u8 reserved_5[0x6e40];
6718
6719 u8 reserved_6[0x1f];
6720 u8 clear_int[0x1];
6721
6722 u8 health_syndrome[0x8];
6723 u8 health_counter[0x18];
6724
6725 u8 reserved_7[0x17fc0];
6726};
6727
6728union mlx5_ifc_ports_control_registers_document_bits {
6729 struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
6730 struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
6731 struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
6732 struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
6733 struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
6734 struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
6735 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
6736 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
6737 struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
6738 struct mlx5_ifc_pamp_reg_bits pamp_reg;
6739 struct mlx5_ifc_paos_reg_bits paos_reg;
6740 struct mlx5_ifc_pcap_reg_bits pcap_reg;
6741 struct mlx5_ifc_peir_reg_bits peir_reg;
6742 struct mlx5_ifc_pelc_reg_bits pelc_reg;
6743 struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
6744 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
6745 struct mlx5_ifc_pifr_reg_bits pifr_reg;
6746 struct mlx5_ifc_pipg_reg_bits pipg_reg;
6747 struct mlx5_ifc_plbf_reg_bits plbf_reg;
6748 struct mlx5_ifc_plib_reg_bits plib_reg;
6749 struct mlx5_ifc_plpc_reg_bits plpc_reg;
6750 struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
6751 struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
6752 struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
6753 struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
6754 struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
6755 struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
6756 struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
6757 struct mlx5_ifc_ppad_reg_bits ppad_reg;
6758 struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
6759 struct mlx5_ifc_pplm_reg_bits pplm_reg;
6760 struct mlx5_ifc_pplr_reg_bits pplr_reg;
6761 struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
6762 struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
6763 struct mlx5_ifc_pspa_reg_bits pspa_reg;
6764 struct mlx5_ifc_ptas_reg_bits ptas_reg;
6765 struct mlx5_ifc_ptys_reg_bits ptys_reg;
6766 struct mlx5_ifc_pude_reg_bits pude_reg;
6767 struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
6768 struct mlx5_ifc_slrg_reg_bits slrg_reg;
6769 struct mlx5_ifc_sltp_reg_bits sltp_reg;
6770 u8 reserved_0[0x60e0];
6771};
6772
6773union mlx5_ifc_debug_enhancements_document_bits {
6774 struct mlx5_ifc_health_buffer_bits health_buffer;
6775 u8 reserved_0[0x200];
6776};
6777
6778union mlx5_ifc_uplink_pci_interface_document_bits {
6779 struct mlx5_ifc_initial_seg_bits initial_seg;
6780 u8 reserved_0[0x20060];
347}; 6781};
348 6782
349#endif /* MLX5_IFC_H */ 6783#endif /* MLX5_IFC_H */
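The mlx5_ifc_*_bits structures above only describe firmware command layouts as sequences of big-endian bit fields; a driver does not access them as ordinary C structs. Below is a minimal sketch of how such a layout is typically consumed, assuming the MLX5_ST_SZ_DW()/MLX5_SET()/MLX5_GET() helpers and mlx5_cmd_exec() from the mlx5 core driver; the function name example_alloc_pd and the simplified error handling are illustrative only and are not part of this patch.

#include <linux/errno.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Build an ALLOC_PD command from the alloc_pd_in/alloc_pd_out layouts. */
static int example_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
	int err;

	/* opcode is the first 16-bit field of every *_in layout */
	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* status is the first 8-bit field of every *_out layout */
	if (MLX5_GET(alloc_pd_out, out, status))
		return -EIO;

	/* pd is the 24-bit field following reserved_1 in alloc_pd_out */
	*pdn = MLX5_GET(alloc_pd_out, out, pd);
	return 0;
}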
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 310b5f7fd6ae..f079fb1a31f7 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -134,13 +134,21 @@ enum {
134 134
135enum { 135enum {
136 MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, 136 MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
137 MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
137 MLX5_WQE_CTRL_SOLICITED = 1 << 1, 138 MLX5_WQE_CTRL_SOLICITED = 1 << 1,
138}; 139};
139 140
140enum { 141enum {
142 MLX5_SEND_WQE_DS = 16,
141 MLX5_SEND_WQE_BB = 64, 143 MLX5_SEND_WQE_BB = 64,
142}; 144};
143 145
146#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
147
148enum {
149 MLX5_SEND_WQE_MAX_WQEBBS = 16,
150};
151
144enum { 152enum {
145 MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, 153 MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
146 MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, 154 MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
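The constants added in this hunk describe send WQE sizing: work requests are built from 16-byte data segments (MLX5_SEND_WQE_DS) packed into 64-byte basic blocks (MLX5_SEND_WQE_BB), so MLX5_SEND_WQEBB_NUM_DS evaluates to 4 and a single WQE may span at most MLX5_SEND_WQE_MAX_WQEBBS = 16 blocks. A minimal standalone sketch of the sizing arithmetic a sender might perform; the segment counts in the example are illustrative and DIV_ROUND_UP is re-declared locally:

#include <stdio.h>

#define MLX5_SEND_WQE_DS	16
#define MLX5_SEND_WQE_BB	64
#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
#define MLX5_SEND_WQE_MAX_WQEBBS 16

/* open-coded DIV_ROUND_UP(), as in include/linux/kernel.h */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* e.g. one ctrl segment + one eth segment + six data pointers = 8 DS */
	unsigned int ds_cnt = 8;
	unsigned int wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);

	printf("%u data segments -> %u basic blocks (%u bytes)\n",
	       ds_cnt, wqebbs, wqebbs * MLX5_SEND_WQE_BB);
	printf("max WQE size: %u bytes\n",
	       MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB);
	return 0;
}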
@@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg {
200#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 208#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
201#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 209#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
202 210
211enum {
212 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
213 MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
214 MLX5_ETH_WQE_L3_CSUM = 1 << 6,
215 MLX5_ETH_WQE_L4_CSUM = 1 << 7,
216};
217
218struct mlx5_wqe_eth_seg {
219 u8 rsvd0[4];
220 u8 cs_flags;
221 u8 rsvd1;
222 __be16 mss;
223 __be32 rsvd2;
224 __be16 inline_hdr_sz;
225 u8 inline_hdr_start[2];
226};
227
203struct mlx5_wqe_xrc_seg { 228struct mlx5_wqe_xrc_seg {
204 __be32 xrc_srqn; 229 __be32 xrc_srqn;
205 u8 rsvd[12]; 230 u8 rsvd[12];
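The new struct mlx5_wqe_eth_seg carries per-packet Ethernet offload controls, and cs_flags selects which checksums the HCA computes. A hedged userspace sketch of filling the segment for a plain TCP/IP frame with outer L3/L4 checksum offload; the layout is copied from the hunk above with plain integer types, htons() stands in for cpu_to_be16(), and how an inline header continues beyond the two bytes held in the segment is driver-specific and not shown:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

enum {
	MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
	MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
	MLX5_ETH_WQE_L3_CSUM       = 1 << 6,
	MLX5_ETH_WQE_L4_CSUM       = 1 << 7,
};

struct mlx5_wqe_eth_seg {
	uint8_t  rsvd0[4];
	uint8_t  cs_flags;
	uint8_t  rsvd1;
	uint16_t mss;              /* big-endian on the wire */
	uint32_t rsvd2;
	uint16_t inline_hdr_sz;    /* big-endian on the wire */
	uint8_t  inline_hdr_start[2];
};

/* fill the segment for a non-tunnelled TCP/IP frame with csum offload */
static void fill_eth_seg(struct mlx5_wqe_eth_seg *eseg,
			 const uint8_t *hdr, uint16_t hdr_len, uint16_t mss)
{
	memset(eseg, 0, sizeof(*eseg));
	eseg->cs_flags      = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
	eseg->mss           = htons(mss);
	eseg->inline_hdr_sz = htons(hdr_len);
	/* only the first two header bytes fit in the segment itself */
	memcpy(eseg->inline_hdr_start, hdr, hdr_len < 2 ? hdr_len : 2);
}

int main(void)
{
	struct mlx5_wqe_eth_seg eseg;
	uint8_t hdr[14] = { 0 };

	fill_eth_seg(&eseg, hdr, sizeof(hdr), 1460);
	return sizeof(eseg) == 16 ? 0 : 1;	/* layout sanity check */
}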
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
new file mode 100644
index 000000000000..967e0fd06e89
--- /dev/null
+++ b/include/linux/mlx5/vport.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef __MLX5_VPORT_H__
34#define __MLX5_VPORT_H__
35
36#include <linux/mlx5/driver.h>
37
38u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
39void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
40int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
41 u8 port_num, u16 vf_num, u16 gid_index,
42 union ib_gid *gid);
43int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
44 u8 port_num, u16 vf_num, u16 pkey_index,
45 u16 *pkey);
46int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
47 u8 other_vport, u8 port_num,
48 u16 vf_num,
49 struct mlx5_hca_vport_context *rep);
50int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
51 u64 *sys_image_guid);
52int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
53 u64 *node_guid);
54
55#endif /* __MLX5_VPORT_H__ */
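The new vport.h exports firmware query helpers for vport attributes. A hedged sketch of how an Ethernet-side consumer might use two of them during setup; the opmod value 0 is assumed here to select the NIC vport, and the MAC buffer is assumed to be the usual 6 bytes, neither of which is spelled out in this header:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_query_vport(struct mlx5_core_dev *mdev,
				struct net_device *netdev)
{
	u8 state;

	/* assumption: opmod 0 queries the NIC vport's logical state */
	state = mlx5_query_vport_state(mdev, 0);
	pr_info("vport state: %u\n", state);

	/* fills netdev->dev_addr with the MAC programmed in firmware */
	mlx5_query_nic_vport_mac_address(mdev, netdev->dev_addr);
	if (!is_valid_ether_addr(netdev->dev_addr))
		eth_hw_addr_random(netdev);
}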
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h
new file mode 100644
index 000000000000..4efc3f56e6df
--- /dev/null
+++ b/include/linux/mm-arch-hooks.h
@@ -0,0 +1,25 @@
1/*
2 * Generic mm no-op hooks.
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef _LINUX_MM_ARCH_HOOKS_H
12#define _LINUX_MM_ARCH_HOOKS_H
13
14#include <asm/mm-arch-hooks.h>
15
16#ifndef arch_remap
17static inline void arch_remap(struct mm_struct *mm,
18 unsigned long old_start, unsigned long old_end,
19 unsigned long new_start, unsigned long new_end)
20{
21}
22#define arch_remap arch_remap
23#endif
24
25#endif /* _LINUX_MM_ARCH_HOOKS_H */
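The header relies on the usual "define the symbol to itself" idiom: the generic no-op arch_remap() is compiled only when the architecture's <asm/mm-arch-hooks.h> has not already provided one, and "#define arch_remap arch_remap" is what signals that a definition exists. A small standalone illustration of the same pattern outside the kernel; build with -DUSE_ARCH_HOOK to exercise the override path:

#include <stdio.h>

/* --- what an architecture header would provide (optional) --- */
#ifdef USE_ARCH_HOOK
static inline void arch_remap(int old_start, int old_end,
			      int new_start, int new_end)
{
	printf("arch hook: remap [%d,%d) -> [%d,%d)\n",
	       old_start, old_end, new_start, new_end);
}
#define arch_remap arch_remap	/* tell the generic header the hook exists */
#endif

/* --- generic fallback, used only if no arch hook was defined --- */
#ifndef arch_remap
static inline void arch_remap(int old_start, int old_end,
			      int new_start, int new_end)
{
	/* no-op by default */
}
#define arch_remap arch_remap
#endif

int main(void)
{
	arch_remap(0, 10, 100, 110);	/* silent unless -DUSE_ARCH_HOOK */
	return 0;
}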
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0755b9fd03a7..2e872f92dbac 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -27,6 +27,7 @@ struct anon_vma_chain;
27struct file_ra_state; 27struct file_ra_state;
28struct user_struct; 28struct user_struct;
29struct writeback_control; 29struct writeback_control;
30struct bdi_writeback;
30 31
31#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ 32#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
32extern unsigned long max_mapnr; 33extern unsigned long max_mapnr;
@@ -499,7 +500,7 @@ static inline int page_count(struct page *page)
499 500
500static inline bool __compound_tail_refcounted(struct page *page) 501static inline bool __compound_tail_refcounted(struct page *page)
501{ 502{
502 return !PageSlab(page) && !PageHeadHuge(page); 503 return PageAnon(page) && !PageSlab(page) && !PageHeadHuge(page);
503} 504}
504 505
505/* 506/*
@@ -1211,10 +1212,13 @@ int __set_page_dirty_nobuffers(struct page *page);
1211int __set_page_dirty_no_writeback(struct page *page); 1212int __set_page_dirty_no_writeback(struct page *page);
1212int redirty_page_for_writepage(struct writeback_control *wbc, 1213int redirty_page_for_writepage(struct writeback_control *wbc,
1213 struct page *page); 1214 struct page *page);
1214void account_page_dirtied(struct page *page, struct address_space *mapping); 1215void account_page_dirtied(struct page *page, struct address_space *mapping,
1215void account_page_cleaned(struct page *page, struct address_space *mapping); 1216 struct mem_cgroup *memcg);
1217void account_page_cleaned(struct page *page, struct address_space *mapping,
1218 struct mem_cgroup *memcg, struct bdi_writeback *wb);
1216int set_page_dirty(struct page *page); 1219int set_page_dirty(struct page *page);
1217int set_page_dirty_lock(struct page *page); 1220int set_page_dirty_lock(struct page *page);
1221void cancel_dirty_page(struct page *page);
1218int clear_page_dirty_for_io(struct page *page); 1222int clear_page_dirty_for_io(struct page *page);
1219 1223
1220int get_cmdline(struct task_struct *task, char *buffer, int buflen); 1224int get_cmdline(struct task_struct *task, char *buffer, int buflen);
@@ -1631,6 +1635,8 @@ extern void free_highmem_page(struct page *page);
1631extern void adjust_managed_page_count(struct page *page, long count); 1635extern void adjust_managed_page_count(struct page *page, long count);
1632extern void mem_init_print_info(const char *str); 1636extern void mem_init_print_info(const char *str);
1633 1637
1638extern void reserve_bootmem_region(unsigned long start, unsigned long end);
1639
1634/* Free the reserved page into the buddy system, so it gets managed. */ 1640/* Free the reserved page into the buddy system, so it gets managed. */
1635static inline void __free_reserved_page(struct page *page) 1641static inline void __free_reserved_page(struct page *page)
1636{ 1642{
@@ -1720,7 +1726,8 @@ extern void sparse_memory_present_with_active_regions(int nid);
1720 1726
1721#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ 1727#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1722 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) 1728 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1723static inline int __early_pfn_to_nid(unsigned long pfn) 1729static inline int __early_pfn_to_nid(unsigned long pfn,
1730 struct mminit_pfnnid_cache *state)
1724{ 1731{
1725 return 0; 1732 return 0;
1726} 1733}
@@ -1728,7 +1735,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn)
1728/* please see mm/page_alloc.c */ 1735/* please see mm/page_alloc.c */
1729extern int __meminit early_pfn_to_nid(unsigned long pfn); 1736extern int __meminit early_pfn_to_nid(unsigned long pfn);
1730/* there is a per-arch backend function. */ 1737/* there is a per-arch backend function. */
1731extern int __meminit __early_pfn_to_nid(unsigned long pfn); 1738extern int __meminit __early_pfn_to_nid(unsigned long pfn,
1739 struct mminit_pfnnid_cache *state);
1732#endif 1740#endif
1733 1741
1734extern void set_dma_reserve(unsigned long new_dma_reserve); 1742extern void set_dma_reserve(unsigned long new_dma_reserve);
@@ -2146,12 +2154,47 @@ enum mf_flags {
2146extern int memory_failure(unsigned long pfn, int trapno, int flags); 2154extern int memory_failure(unsigned long pfn, int trapno, int flags);
2147extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); 2155extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2148extern int unpoison_memory(unsigned long pfn); 2156extern int unpoison_memory(unsigned long pfn);
2157extern int get_hwpoison_page(struct page *page);
2149extern int sysctl_memory_failure_early_kill; 2158extern int sysctl_memory_failure_early_kill;
2150extern int sysctl_memory_failure_recovery; 2159extern int sysctl_memory_failure_recovery;
2151extern void shake_page(struct page *p, int access); 2160extern void shake_page(struct page *p, int access);
2152extern atomic_long_t num_poisoned_pages; 2161extern atomic_long_t num_poisoned_pages;
2153extern int soft_offline_page(struct page *page, int flags); 2162extern int soft_offline_page(struct page *page, int flags);
2154 2163
2164
2165/*
2166 * Error handlers for various types of pages.
2167 */
2168enum mf_result {
2169 MF_IGNORED, /* Error: cannot be handled */
2170 MF_FAILED, /* Error: handling failed */
2171 MF_DELAYED, /* Will be handled later */
2172 MF_RECOVERED, /* Successfully recovered */
2173};
2174
2175enum mf_action_page_type {
2176 MF_MSG_KERNEL,
2177 MF_MSG_KERNEL_HIGH_ORDER,
2178 MF_MSG_SLAB,
2179 MF_MSG_DIFFERENT_COMPOUND,
2180 MF_MSG_POISONED_HUGE,
2181 MF_MSG_HUGE,
2182 MF_MSG_FREE_HUGE,
2183 MF_MSG_UNMAP_FAILED,
2184 MF_MSG_DIRTY_SWAPCACHE,
2185 MF_MSG_CLEAN_SWAPCACHE,
2186 MF_MSG_DIRTY_MLOCKED_LRU,
2187 MF_MSG_CLEAN_MLOCKED_LRU,
2188 MF_MSG_DIRTY_UNEVICTABLE_LRU,
2189 MF_MSG_CLEAN_UNEVICTABLE_LRU,
2190 MF_MSG_DIRTY_LRU,
2191 MF_MSG_CLEAN_LRU,
2192 MF_MSG_TRUNCATED_LRU,
2193 MF_MSG_BUDDY,
2194 MF_MSG_BUDDY_2ND,
2195 MF_MSG_UNKNOWN,
2196};
2197
2155#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 2198#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2156extern void clear_huge_page(struct page *page, 2199extern void clear_huge_page(struct page *page,
2157 unsigned long addr, 2200 unsigned long addr,
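The new enums split memory-failure reporting into an outcome (enum mf_result) and the kind of page that was hit (enum mf_action_page_type). A minimal sketch of turning such values into log strings; it stringifies the enumerator names rather than guessing the kernel's actual message table, and only a subset of the page types is copied here:

#include <stdio.h>

/* trimmed copies of the new enums, for illustration only */
enum mf_result { MF_IGNORED, MF_FAILED, MF_DELAYED, MF_RECOVERED };
enum mf_action_page_type {
	MF_MSG_KERNEL, MF_MSG_SLAB, MF_MSG_HUGE,
	MF_MSG_DIRTY_LRU, MF_MSG_BUDDY, MF_MSG_UNKNOWN,
};

#define CASE_NAME(x) case x: return #x

static const char *mf_result_name(enum mf_result r)
{
	switch (r) {
	CASE_NAME(MF_IGNORED);
	CASE_NAME(MF_FAILED);
	CASE_NAME(MF_DELAYED);
	CASE_NAME(MF_RECOVERED);
	}
	return "?";
}

static const char *mf_page_type_name(enum mf_action_page_type t)
{
	switch (t) {
	CASE_NAME(MF_MSG_KERNEL);
	CASE_NAME(MF_MSG_SLAB);
	CASE_NAME(MF_MSG_HUGE);
	CASE_NAME(MF_MSG_DIRTY_LRU);
	CASE_NAME(MF_MSG_BUDDY);
	default:
		return "MF_MSG_UNKNOWN";
	}
}

int main(void)
{
	printf("pfn 0x1234: %s page, recovery action: %s\n",
	       mf_page_type_name(MF_MSG_DIRTY_LRU),
	       mf_result_name(MF_RECOVERED));
	return 0;
}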
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8d37e26a1007..0038ac7466fd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -226,6 +226,24 @@ struct page_frag {
226#endif 226#endif
227}; 227};
228 228
229#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
230#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
231
232struct page_frag_cache {
233 void * va;
234#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
235 __u16 offset;
236 __u16 size;
237#else
238 __u32 offset;
239#endif
240 /* we maintain a pagecount bias, so that we don't dirty the cache line
241 * containing page->_count every time we allocate a fragment.
242 */
243 unsigned int pagecnt_bias;
244 bool pfmemalloc;
245};
246
229typedef unsigned long __nocast vm_flags_t; 247typedef unsigned long __nocast vm_flags_t;
230 248
231/* 249/*
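In the new page_frag_cache, PAGE_FRAG_CACHE_MAX_SIZE rounds 32768 up to a whole number of pages and PAGE_FRAG_CACHE_MAX_ORDER is the matching allocation order, which also decides whether the 16-bit or 32-bit offset field is compiled in. A standalone check of that arithmetic, assuming 4 KiB pages; __ALIGN_MASK mirrors the kernel macro and get_order() is a simplified re-implementation:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assume 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

/* simplified get_order(): smallest order whose size covers the request */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

#define PAGE_FRAG_CACHE_MAX_SIZE  __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)

int main(void)
{
	printf("max size:  %lu bytes\n",
	       (unsigned long)PAGE_FRAG_CACHE_MAX_SIZE);
	printf("max order: %u (%lu bytes)\n", PAGE_FRAG_CACHE_MAX_ORDER,
	       PAGE_SIZE << PAGE_FRAG_CACHE_MAX_ORDER);
	return 0;
}

With 4 KiB pages this prints a 32768-byte cache backed by an order-3 allocation, small enough for the offset to fit the __u16 variant of the struct.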
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index c5d52780d6a0..3ba327af055c 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
106extern void disable_mmiotrace(void); 106extern void disable_mmiotrace(void);
107extern void mmio_trace_rw(struct mmiotrace_rw *rw); 107extern void mmio_trace_rw(struct mmiotrace_rw *rw);
108extern void mmio_trace_mapping(struct mmiotrace_map *map); 108extern void mmio_trace_mapping(struct mmiotrace_map *map);
109extern int mmio_trace_printk(const char *fmt, va_list args); 109extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
110 110
111#endif /* _LINUX_MMIOTRACE_H */ 111#endif /* _LINUX_MMIOTRACE_H */
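__printf(1, 0) marks mmio_trace_printk() as taking a printf-style format in its first argument while consuming a va_list, so there are no variadic arguments for the compiler to walk (hence the 0); callers still get format-string checking. A standalone illustration of the attribute pair:

#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b)	__attribute__((format(printf, a, b)))

/* format in arg 1, no variadic args to check -> (1, 0), as in the header */
static __printf(1, 0) int trace_vprintk(const char *fmt, va_list args)
{
	return vprintf(fmt, args);
}

static __printf(1, 2) int trace_printk(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = trace_vprintk(fmt, args);
	va_end(args);
	return ret;
}

int main(void)
{
	trace_printk("value=%d name=%s\n", 42, "mmio");
	/* trace_printk("value=%s\n", 42);  <- would now trigger -Wformat */
	return 0;
}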
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 95243d28a0ee..61cd67f4d788 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -324,25 +324,25 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
324 ___pte; \ 324 ___pte; \
325}) 325})
326 326
327#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \ 327#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
328({ \ 328({ \
329 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ 329 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
330 struct mm_struct *___mm = (__vma)->vm_mm; \ 330 struct mm_struct *___mm = (__vma)->vm_mm; \
331 pmd_t ___pmd; \ 331 pmd_t ___pmd; \
332 \ 332 \
333 ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \ 333 ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
334 mmu_notifier_invalidate_range(___mm, ___haddr, \ 334 mmu_notifier_invalidate_range(___mm, ___haddr, \
335 ___haddr + HPAGE_PMD_SIZE); \ 335 ___haddr + HPAGE_PMD_SIZE); \
336 \ 336 \
337 ___pmd; \ 337 ___pmd; \
338}) 338})
339 339
340#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \ 340#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
341({ \ 341({ \
342 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ 342 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
343 pmd_t ___pmd; \ 343 pmd_t ___pmd; \
344 \ 344 \
345 ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \ 345 ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
346 mmu_notifier_invalidate_range(__mm, ___haddr, \ 346 mmu_notifier_invalidate_range(__mm, ___haddr, \
347 ___haddr + HPAGE_PMD_SIZE); \ 347 ___haddr + HPAGE_PMD_SIZE); \
348 \ 348 \
@@ -428,8 +428,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
428#define ptep_clear_flush_young_notify ptep_clear_flush_young 428#define ptep_clear_flush_young_notify ptep_clear_flush_young
429#define pmdp_clear_flush_young_notify pmdp_clear_flush_young 429#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
430#define ptep_clear_flush_notify ptep_clear_flush 430#define ptep_clear_flush_notify ptep_clear_flush
431#define pmdp_clear_flush_notify pmdp_clear_flush 431#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
432#define pmdp_get_and_clear_notify pmdp_get_and_clear 432#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
433#define set_pte_at_notify set_pte_at 433#define set_pte_at_notify set_pte_at
434 434
435#endif /* CONFIG_MMU_NOTIFIER */ 435#endif /* CONFIG_MMU_NOTIFIER */
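The renamed *_notify macros keep their shape: a GNU C statement expression performs the huge-pmd operation, calls mmu_notifier_invalidate_range() over the huge-page range, and yields the old pmd to the caller. A generic standalone sketch of that "do the operation, notify listeners, return the old value" pattern; the names are illustrative rather than kernel API, and it needs gcc or clang for the statement-expression extension:

#include <stdio.h>

static int current_value = 7;

static int clear_value(void)
{
	int old = current_value;

	current_value = 0;
	return old;
}

static void notify_range(int start, int end)
{
	printf("invalidate [%d, %d)\n", start, end);
}

/* statement expression: run the op, notify listeners, hand back old value */
#define clear_value_notify(start, end)		\
({						\
	int ___old = clear_value();		\
	notify_range(start, end);		\
	___old;					\
})

int main(void)
{
	int old = clear_value_notify(0, 4096);

	printf("old value was %d\n", old);
	return 0;
}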
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 54d74f6eb233..754c25966a0a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -762,6 +762,14 @@ typedef struct pglist_data {
762 /* Number of pages migrated during the rate limiting time interval */ 762 /* Number of pages migrated during the rate limiting time interval */
763 unsigned long numabalancing_migrate_nr_pages; 763 unsigned long numabalancing_migrate_nr_pages;
764#endif 764#endif
765
766#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
767 /*
768 * If memory initialisation on large machines is deferred then this
769 * is the first PFN that needs to be initialised.
770 */
771 unsigned long first_deferred_pfn;
772#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
765} pg_data_t; 773} pg_data_t;
766 774
767#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) 775#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
@@ -1216,11 +1224,16 @@ void sparse_init(void);
1216#define sparse_index_init(_sec, _nid) do {} while (0) 1224#define sparse_index_init(_sec, _nid) do {} while (0)
1217#endif /* CONFIG_SPARSEMEM */ 1225#endif /* CONFIG_SPARSEMEM */
1218 1226
1219#ifdef CONFIG_NODES_SPAN_OTHER_NODES 1227/*
1220bool early_pfn_in_nid(unsigned long pfn, int nid); 1228 * During memory init memblocks map pfns to nids. The search is expensive and
1221#else 1229 * this caches recent lookups. The implementation of __early_pfn_to_nid
1222#define early_pfn_in_nid(pfn, nid) (1) 1230 * may treat start/end as pfns or sections.
1223#endif 1231 */
1232struct mminit_pfnnid_cache {
1233 unsigned long last_start;
1234 unsigned long last_end;
1235 int last_nid;
1236};
1224 1237
1225#ifndef early_pfn_valid 1238#ifndef early_pfn_valid
1226#define early_pfn_valid(pfn) (1) 1239#define early_pfn_valid(pfn) (1)
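struct mminit_pfnnid_cache memoizes the most recent pfn-range-to-node lookup so that __early_pfn_to_nid() callers walking nearby pfns skip the expensive memblock search. A hedged sketch of how such a cache is consulted and refreshed; the slow path below is a stand-in, not the real memblock walk:

#include <stdio.h>

struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

/* stand-in for the expensive range search */
static int slow_pfn_to_nid(unsigned long pfn,
			   unsigned long *start, unsigned long *end)
{
	*start = pfn & ~0xffffUL;	/* pretend nodes are 64K-pfn aligned */
	*end = *start + 0x10000;
	return (pfn >> 16) & 1;
}

static int early_pfn_to_nid_cached(unsigned long pfn,
				   struct mminit_pfnnid_cache *state)
{
	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;	/* cache hit: skip the search */

	state->last_nid = slow_pfn_to_nid(pfn, &state->last_start,
					  &state->last_end);
	return state->last_nid;
}

int main(void)
{
	/* start with an empty (never-matching) cache */
	struct mminit_pfnnid_cache cache = { .last_start = 1, .last_end = 0 };

	printf("nid(%#lx) = %d\n", 0x12345UL,
	       early_pfn_to_nid_cached(0x12345, &cache));
	printf("nid(%#lx) = %d (cached)\n", 0x12346UL,
	       early_pfn_to_nid_cached(0x12346, &cache));
	return 0;
}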
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 3bfd56778c29..34f25b7bf642 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -189,6 +189,8 @@ struct css_device_id {
189struct acpi_device_id { 189struct acpi_device_id {
190 __u8 id[ACPI_ID_LEN]; 190 __u8 id[ACPI_ID_LEN];
191 kernel_ulong_t driver_data; 191 kernel_ulong_t driver_data;
192 __u32 cls;
193 __u32 cls_msk;
192}; 194};
193 195
194#define PNP_ID_LEN 8 196#define PNP_ID_LEN 8
@@ -599,9 +601,22 @@ struct ipack_device_id {
599 601
600#define MEI_CL_MODULE_PREFIX "mei:" 602#define MEI_CL_MODULE_PREFIX "mei:"
601#define MEI_CL_NAME_SIZE 32 603#define MEI_CL_NAME_SIZE 32
604#define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
605#define MEI_CL_UUID_ARGS(_u) \
606 _u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \
607 _u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15]
602 608
609/**
610 * struct mei_cl_device_id - MEI client device identifier
611 * @name: helper name
612 * @uuid: client uuid
613 * @driver_info: information used by the driver.
614 *
615 * identifies mei client device by uuid and name
616 */
603struct mei_cl_device_id { 617struct mei_cl_device_id {
604 char name[MEI_CL_NAME_SIZE]; 618 char name[MEI_CL_NAME_SIZE];
619 uuid_le uuid;
605 kernel_ulong_t driver_info; 620 kernel_ulong_t driver_info;
606}; 621};
607 622
@@ -629,4 +644,10 @@ struct mcb_device_id {
629 kernel_ulong_t driver_data; 644 kernel_ulong_t driver_data;
630}; 645};
631 646
647struct ulpi_device_id {
648 __u16 vendor;
649 __u16 product;
650 kernel_ulong_t driver_data;
651};
652
632#endif /* LINUX_MOD_DEVICETABLE_H */ 653#endif /* LINUX_MOD_DEVICETABLE_H */
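MEI client devices are now matched by UUID in addition to name, and MEI_CL_UUID_FMT / MEI_CL_UUID_ARGS give drivers a ready-made way to print the 16 bytes. A standalone demonstration using the two macros exactly as defined above; the sample UUID bytes are arbitrary:

#include <stdio.h>

#define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
#define MEI_CL_UUID_ARGS(_u) \
	_u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \
	_u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15]

int main(void)
{
	const unsigned char uuid[16] = {
		0x12, 0xf8, 0x00, 0x27, 0xb4, 0x7f, 0x4a, 0x3e,
		0xb3, 0x4d, 0xb8, 0xae, 0x8b, 0x17, 0x7a, 0x23,
	};

	printf("mei client uuid: " MEI_CL_UUID_FMT "\n",
	       MEI_CL_UUID_ARGS(uuid));
	return 0;
}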
diff --git a/include/linux/module.h b/include/linux/module.h
index 1e5436042eb0..3a19c79918e0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,12 +11,14 @@
11#include <linux/compiler.h> 11#include <linux/compiler.h>
12#include <linux/cache.h> 12#include <linux/cache.h>
13#include <linux/kmod.h> 13#include <linux/kmod.h>
14#include <linux/init.h>
14#include <linux/elf.h> 15#include <linux/elf.h>
15#include <linux/stringify.h> 16#include <linux/stringify.h>
16#include <linux/kobject.h> 17#include <linux/kobject.h>
17#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
18#include <linux/jump_label.h> 19#include <linux/jump_label.h>
19#include <linux/export.h> 20#include <linux/export.h>
21#include <linux/rbtree_latch.h>
20 22
21#include <linux/percpu.h> 23#include <linux/percpu.h>
22#include <asm/module.h> 24#include <asm/module.h>
@@ -70,6 +72,89 @@ extern struct module_attribute module_uevent;
70extern int init_module(void); 72extern int init_module(void);
71extern void cleanup_module(void); 73extern void cleanup_module(void);
72 74
75#ifndef MODULE
76/**
77 * module_init() - driver initialization entry point
78 * @x: function to be run at kernel boot time or module insertion
79 *
80 * module_init() will either be called during do_initcalls() (if
81 * builtin) or at module insertion time (if a module). There can only
82 * be one per module.
83 */
84#define module_init(x) __initcall(x);
85
86/**
87 * module_exit() - driver exit entry point
88 * @x: function to be run when driver is removed
89 *
90 * module_exit() will wrap the driver clean-up code
91 * with cleanup_module() when used with rmmod when
92 * the driver is a module. If the driver is statically
93 * compiled into the kernel, module_exit() has no effect.
94 * There can only be one per module.
95 */
96#define module_exit(x) __exitcall(x);
97
98#else /* MODULE */
99
100/*
101 * In most cases loadable modules do not need custom
102 * initcall levels. There are still some valid cases where
103 * a driver may need to run early when built in, while the
104 * ordering does not matter when built as a loadable module,
105 * e.g. bus-snooping debug drivers.
106 */
107#define early_initcall(fn) module_init(fn)
108#define core_initcall(fn) module_init(fn)
109#define core_initcall_sync(fn) module_init(fn)
110#define postcore_initcall(fn) module_init(fn)
111#define postcore_initcall_sync(fn) module_init(fn)
112#define arch_initcall(fn) module_init(fn)
113#define subsys_initcall(fn) module_init(fn)
114#define subsys_initcall_sync(fn) module_init(fn)
115#define fs_initcall(fn) module_init(fn)
116#define fs_initcall_sync(fn) module_init(fn)
117#define rootfs_initcall(fn) module_init(fn)
118#define device_initcall(fn) module_init(fn)
119#define device_initcall_sync(fn) module_init(fn)
120#define late_initcall(fn) module_init(fn)
121#define late_initcall_sync(fn) module_init(fn)
122
123#define console_initcall(fn) module_init(fn)
124#define security_initcall(fn) module_init(fn)
125
126/* Each module must use one module_init(). */
127#define module_init(initfn) \
128 static inline initcall_t __inittest(void) \
129 { return initfn; } \
130 int init_module(void) __attribute__((alias(#initfn)));
131
132/* This is only required if you want to be unloadable. */
133#define module_exit(exitfn) \
134 static inline exitcall_t __exittest(void) \
135 { return exitfn; } \
136 void cleanup_module(void) __attribute__((alias(#exitfn)));
137
138#endif
139
140/* This means "can be init if no module support, otherwise module load
141 may call it." */
142#ifdef CONFIG_MODULES
143#define __init_or_module
144#define __initdata_or_module
145#define __initconst_or_module
146#define __INIT_OR_MODULE .text
147#define __INITDATA_OR_MODULE .data
148#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
149#else
150#define __init_or_module __init
151#define __initdata_or_module __initdata
152#define __initconst_or_module __initconst
153#define __INIT_OR_MODULE __INIT
154#define __INITDATA_OR_MODULE __INITDATA
155#define __INITRODATA_OR_MODULE __INITRODATA
156#endif /*CONFIG_MODULES*/
157
73/* Archs provide a method of finding the correct exception table. */ 158/* Archs provide a method of finding the correct exception table. */
74struct exception_table_entry; 159struct exception_table_entry;
75 160
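The kernel-doc added in this hunk covers module_init()/module_exit(): the same source either becomes an initcall when built in, or init_module()/cleanup_module() aliases when built as a module. A minimal skeleton using the two entry points (a real module would also need a Kbuild/Makefile entry to build it):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init example_init(void)
{
	pr_info("example: loaded\n");
	return 0;	/* a non-zero return aborts the load */
}

static void __exit example_exit(void)
{
	pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Minimal module_init()/module_exit() example");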
@@ -210,6 +295,13 @@ enum module_state {
210 MODULE_STATE_UNFORMED, /* Still setting it up. */ 295 MODULE_STATE_UNFORMED, /* Still setting it up. */
211}; 296};
212 297
298struct module;
299
300struct mod_tree_node {
301 struct module *mod;
302 struct latch_tree_node node;
303};
304
213struct module { 305struct module {
214 enum module_state state; 306 enum module_state state;
215 307
@@ -232,6 +324,9 @@ struct module {
232 unsigned int num_syms; 324 unsigned int num_syms;
233 325
234 /* Kernel parameters. */ 326 /* Kernel parameters. */
327#ifdef CONFIG_SYSFS
328 struct mutex param_lock;
329#endif
235 struct kernel_param *kp; 330 struct kernel_param *kp;
236 unsigned int num_kp; 331 unsigned int num_kp;
237 332
@@ -257,6 +352,8 @@ struct module {
257 bool sig_ok; 352 bool sig_ok;
258#endif 353#endif
259 354
355 bool async_probe_requested;
356
260 /* symbols that will be GPL-only in the near future. */ 357 /* symbols that will be GPL-only in the near future. */
261 const struct kernel_symbol *gpl_future_syms; 358 const struct kernel_symbol *gpl_future_syms;
262 const unsigned long *gpl_future_crcs; 359 const unsigned long *gpl_future_crcs;
@@ -269,8 +366,15 @@ struct module {
269 /* Startup function. */ 366 /* Startup function. */
270 int (*init)(void); 367 int (*init)(void);
271 368
272 /* If this is non-NULL, vfree after init() returns */ 369 /*
273 void *module_init; 370 * If this is non-NULL, vfree() after init() returns.
371 *
372 * Cacheline align here, such that:
373 * module_init, module_core, init_size, core_size,
374 * init_text_size, core_text_size and mtn_core::{mod,node[0]}
375 * are on the same cacheline.
376 */
377 void *module_init ____cacheline_aligned;
274 378
275 /* Here is the actual code + data, vfree'd on unload. */ 379 /* Here is the actual code + data, vfree'd on unload. */
276 void *module_core; 380 void *module_core;
@@ -281,6 +385,16 @@ struct module {
281 /* The size of the executable code in each section. */ 385 /* The size of the executable code in each section. */
282 unsigned int init_text_size, core_text_size; 386 unsigned int init_text_size, core_text_size;
283 387
388#ifdef CONFIG_MODULES_TREE_LOOKUP
389 /*
390 * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
391 * above entries such that a regular lookup will only touch one
392 * cacheline.
393 */
394 struct mod_tree_node mtn_core;
395 struct mod_tree_node mtn_init;
396#endif
397
284 /* Size of RO sections of the module (text+rodata) */ 398 /* Size of RO sections of the module (text+rodata) */
285 unsigned int init_ro_size, core_ro_size; 399 unsigned int init_ro_size, core_ro_size;
286 400
@@ -336,7 +450,7 @@ struct module {
336 const char **trace_bprintk_fmt_start; 450 const char **trace_bprintk_fmt_start;
337#endif 451#endif
338#ifdef CONFIG_EVENT_TRACING 452#ifdef CONFIG_EVENT_TRACING
339 struct ftrace_event_call **trace_events; 453 struct trace_event_call **trace_events;
340 unsigned int num_trace_events; 454 unsigned int num_trace_events;
341 struct trace_enum_map **trace_enums; 455 struct trace_enum_map **trace_enums;
342 unsigned int num_trace_enums; 456 unsigned int num_trace_enums;
@@ -367,7 +481,7 @@ struct module {
367 ctor_fn_t *ctors; 481 ctor_fn_t *ctors;
368 unsigned int num_ctors; 482 unsigned int num_ctors;
369#endif 483#endif
370}; 484} ____cacheline_aligned;
371#ifndef MODULE_ARCH_INIT 485#ifndef MODULE_ARCH_INIT
372#define MODULE_ARCH_INIT {} 486#define MODULE_ARCH_INIT {}
373#endif 487#endif
@@ -421,14 +535,22 @@ struct symsearch {
421 bool unused; 535 bool unused;
422}; 536};
423 537
424/* Search for an exported symbol by name. */ 538/*
539 * Search for an exported symbol by name.
540 *
541 * Must be called with module_mutex held or preemption disabled.
542 */
425const struct kernel_symbol *find_symbol(const char *name, 543const struct kernel_symbol *find_symbol(const char *name,
426 struct module **owner, 544 struct module **owner,
427 const unsigned long **crc, 545 const unsigned long **crc,
428 bool gplok, 546 bool gplok,
429 bool warn); 547 bool warn);
430 548
431/* Walk the exported symbol table */ 549/*
550 * Walk the exported symbol table
551 *
552 * Must be called with module_mutex held or preemption disabled.
553 */
432bool each_symbol_section(bool (*fn)(const struct symsearch *arr, 554bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
433 struct module *owner, 555 struct module *owner,
434 void *data), void *data); 556 void *data), void *data);
@@ -508,6 +630,11 @@ int unregister_module_notifier(struct notifier_block *nb);
508 630
509extern void print_modules(void); 631extern void print_modules(void);
510 632
633static inline bool module_requested_async_probing(struct module *module)
634{
635 return module && module->async_probe_requested;
636}
637
511#else /* !CONFIG_MODULES... */ 638#else /* !CONFIG_MODULES... */
512 639
513/* Given an address, look for it in the exception tables. */ 640/* Given an address, look for it in the exception tables. */
@@ -618,6 +745,12 @@ static inline int unregister_module_notifier(struct notifier_block *nb)
618static inline void print_modules(void) 745static inline void print_modules(void)
619{ 746{
620} 747}
748
749static inline bool module_requested_async_probing(struct module *module)
750{
751 return false;
752}
753
621#endif /* CONFIG_MODULES */ 754#endif /* CONFIG_MODULES */
622 755
623#ifdef CONFIG_SYSFS 756#ifdef CONFIG_SYSFS
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 1c9effa25e26..c12f2147c350 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -67,8 +67,9 @@ enum {
67 67
68struct kernel_param { 68struct kernel_param {
69 const char *name; 69 const char *name;
70 struct module *mod;
70 const struct kernel_param_ops *ops; 71 const struct kernel_param_ops *ops;
71 u16 perm; 72 const u16 perm;
72 s8 level; 73 s8 level;
73 u8 flags; 74 u8 flags;
74 union { 75 union {
@@ -108,7 +109,7 @@ struct kparam_array
108 * 109 *
109 * @perm is 0 if the the variable is not to appear in sysfs, or 0444 110 * @perm is 0 if the the variable is not to appear in sysfs, or 0444
110 * for world-readable, 0644 for root-writable, etc. Note that if it 111 * for world-readable, 0644 for root-writable, etc. Note that if it
111 * is writable, you may need to use kparam_block_sysfs_write() around 112 * is writable, you may need to use kernel_param_lock() around
112 * accesses (esp. charp, which can be kfreed when it changes). 113 * accesses (esp. charp, which can be kfreed when it changes).
113 * 114 *
114 * The @type is simply pasted to refer to a param_ops_##type and a 115 * The @type is simply pasted to refer to a param_ops_##type and a
@@ -216,16 +217,16 @@ struct kparam_array
216 parameters. */ 217 parameters. */
217#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \ 218#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
218 /* Default value instead of permissions? */ \ 219 /* Default value instead of permissions? */ \
219 static const char __param_str_##name[] = prefix #name; \ 220 static const char __param_str_##name[] = prefix #name; \
220 static struct kernel_param __moduleparam_const __param_##name \ 221 static struct kernel_param __moduleparam_const __param_##name \
221 __used \ 222 __used \
222 __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ 223 __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
223 = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \ 224 = { __param_str_##name, THIS_MODULE, ops, \
224 level, flags, { arg } } 225 VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
225 226
226/* Obsolete - use module_param_cb() */ 227/* Obsolete - use module_param_cb() */
227#define module_param_call(name, set, get, arg, perm) \ 228#define module_param_call(name, set, get, arg, perm) \
228 static struct kernel_param_ops __param_ops_##name = \ 229 static const struct kernel_param_ops __param_ops_##name = \
229 { .flags = 0, (void *)set, (void *)get }; \ 230 { .flags = 0, (void *)set, (void *)get }; \
230 __module_param_call(MODULE_PARAM_PREFIX, \ 231 __module_param_call(MODULE_PARAM_PREFIX, \
231 name, &__param_ops_##name, arg, \ 232 name, &__param_ops_##name, arg, \
@@ -238,58 +239,14 @@ __check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
238 return 0; 239 return 0;
239} 240}
240 241
241/**
242 * kparam_block_sysfs_write - make sure a parameter isn't written via sysfs.
243 * @name: the name of the parameter
244 *
245 * There's no point blocking write on a paramter that isn't writable via sysfs!
246 */
247#define kparam_block_sysfs_write(name) \
248 do { \
249 BUG_ON(!(__param_##name.perm & 0222)); \
250 __kernel_param_lock(); \
251 } while (0)
252
253/**
254 * kparam_unblock_sysfs_write - allows sysfs to write to a parameter again.
255 * @name: the name of the parameter
256 */
257#define kparam_unblock_sysfs_write(name) \
258 do { \
259 BUG_ON(!(__param_##name.perm & 0222)); \
260 __kernel_param_unlock(); \
261 } while (0)
262
263/**
264 * kparam_block_sysfs_read - make sure a parameter isn't read via sysfs.
265 * @name: the name of the parameter
266 *
267 * This also blocks sysfs writes.
268 */
269#define kparam_block_sysfs_read(name) \
270 do { \
271 BUG_ON(!(__param_##name.perm & 0444)); \
272 __kernel_param_lock(); \
273 } while (0)
274
275/**
276 * kparam_unblock_sysfs_read - allows sysfs to read a parameter again.
277 * @name: the name of the parameter
278 */
279#define kparam_unblock_sysfs_read(name) \
280 do { \
281 BUG_ON(!(__param_##name.perm & 0444)); \
282 __kernel_param_unlock(); \
283 } while (0)
284
285#ifdef CONFIG_SYSFS 242#ifdef CONFIG_SYSFS
286extern void __kernel_param_lock(void); 243extern void kernel_param_lock(struct module *mod);
287extern void __kernel_param_unlock(void); 244extern void kernel_param_unlock(struct module *mod);
288#else 245#else
289static inline void __kernel_param_lock(void) 246static inline void kernel_param_lock(struct module *mod)
290{ 247{
291} 248}
292static inline void __kernel_param_unlock(void) 249static inline void kernel_param_unlock(struct module *mod)
293{ 250{
294} 251}
295#endif 252#endif
@@ -310,6 +267,15 @@ static inline void __kernel_param_unlock(void)
310#define core_param(name, var, type, perm) \ 267#define core_param(name, var, type, perm) \
311 param_check_##type(name, &(var)); \ 268 param_check_##type(name, &(var)); \
312 __module_param_call("", name, &param_ops_##type, &var, perm, -1, 0) 269 __module_param_call("", name, &param_ops_##type, &var, perm, -1, 0)
270
271/**
272 * core_param_unsafe - same as core_param but taints kernel
273 */
274#define core_param_unsafe(name, var, type, perm) \
275 param_check_##type(name, &(var)); \
276 __module_param_call("", name, &param_ops_##type, &var, perm, \
277 -1, KERNEL_PARAM_FL_UNSAFE)
278
313#endif /* !MODULE */ 279#endif /* !MODULE */
314 280
315/** 281/**
@@ -357,8 +323,9 @@ extern char *parse_args(const char *name,
357 unsigned num, 323 unsigned num,
358 s16 level_min, 324 s16 level_min,
359 s16 level_max, 325 s16 level_max,
326 void *arg,
360 int (*unknown)(char *param, char *val, 327 int (*unknown)(char *param, char *val,
361 const char *doing)); 328 const char *doing, void *arg));
362 329
363/* Called by module remove. */ 330/* Called by module remove. */
364#ifdef CONFIG_SYSFS 331#ifdef CONFIG_SYSFS
@@ -376,64 +343,70 @@ static inline void destroy_params(const struct kernel_param *params,
376#define __param_check(name, p, type) \ 343#define __param_check(name, p, type) \
377 static inline type __always_unused *__check_##name(void) { return(p); } 344 static inline type __always_unused *__check_##name(void) { return(p); }
378 345
379extern struct kernel_param_ops param_ops_byte; 346extern const struct kernel_param_ops param_ops_byte;
380extern int param_set_byte(const char *val, const struct kernel_param *kp); 347extern int param_set_byte(const char *val, const struct kernel_param *kp);
381extern int param_get_byte(char *buffer, const struct kernel_param *kp); 348extern int param_get_byte(char *buffer, const struct kernel_param *kp);
382#define param_check_byte(name, p) __param_check(name, p, unsigned char) 349#define param_check_byte(name, p) __param_check(name, p, unsigned char)
383 350
384extern struct kernel_param_ops param_ops_short; 351extern const struct kernel_param_ops param_ops_short;
385extern int param_set_short(const char *val, const struct kernel_param *kp); 352extern int param_set_short(const char *val, const struct kernel_param *kp);
386extern int param_get_short(char *buffer, const struct kernel_param *kp); 353extern int param_get_short(char *buffer, const struct kernel_param *kp);
387#define param_check_short(name, p) __param_check(name, p, short) 354#define param_check_short(name, p) __param_check(name, p, short)
388 355
389extern struct kernel_param_ops param_ops_ushort; 356extern const struct kernel_param_ops param_ops_ushort;
390extern int param_set_ushort(const char *val, const struct kernel_param *kp); 357extern int param_set_ushort(const char *val, const struct kernel_param *kp);
391extern int param_get_ushort(char *buffer, const struct kernel_param *kp); 358extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
392#define param_check_ushort(name, p) __param_check(name, p, unsigned short) 359#define param_check_ushort(name, p) __param_check(name, p, unsigned short)
393 360
394extern struct kernel_param_ops param_ops_int; 361extern const struct kernel_param_ops param_ops_int;
395extern int param_set_int(const char *val, const struct kernel_param *kp); 362extern int param_set_int(const char *val, const struct kernel_param *kp);
396extern int param_get_int(char *buffer, const struct kernel_param *kp); 363extern int param_get_int(char *buffer, const struct kernel_param *kp);
397#define param_check_int(name, p) __param_check(name, p, int) 364#define param_check_int(name, p) __param_check(name, p, int)
398 365
399extern struct kernel_param_ops param_ops_uint; 366extern const struct kernel_param_ops param_ops_uint;
400extern int param_set_uint(const char *val, const struct kernel_param *kp); 367extern int param_set_uint(const char *val, const struct kernel_param *kp);
401extern int param_get_uint(char *buffer, const struct kernel_param *kp); 368extern int param_get_uint(char *buffer, const struct kernel_param *kp);
402#define param_check_uint(name, p) __param_check(name, p, unsigned int) 369#define param_check_uint(name, p) __param_check(name, p, unsigned int)
403 370
404extern struct kernel_param_ops param_ops_long; 371extern const struct kernel_param_ops param_ops_long;
405extern int param_set_long(const char *val, const struct kernel_param *kp); 372extern int param_set_long(const char *val, const struct kernel_param *kp);
406extern int param_get_long(char *buffer, const struct kernel_param *kp); 373extern int param_get_long(char *buffer, const struct kernel_param *kp);
407#define param_check_long(name, p) __param_check(name, p, long) 374#define param_check_long(name, p) __param_check(name, p, long)
408 375
409extern struct kernel_param_ops param_ops_ulong; 376extern const struct kernel_param_ops param_ops_ulong;
410extern int param_set_ulong(const char *val, const struct kernel_param *kp); 377extern int param_set_ulong(const char *val, const struct kernel_param *kp);
411extern int param_get_ulong(char *buffer, const struct kernel_param *kp); 378extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
412#define param_check_ulong(name, p) __param_check(name, p, unsigned long) 379#define param_check_ulong(name, p) __param_check(name, p, unsigned long)
413 380
414extern struct kernel_param_ops param_ops_ullong; 381extern const struct kernel_param_ops param_ops_ullong;
415extern int param_set_ullong(const char *val, const struct kernel_param *kp); 382extern int param_set_ullong(const char *val, const struct kernel_param *kp);
416extern int param_get_ullong(char *buffer, const struct kernel_param *kp); 383extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
417#define param_check_ullong(name, p) __param_check(name, p, unsigned long long) 384#define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
418 385
419extern struct kernel_param_ops param_ops_charp; 386extern const struct kernel_param_ops param_ops_charp;
420extern int param_set_charp(const char *val, const struct kernel_param *kp); 387extern int param_set_charp(const char *val, const struct kernel_param *kp);
421extern int param_get_charp(char *buffer, const struct kernel_param *kp); 388extern int param_get_charp(char *buffer, const struct kernel_param *kp);
422#define param_check_charp(name, p) __param_check(name, p, char *) 389#define param_check_charp(name, p) __param_check(name, p, char *)
423 390
424/* We used to allow int as well as bool. We're taking that away! */ 391/* We used to allow int as well as bool. We're taking that away! */
425extern struct kernel_param_ops param_ops_bool; 392extern const struct kernel_param_ops param_ops_bool;
426extern int param_set_bool(const char *val, const struct kernel_param *kp); 393extern int param_set_bool(const char *val, const struct kernel_param *kp);
427extern int param_get_bool(char *buffer, const struct kernel_param *kp); 394extern int param_get_bool(char *buffer, const struct kernel_param *kp);
428#define param_check_bool(name, p) __param_check(name, p, bool) 395#define param_check_bool(name, p) __param_check(name, p, bool)
429 396
430extern struct kernel_param_ops param_ops_invbool; 397extern const struct kernel_param_ops param_ops_bool_enable_only;
398extern int param_set_bool_enable_only(const char *val,
399 const struct kernel_param *kp);
400/* getter is the same as for the regular bool */
401#define param_check_bool_enable_only param_check_bool
402
403extern const struct kernel_param_ops param_ops_invbool;
431extern int param_set_invbool(const char *val, const struct kernel_param *kp); 404extern int param_set_invbool(const char *val, const struct kernel_param *kp);
432extern int param_get_invbool(char *buffer, const struct kernel_param *kp); 405extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
433#define param_check_invbool(name, p) __param_check(name, p, bool) 406#define param_check_invbool(name, p) __param_check(name, p, bool)
434 407
435/* An int, which can only be set like a bool (though it shows as an int). */ 408/* An int, which can only be set like a bool (though it shows as an int). */
436extern struct kernel_param_ops param_ops_bint; 409extern const struct kernel_param_ops param_ops_bint;
437extern int param_set_bint(const char *val, const struct kernel_param *kp); 410extern int param_set_bint(const char *val, const struct kernel_param *kp);
438#define param_get_bint param_get_int 411#define param_get_bint param_get_int
439#define param_check_bint param_check_int 412#define param_check_bint param_check_int
@@ -477,9 +450,9 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
477 perm, -1, 0); \ 450 perm, -1, 0); \
478 __MODULE_PARM_TYPE(name, "array of " #type) 451 __MODULE_PARM_TYPE(name, "array of " #type)
479 452
480extern struct kernel_param_ops param_array_ops; 453extern const struct kernel_param_ops param_array_ops;
481 454
482extern struct kernel_param_ops param_ops_string; 455extern const struct kernel_param_ops param_ops_string;
483extern int param_set_copystring(const char *val, const struct kernel_param *); 456extern int param_set_copystring(const char *val, const struct kernel_param *);
484extern int param_get_string(char *buffer, const struct kernel_param *kp); 457extern int param_get_string(char *buffer, const struct kernel_param *kp);
485 458
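With the lock now living in struct module, the kparam_block_sysfs_* wrappers are gone, and code that dereferences a sysfs-writable parameter, especially a charp that can be freed and replaced on write, brackets the access with kernel_param_lock()/kernel_param_unlock() on its own module. A hedged sketch of a module using the new calls:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

static char *label = "default";
module_param(label, charp, 0644);	/* root-writable via sysfs */
MODULE_PARM_DESC(label, "label used by this example");

static void example_use_label(void)
{
	char copy[32];

	/* the charp may be replaced (and freed) by a concurrent sysfs
	 * write, so hold the per-module param lock while reading it */
	kernel_param_lock(THIS_MODULE);
	strlcpy(copy, label, sizeof(copy));
	kernel_param_unlock(THIS_MODULE);

	pr_info("label: %s\n", copy);
}

static int __init example_init(void)
{
	example_use_label();
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");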
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 299d7d31fe53..9b57a9b1b081 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -296,183 +296,19 @@ struct cfi_private {
296 struct flchip chips[0]; /* per-chip data structure for each chip */ 296 struct flchip chips[0]; /* per-chip data structure for each chip */
297}; 297};
298 298
299/* 299uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
300 * Returns the command address according to the given geometry. 300 struct map_info *map, struct cfi_private *cfi);
301 */
302static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
303 struct map_info *map, struct cfi_private *cfi)
304{
305 unsigned bankwidth = map_bankwidth(map);
306 unsigned interleave = cfi_interleave(cfi);
307 unsigned type = cfi->device_type;
308 uint32_t addr;
309
310 addr = (cmd_ofs * type) * interleave;
311
312 /* Modify the unlock address if we are in compatibility mode.
313 * For 16bit devices on 8 bit busses
314 * and 32bit devices on 16 bit busses
315 * set the low bit of the alternating bit sequence of the address.
316 */
317 if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
318 addr |= (type >> 1)*interleave;
319
320 return addr;
321}
322
323/*
324 * Transforms the CFI command for the given geometry (bus width & interleave).
325 * It looks too long to be inline, but in the common case it should almost all
326 * get optimised away.
327 */
328static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
329{
330 map_word val = { {0} };
331 int wordwidth, words_per_bus, chip_mode, chips_per_word;
332 unsigned long onecmd;
333 int i;
334
335 /* We do it this way to give the compiler a fighting chance
336 of optimising away all the crap for 'bankwidth' larger than
337 an unsigned long, in the common case where that support is
338 disabled */
339 if (map_bankwidth_is_large(map)) {
340 wordwidth = sizeof(unsigned long);
341 words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
342 } else {
343 wordwidth = map_bankwidth(map);
344 words_per_bus = 1;
345 }
346
347 chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
348 chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
349
350 /* First, determine what the bit-pattern should be for a single
351 device, according to chip mode and endianness... */
352 switch (chip_mode) {
353 default: BUG();
354 case 1:
355 onecmd = cmd;
356 break;
357 case 2:
358 onecmd = cpu_to_cfi16(map, cmd);
359 break;
360 case 4:
361 onecmd = cpu_to_cfi32(map, cmd);
362 break;
363 }
364
365 /* Now replicate it across the size of an unsigned long, or
366 just to the bus width as appropriate */
367 switch (chips_per_word) {
368 default: BUG();
369#if BITS_PER_LONG >= 64
370 case 8:
371 onecmd |= (onecmd << (chip_mode * 32));
372#endif
373 case 4:
374 onecmd |= (onecmd << (chip_mode * 16));
375 case 2:
376 onecmd |= (onecmd << (chip_mode * 8));
377 case 1:
378 ;
379 }
380 301
381 /* And finally, for the multi-word case, replicate it 302map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
382 in all words in the structure */
383 for (i=0; i < words_per_bus; i++) {
384 val.x[i] = onecmd;
385 }
386
387 return val;
388}
389#define CMD(x) cfi_build_cmd((x), map, cfi) 303#define CMD(x) cfi_build_cmd((x), map, cfi)
390 304
391 305unsigned long cfi_merge_status(map_word val, struct map_info *map,
392static inline unsigned long cfi_merge_status(map_word val, struct map_info *map, 306 struct cfi_private *cfi);
393 struct cfi_private *cfi)
394{
395 int wordwidth, words_per_bus, chip_mode, chips_per_word;
396 unsigned long onestat, res = 0;
397 int i;
398
399 /* We do it this way to give the compiler a fighting chance
400 of optimising away all the crap for 'bankwidth' larger than
401 an unsigned long, in the common case where that support is
402 disabled */
403 if (map_bankwidth_is_large(map)) {
404 wordwidth = sizeof(unsigned long);
405 words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
406 } else {
407 wordwidth = map_bankwidth(map);
408 words_per_bus = 1;
409 }
410
411 chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
412 chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
413
414 onestat = val.x[0];
415 /* Or all status words together */
416 for (i=1; i < words_per_bus; i++) {
417 onestat |= val.x[i];
418 }
419
420 res = onestat;
421 switch(chips_per_word) {
422 default: BUG();
423#if BITS_PER_LONG >= 64
424 case 8:
425 res |= (onestat >> (chip_mode * 32));
426#endif
427 case 4:
428 res |= (onestat >> (chip_mode * 16));
429 case 2:
430 res |= (onestat >> (chip_mode * 8));
431 case 1:
432 ;
433 }
434
435 /* Last, determine what the bit-pattern should be for a single
436 device, according to chip mode and endianness... */
437 switch (chip_mode) {
438 case 1:
439 break;
440 case 2:
441 res = cfi16_to_cpu(map, res);
442 break;
443 case 4:
444 res = cfi32_to_cpu(map, res);
445 break;
446 default: BUG();
447 }
448 return res;
449}
450
451#define MERGESTATUS(x) cfi_merge_status((x), map, cfi) 307#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
452 308
453 309uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
454/*
455 * Sends a CFI command to a bank of flash for the given geometry.
456 *
457 * Returns the offset in flash where the command was written.
458 * If prev_val is non-null, it will be set to the value at the command address,
459 * before the command was written.
460 */
461static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
462 struct map_info *map, struct cfi_private *cfi, 310 struct map_info *map, struct cfi_private *cfi,
463 int type, map_word *prev_val) 311 int type, map_word *prev_val);
464{
465 map_word val;
466 uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
467 val = cfi_build_cmd(cmd, map, cfi);
468
469 if (prev_val)
470 *prev_val = map_read(map, addr);
471
472 map_write(map, val, addr);
473
474 return addr - base;
475}
476 312
477static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) 313static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
478{ 314{
@@ -506,15 +342,7 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
506 } 342 }
507} 343}
508 344
509static inline void cfi_udelay(int us) 345void cfi_udelay(int us);
510{
511 if (us >= 1000) {
512 msleep((us+999)/1000);
513 } else {
514 udelay(us);
515 cond_resched();
516 }
517}
518 346
519int __xipram cfi_qry_present(struct map_info *map, __u32 base, 347int __xipram cfi_qry_present(struct map_info *map, __u32 base,
520 struct cfi_private *cfi); 348 struct cfi_private *cfi);
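The helpers are only moved out of line here; their logic is unchanged and fully visible in the removed code above. As a worked example of the address math, below is a standalone copy of the cfi_build_cmd_addr() calculation exercised for an x16 chip driven over an 8-bit bus, the compatibility case the low-bit adjustment exists for:

#include <stdio.h>
#include <stdint.h>

/*
 * Standalone copy of the removed inline: type is the chip word width in
 * bytes, interleave the number of chips ganged on the bus, bankwidth the
 * total bus width in bytes.
 */
static uint32_t build_cmd_addr(uint32_t cmd_ofs, unsigned bankwidth,
			       unsigned interleave, unsigned type)
{
	uint32_t addr = (cmd_ofs * type) * interleave;

	/* 16-bit device on an 8-bit bus (or 32 on 16): adjust unlock address */
	if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
		addr |= (type >> 1) * interleave;

	return addr;
}

int main(void)
{
	/* single x16 chip wired to an 8-bit bus */
	unsigned bankwidth = 1, interleave = 1, type = 2;

	printf("unlock1 0x555 -> 0x%x\n",
	       build_cmd_addr(0x555, bankwidth, interleave, type));
	printf("unlock2 0x2aa -> 0x%x\n",
	       build_cmd_addr(0x2aa, bankwidth, interleave, type));
	return 0;
}

It prints 0xaaa and 0x555, the familiar byte-mode unlock addresses.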
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 3d4ea7eb2b68..272f42952f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -26,6 +26,8 @@
26 26
27struct mtd_info; 27struct mtd_info;
28struct nand_flash_dev; 28struct nand_flash_dev;
29struct device_node;
30
29/* Scan and identify a NAND device */ 31/* Scan and identify a NAND device */
30extern int nand_scan(struct mtd_info *mtd, int max_chips); 32extern int nand_scan(struct mtd_info *mtd, int max_chips);
31/* 33/*
@@ -176,17 +178,17 @@ typedef enum {
176/* Chip may not exist, so silence any errors in scan */ 178/* Chip may not exist, so silence any errors in scan */
177#define NAND_SCAN_SILENT_NODEV 0x00040000 179#define NAND_SCAN_SILENT_NODEV 0x00040000
178/* 180/*
179 * This option could be defined by controller drivers to protect against
180 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
181 */
182#define NAND_USE_BOUNCE_BUFFER 0x00080000
183/*
184 * Autodetect nand buswidth with readid/onfi. 181 * Autodetect nand buswidth with readid/onfi.
185 * This suppose the driver will configure the hardware in 8 bits mode 182 * This suppose the driver will configure the hardware in 8 bits mode
186 * when calling nand_scan_ident, and update its configuration 183 * when calling nand_scan_ident, and update its configuration
187 * before calling nand_scan_tail. 184 * before calling nand_scan_tail.
188 */ 185 */
189#define NAND_BUSWIDTH_AUTO 0x00080000 186#define NAND_BUSWIDTH_AUTO 0x00080000
187/*
188 * This option could be defined by controller drivers to protect against
189 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
190 */
191#define NAND_USE_BOUNCE_BUFFER 0x00100000
190 192
191/* Options set by nand scan */ 193/* Options set by nand scan */
192/* Nand scan has allocated controller struct */ 194/* Nand scan has allocated controller struct */
@@ -542,6 +544,7 @@ struct nand_buffers {
542 * flash device 544 * flash device
543 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the 545 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
544 * flash device. 546 * flash device.
547 * @dn: [BOARDSPECIFIC] device node describing this instance
545 * @read_byte: [REPLACEABLE] read one byte from the chip 548 * @read_byte: [REPLACEABLE] read one byte from the chip
546 * @read_word: [REPLACEABLE] read one word from the chip 549 * @read_word: [REPLACEABLE] read one word from the chip
547 * @write_byte: [REPLACEABLE] write a single byte to the chip on the 550 * @write_byte: [REPLACEABLE] write a single byte to the chip on the
@@ -644,6 +647,8 @@ struct nand_chip {
644 void __iomem *IO_ADDR_R; 647 void __iomem *IO_ADDR_R;
645 void __iomem *IO_ADDR_W; 648 void __iomem *IO_ADDR_W;
646 649
650 struct device_node *dn;
651
647 uint8_t (*read_byte)(struct mtd_info *mtd); 652 uint8_t (*read_byte)(struct mtd_info *mtd);
648 u16 (*read_word)(struct mtd_info *mtd); 653 u16 (*read_word)(struct mtd_info *mtd);
649 void (*write_byte)(struct mtd_info *mtd, uint8_t byte); 654 void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
@@ -833,7 +838,6 @@ struct nand_manufacturers {
833extern struct nand_flash_dev nand_flash_ids[]; 838extern struct nand_flash_dev nand_flash_ids[];
834extern struct nand_manufacturers nand_manuf_ids[]; 839extern struct nand_manufacturers nand_manuf_ids[];
835 840
836extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
837extern int nand_default_bbt(struct mtd_info *mtd); 841extern int nand_default_bbt(struct mtd_info *mtd);
838extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); 842extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
839extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); 843extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
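The renumbering fixes a collision visible in the removed lines: NAND_USE_BOUNCE_BUFFER and NAND_BUSWIDTH_AUTO both claimed bit 0x00080000, so setting one implied the other; the bounce-buffer option now moves to 0x00100000. A small compile-time guard of the kind that catches such overlaps (C11 static_assert):

#include <assert.h>

#define NAND_SCAN_SILENT_NODEV	0x00040000
#define NAND_BUSWIDTH_AUTO	0x00080000
#define NAND_USE_BOUNCE_BUFFER	0x00100000	/* was 0x00080000 */

/* every chip-option flag must own a distinct bit */
static_assert((NAND_BUSWIDTH_AUTO & NAND_USE_BOUNCE_BUFFER) == 0,
	      "NAND option flags overlap");
static_assert((NAND_SCAN_SILENT_NODEV &
	       (NAND_BUSWIDTH_AUTO | NAND_USE_BOUNCE_BUFFER)) == 0,
	      "NAND option flags overlap");

int main(void) { return 0; }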
diff --git a/include/linux/nd.h b/include/linux/nd.h
new file mode 100644
index 000000000000..507e47c86737
--- /dev/null
+++ b/include/linux/nd.h
@@ -0,0 +1,151 @@
1/*
2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#ifndef __LINUX_ND_H__
14#define __LINUX_ND_H__
15#include <linux/fs.h>
16#include <linux/ndctl.h>
17#include <linux/device.h>
18
19struct nd_device_driver {
20 struct device_driver drv;
21 unsigned long type;
22 int (*probe)(struct device *dev);
23 int (*remove)(struct device *dev);
24};
25
26static inline struct nd_device_driver *to_nd_device_driver(
27 struct device_driver *drv)
28{
29 return container_of(drv, struct nd_device_driver, drv);
30};
31
32/**
33 * struct nd_namespace_common - core infrastructure of a namespace
34 * @force_raw: ignore other personalities for the namespace (e.g. btt)
35 * @dev: device model node
36 * @claim: when set, another personality has taken ownership of the namespace
37 * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
38 */
39struct nd_namespace_common {
40 int force_raw;
41 struct device dev;
42 struct device *claim;
43 int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
44 void *buf, size_t size, int rw);
45};
46
47static inline struct nd_namespace_common *to_ndns(struct device *dev)
48{
49 return container_of(dev, struct nd_namespace_common, dev);
50}
51
52/**
53 * struct nd_namespace_io - infrastructure for loading an nd_pmem instance
54 * @dev: namespace device created by the nd region driver
55 * @res: struct resource conversion of a NFIT SPA table
56 */
57struct nd_namespace_io {
58 struct nd_namespace_common common;
59 struct resource res;
60};
61
62/**
63 * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
64 * @nsio: device and system physical address range to drive
65 * @alt_name: namespace name supplied in the dimm label
66 * @uuid: namespace name supplied in the dimm label
67 */
68struct nd_namespace_pmem {
69 struct nd_namespace_io nsio;
70 char *alt_name;
71 u8 *uuid;
72};
73
74/**
75 * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
76 * @alt_name: namespace name supplied in the dimm label
77 * @uuid: namespace name supplied in the dimm label
78 * @id: ida allocated id
79 * @lbasize: blk namespaces have a native sector size when btt not present
80 * @num_resources: number of dpa extents to claim
81 * @res: discontiguous dpa extents for given dimm
82 */
83struct nd_namespace_blk {
84 struct nd_namespace_common common;
85 char *alt_name;
86 u8 *uuid;
87 int id;
88 unsigned long lbasize;
89 int num_resources;
90 struct resource **res;
91};
92
93static inline struct nd_namespace_io *to_nd_namespace_io(struct device *dev)
94{
95 return container_of(dev, struct nd_namespace_io, common.dev);
96}
97
98static inline struct nd_namespace_pmem *to_nd_namespace_pmem(struct device *dev)
99{
100 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
101
102 return container_of(nsio, struct nd_namespace_pmem, nsio);
103}
104
105static inline struct nd_namespace_blk *to_nd_namespace_blk(struct device *dev)
106{
107 return container_of(dev, struct nd_namespace_blk, common.dev);
108}
109
110/**
111 * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
112 * @ndns: device to read
113 * @offset: namespace-relative starting offset
114 * @buf: buffer to fill
115 * @size: transfer length
116 *
117 * @buf is up-to-date upon return from this routine.
118 */
119static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
120 resource_size_t offset, void *buf, size_t size)
121{
122 return ndns->rw_bytes(ndns, offset, buf, size, READ);
123}
124
125/**
126 * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
127 * @ndns: device to write
128 * @offset: namespace-relative starting offset
129 * @buf: buffer to drain
130 * @size: transfer length
131 *
132 * NVDIMM Namespace disks do not implement sectors internally. Depending on
133 * the @ndns, the contents of @buf may be in cpu cache, platform buffers,
134 * or on backing memory media upon return from this routine. Flushing
135 * to media is handled internally by the @ndns driver, if at all.
136 */
137static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
138 resource_size_t offset, void *buf, size_t size)
139{
140 return ndns->rw_bytes(ndns, offset, buf, size, WRITE);
141}
142
143#define MODULE_ALIAS_ND_DEVICE(type) \
144 MODULE_ALIAS("nd:t" __stringify(type) "*")
145#define ND_DEVICE_MODALIAS_FMT "nd:t%d"
146
147int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
148 struct module *module, const char *mod_name);
149#define nd_driver_register(driver) \
150 __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
151#endif /* __LINUX_ND_H__ */
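A hedged sketch of a consumer of this header follows: a hypothetical nd bus driver that registers through nd_driver_register() and reads the first byte of a raw namespace with nvdimm_read_bytes(). The driver name, the probe body, and the commented .type mask are illustrative assumptions, not part of nd.h.

/* Hypothetical nd bus driver built on the interfaces declared above. */
#include <linux/module.h>
#include <linux/nd.h>

static int example_nd_probe(struct device *dev)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        u8 first_byte;
        int rc;

        /* byte-aligned access to the raw namespace capacity */
        rc = nvdimm_read_bytes(ndns, 0, &first_byte, sizeof(first_byte));
        if (rc)
                return rc;
        dev_info(dev, "first byte: %#x\n", first_byte);
        return 0;
}

static int example_nd_remove(struct device *dev)
{
        return 0;
}

static struct nd_device_driver example_nd_driver = {
        /* .type would carry an ND_DRIVER_* mask from <linux/ndctl.h> */
        .probe = example_nd_probe,
        .remove = example_nd_remove,
        .drv = {
                .name = "example_nd",
        },
};

static int __init example_nd_init(void)
{
        return nd_driver_register(&example_nd_driver);
}
module_init(example_nd_init);

static void __exit example_nd_exit(void)
{
        driver_unregister(&example_nd_driver.drv);
}
module_exit(example_nd_exit);

MODULE_LICENSE("GPL v2");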
diff --git a/include/linux/net.h b/include/linux/net.h
index 738ea48be889..04aa06852771 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -38,7 +38,6 @@ struct net;
38#define SOCK_NOSPACE 2 38#define SOCK_NOSPACE 2
39#define SOCK_PASSCRED 3 39#define SOCK_PASSCRED 3
40#define SOCK_PASSSEC 4 40#define SOCK_PASSSEC 4
41#define SOCK_EXTERNALLY_ALLOCATED 5
42 41
43#ifndef ARCH_HAS_SOCKET_TYPES 42#ifndef ARCH_HAS_SOCKET_TYPES
44/** 43/**
@@ -208,7 +207,7 @@ void sock_unregister(int family);
208int __sock_create(struct net *net, int family, int type, int proto, 207int __sock_create(struct net *net, int family, int type, int proto,
209 struct socket **res, int kern); 208 struct socket **res, int kern);
210int sock_create(int family, int type, int proto, struct socket **res); 209int sock_create(int family, int type, int proto, struct socket **res);
211int sock_create_kern(int family, int type, int proto, struct socket **res); 210int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
212int sock_create_lite(int family, int type, int proto, struct socket **res); 211int sock_create_lite(int family, int type, int proto, struct socket **res);
213void sock_release(struct socket *sock); 212void sock_release(struct socket *sock);
214int sock_sendmsg(struct socket *sock, struct msghdr *msg); 213int sock_sendmsg(struct socket *sock, struct msghdr *msg);
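The sock_create_kern() signature change above makes the target network namespace an explicit argument. A minimal hedged sketch of an updated caller; the function name is illustrative and error handling is trimmed.

/* Kernel UDP socket created explicitly in the initial network namespace. */
#include <linux/in.h>
#include <linux/net.h>
#include <net/net_namespace.h>

static struct socket *example_kernel_udp_socket(void)
{
        struct socket *sock;
        int err;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, IPPROTO_UDP,
                               &sock);
        if (err < 0)
                return NULL;
        return sock;
}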
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 7d59dc6ab789..9672781c593d 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -66,7 +66,6 @@ enum {
66 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ 66 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
67 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ 67 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
68 NETIF_F_BUSY_POLL_BIT, /* Busy poll */ 68 NETIF_F_BUSY_POLL_BIT, /* Busy poll */
69 NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */
70 69
71 /* 70 /*
72 * Add your fresh new feature above and remember to update 71 * Add your fresh new feature above and remember to update
@@ -125,7 +124,6 @@ enum {
125#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 124#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
126#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) 125#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
127#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) 126#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
128#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD)
129 127
130/* Features valid for ethtool to change */ 128/* Features valid for ethtool to change */
131/* = all defined minus driver/device-class-related */ 129/* = all defined minus driver/device-class-related */
@@ -161,8 +159,7 @@ enum {
161 */ 159 */
162#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ 160#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
163 NETIF_F_SG | NETIF_F_HIGHDMA | \ 161 NETIF_F_SG | NETIF_F_HIGHDMA | \
164 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \ 162 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
165 NETIF_F_HW_SWITCH_OFFLOAD)
166 163
167/* 164/*
168 * If one device doesn't support one of these features, then disable it 165 * If one device doesn't support one of these features, then disable it
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 05b9a694e213..e20979dfd6a9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1100,6 +1100,10 @@ struct net_device_ops {
1100 struct ifla_vf_info *ivf); 1100 struct ifla_vf_info *ivf);
1101 int (*ndo_set_vf_link_state)(struct net_device *dev, 1101 int (*ndo_set_vf_link_state)(struct net_device *dev,
1102 int vf, int link_state); 1102 int vf, int link_state);
1103 int (*ndo_get_vf_stats)(struct net_device *dev,
1104 int vf,
1105 struct ifla_vf_stats
1106 *vf_stats);
1103 int (*ndo_set_vf_port)(struct net_device *dev, 1107 int (*ndo_set_vf_port)(struct net_device *dev,
1104 int vf, 1108 int vf,
1105 struct nlattr *port[]); 1109 struct nlattr *port[]);
@@ -1564,7 +1568,7 @@ struct net_device {
1564 const struct net_device_ops *netdev_ops; 1568 const struct net_device_ops *netdev_ops;
1565 const struct ethtool_ops *ethtool_ops; 1569 const struct ethtool_ops *ethtool_ops;
1566#ifdef CONFIG_NET_SWITCHDEV 1570#ifdef CONFIG_NET_SWITCHDEV
1567 const struct swdev_ops *swdev_ops; 1571 const struct switchdev_ops *switchdev_ops;
1568#endif 1572#endif
1569 1573
1570 const struct header_ops *header_ops; 1574 const struct header_ops *header_ops;
@@ -1652,7 +1656,14 @@ struct net_device {
1652 rx_handler_func_t __rcu *rx_handler; 1656 rx_handler_func_t __rcu *rx_handler;
1653 void __rcu *rx_handler_data; 1657 void __rcu *rx_handler_data;
1654 1658
1659#ifdef CONFIG_NET_CLS_ACT
1660 struct tcf_proto __rcu *ingress_cl_list;
1661#endif
1655 struct netdev_queue __rcu *ingress_queue; 1662 struct netdev_queue __rcu *ingress_queue;
1663#ifdef CONFIG_NETFILTER_INGRESS
1664 struct list_head nf_hooks_ingress;
1665#endif
1666
1656 unsigned char broadcast[MAX_ADDR_LEN]; 1667 unsigned char broadcast[MAX_ADDR_LEN];
1657#ifdef CONFIG_RFS_ACCEL 1668#ifdef CONFIG_RFS_ACCEL
1658 struct cpu_rmap *rx_cpu_rmap; 1669 struct cpu_rmap *rx_cpu_rmap;
@@ -1990,6 +2001,7 @@ struct offload_callbacks {
1990 2001
1991struct packet_offload { 2002struct packet_offload {
1992 __be16 type; /* This is really htons(ether_type). */ 2003 __be16 type; /* This is really htons(ether_type). */
2004 u16 priority;
1993 struct offload_callbacks callbacks; 2005 struct offload_callbacks callbacks;
1994 struct list_head list; 2006 struct list_head list;
1995}; 2007};
@@ -2552,10 +2564,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
2552 2564
2553static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 2565static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2554{ 2566{
2555 if (WARN_ON(!dev_queue)) {
2556 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
2557 return;
2558 }
2559 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 2567 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2560} 2568}
2561 2569
@@ -2571,15 +2579,7 @@ static inline void netif_stop_queue(struct net_device *dev)
2571 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); 2579 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2572} 2580}
2573 2581
2574static inline void netif_tx_stop_all_queues(struct net_device *dev) 2582void netif_tx_stop_all_queues(struct net_device *dev);
2575{
2576 unsigned int i;
2577
2578 for (i = 0; i < dev->num_tx_queues; i++) {
2579 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2580 netif_tx_stop_queue(txq);
2581 }
2582}
2583 2583
2584static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 2584static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2585{ 2585{
@@ -2840,6 +2840,9 @@ static inline int netif_set_xps_queue(struct net_device *dev,
2840} 2840}
2841#endif 2841#endif
2842 2842
2843u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2844 unsigned int num_tx_queues);
2845
2843/* 2846/*
2844 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used 2847 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2845 * as a distribution range limit for the returned value. 2848 * as a distribution range limit for the returned value.
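The new packet_offload.priority field above lets offload handlers registered for the same ether_type be ordered; the exact ordering is decided by dev_add_offload() at insertion time. A hedged sketch of a registration follows; the gso_segment callback body and the priority value 10 are placeholders.

/* Illustrative packet_offload registration using the new priority field. */
#include <linux/err.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

static struct sk_buff *example_gso_segment(struct sk_buff *skb,
                                           netdev_features_t features)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static struct packet_offload example_offload = {
        .type = cpu_to_be16(ETH_P_IP),
        .priority = 10, /* orders handlers registered for the same type */
        .callbacks = {
                .gso_segment = example_gso_segment,
        },
};

static int __init example_offload_init(void)
{
        dev_add_offload(&example_offload);
        return 0;
}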
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 63560d0a8dfe..00050dfd9f23 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -10,7 +10,8 @@
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/static_key.h> 12#include <linux/static_key.h>
13#include <uapi/linux/netfilter.h> 13#include <linux/netfilter_defs.h>
14
14#ifdef CONFIG_NETFILTER 15#ifdef CONFIG_NETFILTER
15static inline int NF_DROP_GETERR(int verdict) 16static inline int NF_DROP_GETERR(int verdict)
16{ 17{
@@ -38,9 +39,6 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
38 39
39int netfilter_init(void); 40int netfilter_init(void);
40 41
41/* Largest hook number + 1 */
42#define NF_MAX_HOOKS 8
43
44struct sk_buff; 42struct sk_buff;
45 43
46struct nf_hook_ops; 44struct nf_hook_ops;
@@ -54,10 +52,12 @@ struct nf_hook_state {
54 struct net_device *in; 52 struct net_device *in;
55 struct net_device *out; 53 struct net_device *out;
56 struct sock *sk; 54 struct sock *sk;
55 struct list_head *hook_list;
57 int (*okfn)(struct sock *, struct sk_buff *); 56 int (*okfn)(struct sock *, struct sk_buff *);
58}; 57};
59 58
60static inline void nf_hook_state_init(struct nf_hook_state *p, 59static inline void nf_hook_state_init(struct nf_hook_state *p,
60 struct list_head *hook_list,
61 unsigned int hook, 61 unsigned int hook,
62 int thresh, u_int8_t pf, 62 int thresh, u_int8_t pf,
63 struct net_device *indev, 63 struct net_device *indev,
@@ -71,6 +71,7 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
71 p->in = indev; 71 p->in = indev;
72 p->out = outdev; 72 p->out = outdev;
73 p->sk = sk; 73 p->sk = sk;
74 p->hook_list = hook_list;
74 p->okfn = okfn; 75 p->okfn = okfn;
75} 76}
76 77
@@ -79,16 +80,17 @@ typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
79 const struct nf_hook_state *state); 80 const struct nf_hook_state *state);
80 81
81struct nf_hook_ops { 82struct nf_hook_ops {
82 struct list_head list; 83 struct list_head list;
83 84
84 /* User fills in from here down. */ 85 /* User fills in from here down. */
85 nf_hookfn *hook; 86 nf_hookfn *hook;
86 struct module *owner; 87 struct net_device *dev;
87 void *priv; 88 struct module *owner;
88 u_int8_t pf; 89 void *priv;
89 unsigned int hooknum; 90 u_int8_t pf;
91 unsigned int hooknum;
90 /* Hooks are ordered in ascending priority. */ 92 /* Hooks are ordered in ascending priority. */
91 int priority; 93 int priority;
92}; 94};
93 95
94struct nf_sockopt_ops { 96struct nf_sockopt_ops {
@@ -131,26 +133,33 @@ extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
131#ifdef HAVE_JUMP_LABEL 133#ifdef HAVE_JUMP_LABEL
132extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 134extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
133 135
134static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) 136static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
137 u_int8_t pf, unsigned int hook)
135{ 138{
136 if (__builtin_constant_p(pf) && 139 if (__builtin_constant_p(pf) &&
137 __builtin_constant_p(hook)) 140 __builtin_constant_p(hook))
138 return static_key_false(&nf_hooks_needed[pf][hook]); 141 return static_key_false(&nf_hooks_needed[pf][hook]);
139 142
140 return !list_empty(&nf_hooks[pf][hook]); 143 return !list_empty(nf_hook_list);
141} 144}
142#else 145#else
143static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) 146static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
147 u_int8_t pf, unsigned int hook)
144{ 148{
145 return !list_empty(&nf_hooks[pf][hook]); 149 return !list_empty(nf_hook_list);
146} 150}
147#endif 151#endif
148 152
153static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
154{
155 return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
156}
157
149int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); 158int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
150 159
151/** 160/**
152 * nf_hook_thresh - call a netfilter hook 161 * nf_hook_thresh - call a netfilter hook
153 * 162 *
154 * Returns 1 if the hook has allowed the packet to pass. The function 163 * Returns 1 if the hook has allowed the packet to pass. The function
155 * okfn must be invoked by the caller in this case. Any other return 164 * okfn must be invoked by the caller in this case. Any other return
156 * value indicates the packet has been consumed by the hook. 165 * value indicates the packet has been consumed by the hook.
@@ -166,8 +175,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
166 if (nf_hooks_active(pf, hook)) { 175 if (nf_hooks_active(pf, hook)) {
167 struct nf_hook_state state; 176 struct nf_hook_state state;
168 177
169 nf_hook_state_init(&state, hook, thresh, pf, 178 nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
170 indev, outdev, sk, okfn); 179 pf, indev, outdev, sk, okfn);
171 return nf_hook_slow(skb, &state); 180 return nf_hook_slow(skb, &state);
172 } 181 }
173 return 1; 182 return 1;
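For reference, the reshuffled nf_hook_ops above is still registered through nf_register_hook(); the new dev member only matters for the per-device NFPROTO_NETDEV ingress hooks introduced alongside this change. A hedged sketch of a conventional IPv4 hook whose body simply accepts every packet:

/* Minimal nf_hook_ops registration sketch matching the layout above. */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>

static unsigned int example_hook(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
                                 const struct nf_hook_state *state)
{
        return NF_ACCEPT;
}

static struct nf_hook_ops example_ops = {
        .hook     = example_hook,
        .owner    = THIS_MODULE,
        .pf       = NFPROTO_IPV4,
        .hooknum  = NF_INET_LOCAL_IN,
        .priority = 0,
        /* .dev left NULL; it is only consulted for NFPROTO_NETDEV hooks */
};

static int __init example_nf_init(void)
{
        return nf_register_hook(&example_ops);
}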
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34b172301558..48bb01edcf30 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -108,8 +108,13 @@ struct ip_set_counter {
108 atomic64_t packets; 108 atomic64_t packets;
109}; 109};
110 110
111struct ip_set_comment_rcu {
112 struct rcu_head rcu;
113 char str[0];
114};
115
111struct ip_set_comment { 116struct ip_set_comment {
112 char *str; 117 struct ip_set_comment_rcu __rcu *c;
113}; 118};
114 119
115struct ip_set_skbinfo { 120struct ip_set_skbinfo {
@@ -122,13 +127,13 @@ struct ip_set_skbinfo {
122struct ip_set; 127struct ip_set;
123 128
124#define ext_timeout(e, s) \ 129#define ext_timeout(e, s) \
125(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]) 130((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
126#define ext_counter(e, s) \ 131#define ext_counter(e, s) \
127(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]) 132((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
128#define ext_comment(e, s) \ 133#define ext_comment(e, s) \
129(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]) 134((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
130#define ext_skbinfo(e, s) \ 135#define ext_skbinfo(e, s) \
131(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]) 136((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
132 137
133typedef int (*ipset_adtfn)(struct ip_set *set, void *value, 138typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
134 const struct ip_set_ext *ext, 139 const struct ip_set_ext *ext,
@@ -176,6 +181,9 @@ struct ip_set_type_variant {
176 /* List elements */ 181 /* List elements */
177 int (*list)(const struct ip_set *set, struct sk_buff *skb, 182 int (*list)(const struct ip_set *set, struct sk_buff *skb,
178 struct netlink_callback *cb); 183 struct netlink_callback *cb);
184 /* Keep listing private when resizing runs parallel */
185 void (*uref)(struct ip_set *set, struct netlink_callback *cb,
186 bool start);
179 187
180 /* Return true if "b" set is the same as "a" 188 /* Return true if "b" set is the same as "a"
181 * according to the create set parameters */ 189 * according to the create set parameters */
@@ -223,7 +231,7 @@ struct ip_set {
223 /* The name of the set */ 231 /* The name of the set */
224 char name[IPSET_MAXNAMELEN]; 232 char name[IPSET_MAXNAMELEN];
225 /* Lock protecting the set data */ 233 /* Lock protecting the set data */
226 rwlock_t lock; 234 spinlock_t lock;
227 /* References to the set */ 235 /* References to the set */
228 u32 ref; 236 u32 ref;
229 /* The core set type */ 237 /* The core set type */
@@ -341,12 +349,11 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
341 cpu_to_be64((u64)skbinfo->skbmark << 32 | 349 cpu_to_be64((u64)skbinfo->skbmark << 32 |
342 skbinfo->skbmarkmask))) || 350 skbinfo->skbmarkmask))) ||
343 (skbinfo->skbprio && 351 (skbinfo->skbprio &&
344 nla_put_net32(skb, IPSET_ATTR_SKBPRIO, 352 nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
345 cpu_to_be32(skbinfo->skbprio))) || 353 cpu_to_be32(skbinfo->skbprio))) ||
346 (skbinfo->skbqueue && 354 (skbinfo->skbqueue &&
347 nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, 355 nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
348 cpu_to_be16(skbinfo->skbqueue))); 356 cpu_to_be16(skbinfo->skbqueue)));
349
350} 357}
351 358
352static inline void 359static inline void
@@ -380,12 +387,12 @@ ip_set_init_counter(struct ip_set_counter *counter,
380 387
381/* Netlink CB args */ 388/* Netlink CB args */
382enum { 389enum {
383 IPSET_CB_NET = 0, 390 IPSET_CB_NET = 0, /* net namespace */
384 IPSET_CB_DUMP, 391 IPSET_CB_DUMP, /* dump single set/all sets */
385 IPSET_CB_INDEX, 392 IPSET_CB_INDEX, /* set index */
386 IPSET_CB_ARG0, 393 IPSET_CB_PRIVATE, /* set private data */
394 IPSET_CB_ARG0, /* type specific */
387 IPSET_CB_ARG1, 395 IPSET_CB_ARG1,
388 IPSET_CB_ARG2,
389}; 396};
390 397
391/* register and unregister set references */ 398/* register and unregister set references */
@@ -533,29 +540,9 @@ bitmap_bytes(u32 a, u32 b)
533#include <linux/netfilter/ipset/ip_set_timeout.h> 540#include <linux/netfilter/ipset/ip_set_timeout.h>
534#include <linux/netfilter/ipset/ip_set_comment.h> 541#include <linux/netfilter/ipset/ip_set_comment.h>
535 542
536static inline int 543int
537ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, 544ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
538 const void *e, bool active) 545 const void *e, bool active);
539{
540 if (SET_WITH_TIMEOUT(set)) {
541 unsigned long *timeout = ext_timeout(e, set);
542
543 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
544 htonl(active ? ip_set_timeout_get(timeout)
545 : *timeout)))
546 return -EMSGSIZE;
547 }
548 if (SET_WITH_COUNTER(set) &&
549 ip_set_put_counter(skb, ext_counter(e, set)))
550 return -EMSGSIZE;
551 if (SET_WITH_COMMENT(set) &&
552 ip_set_put_comment(skb, ext_comment(e, set)))
553 return -EMSGSIZE;
554 if (SET_WITH_SKBINFO(set) &&
555 ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
556 return -EMSGSIZE;
557 return 0;
558}
559 546
560#define IP_SET_INIT_KEXT(skb, opt, set) \ 547#define IP_SET_INIT_KEXT(skb, opt, set) \
561 { .bytes = (skb)->len, .packets = 1, \ 548 { .bytes = (skb)->len, .packets = 1, \
@@ -565,8 +552,6 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
565 { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \ 552 { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
566 .timeout = (set)->timeout } 553 .timeout = (set)->timeout }
567 554
568#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
569
570#define IPSET_CONCAT(a, b) a##b 555#define IPSET_CONCAT(a, b) a##b
571#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b) 556#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)
572 557
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 21217ea008d7..8d0248525957 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -16,41 +16,57 @@ ip_set_comment_uget(struct nlattr *tb)
16 return nla_data(tb); 16 return nla_data(tb);
17} 17}
18 18
19/* Called from uadd only, protected by the set spinlock.
20 * The kadt functions don't use the comment extensions in any way.
21 */
19static inline void 22static inline void
20ip_set_init_comment(struct ip_set_comment *comment, 23ip_set_init_comment(struct ip_set_comment *comment,
21 const struct ip_set_ext *ext) 24 const struct ip_set_ext *ext)
22{ 25{
26 struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
23 size_t len = ext->comment ? strlen(ext->comment) : 0; 27 size_t len = ext->comment ? strlen(ext->comment) : 0;
24 28
25 if (unlikely(comment->str)) { 29 if (unlikely(c)) {
26 kfree(comment->str); 30 kfree_rcu(c, rcu);
27 comment->str = NULL; 31 rcu_assign_pointer(comment->c, NULL);
28 } 32 }
29 if (!len) 33 if (!len)
30 return; 34 return;
31 if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) 35 if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
32 len = IPSET_MAX_COMMENT_SIZE; 36 len = IPSET_MAX_COMMENT_SIZE;
33 comment->str = kzalloc(len + 1, GFP_ATOMIC); 37 c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
34 if (unlikely(!comment->str)) 38 if (unlikely(!c))
35 return; 39 return;
36 strlcpy(comment->str, ext->comment, len + 1); 40 strlcpy(c->str, ext->comment, len + 1);
41 rcu_assign_pointer(comment->c, c);
37} 42}
38 43
44/* Used only when dumping a set, protected by rcu_read_lock_bh() */
39static inline int 45static inline int
40ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) 46ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
41{ 47{
42 if (!comment->str) 48 struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
49
50 if (!c)
43 return 0; 51 return 0;
44 return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str); 52 return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
45} 53}
46 54
55/* Called from uadd/udel, flush or the garbage collectors protected
56 * by the set spinlock.
57 * Called when the set is destroyed and when there can't be any user
58 * of the set data anymore.
59 */
47static inline void 60static inline void
48ip_set_comment_free(struct ip_set_comment *comment) 61ip_set_comment_free(struct ip_set_comment *comment)
49{ 62{
50 if (unlikely(!comment->str)) 63 struct ip_set_comment_rcu *c;
64
65 c = rcu_dereference_protected(comment->c, 1);
66 if (unlikely(!c))
51 return; 67 return;
52 kfree(comment->str); 68 kfree_rcu(c, rcu);
53 comment->str = NULL; 69 rcu_assign_pointer(comment->c, NULL);
54} 70}
55 71
56#endif 72#endif
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 83c2f9e0886c..1d6a935c1ac5 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -40,38 +40,33 @@ ip_set_timeout_uget(struct nlattr *tb)
40} 40}
41 41
42static inline bool 42static inline bool
43ip_set_timeout_test(unsigned long timeout) 43ip_set_timeout_expired(unsigned long *t)
44{ 44{
45 return timeout == IPSET_ELEM_PERMANENT || 45 return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
46 time_is_after_jiffies(timeout);
47}
48
49static inline bool
50ip_set_timeout_expired(unsigned long *timeout)
51{
52 return *timeout != IPSET_ELEM_PERMANENT &&
53 time_is_before_jiffies(*timeout);
54} 46}
55 47
56static inline void 48static inline void
57ip_set_timeout_set(unsigned long *timeout, u32 t) 49ip_set_timeout_set(unsigned long *timeout, u32 value)
58{ 50{
59 if (!t) { 51 unsigned long t;
52
53 if (!value) {
60 *timeout = IPSET_ELEM_PERMANENT; 54 *timeout = IPSET_ELEM_PERMANENT;
61 return; 55 return;
62 } 56 }
63 57
64 *timeout = msecs_to_jiffies(t * 1000) + jiffies; 58 t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
65 if (*timeout == IPSET_ELEM_PERMANENT) 59 if (t == IPSET_ELEM_PERMANENT)
66 /* Bingo! :-) */ 60 /* Bingo! :-) */
67 (*timeout)--; 61 t--;
62 *timeout = t;
68} 63}
69 64
70static inline u32 65static inline u32
71ip_set_timeout_get(unsigned long *timeout) 66ip_set_timeout_get(unsigned long *timeout)
72{ 67{
73 return *timeout == IPSET_ELEM_PERMANENT ? 0 : 68 return *timeout == IPSET_ELEM_PERMANENT ? 0 :
74 jiffies_to_msecs(*timeout - jiffies)/1000; 69 jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
75} 70}
76 71
77#endif /* __KERNEL__ */ 72#endif /* __KERNEL__ */
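The reworked timeout helpers keep the existing convention: values arrive in seconds, are stored as an absolute jiffies deadline (IPSET_ELEM_PERMANENT meaning "no timeout"), and ip_set_timeout_get() converts back to seconds. A small hedged usage sketch; the wrapper function is illustrative only.

/* Round trip through the ipset timeout helpers shown above. */
#include <linux/kernel.h>
#include <linux/netfilter/ipset/ip_set.h>

static void example_timeout_usage(void)
{
        unsigned long timeout;

        ip_set_timeout_set(&timeout, 30);       /* expire 30 seconds from now */

        if (!ip_set_timeout_expired(&timeout))
                pr_info("element has %u seconds left\n",
                        ip_set_timeout_get(&timeout));
}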
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index a3e215bb0241..286098a5667f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -62,6 +62,7 @@ struct xt_mtchk_param {
62 void *matchinfo; 62 void *matchinfo;
63 unsigned int hook_mask; 63 unsigned int hook_mask;
64 u_int8_t family; 64 u_int8_t family;
65 bool nft_compat;
65}; 66};
66 67
67/** 68/**
@@ -92,6 +93,7 @@ struct xt_tgchk_param {
92 void *targinfo; 93 void *targinfo;
93 unsigned int hook_mask; 94 unsigned int hook_mask;
94 u_int8_t family; 95 u_int8_t family;
96 bool nft_compat;
95}; 97};
96 98
97/* Target destructor parameters */ 99/* Target destructor parameters */
@@ -222,13 +224,10 @@ struct xt_table_info {
222 unsigned int stacksize; 224 unsigned int stacksize;
223 unsigned int __percpu *stackptr; 225 unsigned int __percpu *stackptr;
224 void ***jumpstack; 226 void ***jumpstack;
225 /* ipt_entry tables: one per CPU */ 227
226 /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ 228 unsigned char entries[0] __aligned(8);
227 void *entries[1];
228}; 229};
229 230
230#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
231 + nr_cpu_ids * sizeof(char *))
232int xt_register_target(struct xt_target *target); 231int xt_register_target(struct xt_target *target);
233void xt_unregister_target(struct xt_target *target); 232void xt_unregister_target(struct xt_target *target);
234int xt_register_targets(struct xt_target *target, unsigned int n); 233int xt_register_targets(struct xt_target *target, unsigned int n);
@@ -351,6 +350,57 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
351 return ret; 350 return ret;
352} 351}
353 352
353
354/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
355 * real (percpu) counter. On !SMP, it's just the packet count,
356 * so nothing needs to be done there.
357 *
358 * xt_percpu_counter_alloc returns the address of the percpu
359 * counter, or 0 on !SMP. We force an alignment of 16 bytes
360 * so that bytes/packets share a common cache line.
361 *
362 * Hence the caller must use IS_ERR_VALUE to check for error; this
363 * allows us to return 0 for single core systems without forcing
364 * callers to deal with SMP vs. NONSMP issues.
365 */
366static inline u64 xt_percpu_counter_alloc(void)
367{
368 if (nr_cpu_ids > 1) {
369 void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
370 sizeof(struct xt_counters));
371
372 if (res == NULL)
373 return (u64) -ENOMEM;
374
375 return (u64) (__force unsigned long) res;
376 }
377
378 return 0;
379}
380static inline void xt_percpu_counter_free(u64 pcnt)
381{
382 if (nr_cpu_ids > 1)
383 free_percpu((void __percpu *) (unsigned long) pcnt);
384}
385
386static inline struct xt_counters *
387xt_get_this_cpu_counter(struct xt_counters *cnt)
388{
389 if (nr_cpu_ids > 1)
390 return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);
391
392 return cnt;
393}
394
395static inline struct xt_counters *
396xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
397{
398 if (nr_cpu_ids > 1)
399 return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);
400
401 return cnt;
402}
403
354struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); 404struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
355void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); 405void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
356 406
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index f2fdb5a52070..6d80fc686323 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -20,13 +20,6 @@ enum nf_br_hook_priorities {
20#define BRNF_BRIDGED_DNAT 0x02 20#define BRNF_BRIDGED_DNAT 0x02
21#define BRNF_NF_BRIDGE_PREROUTING 0x08 21#define BRNF_NF_BRIDGE_PREROUTING 0x08
22 22
23static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
24{
25 if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
26 return PPPOE_SES_HLEN;
27 return 0;
28}
29
30int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); 23int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
31 24
32static inline void br_drop_fake_rtable(struct sk_buff *skb) 25static inline void br_drop_fake_rtable(struct sk_buff *skb)
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index f1bd3962e6b6..8ca6d6464ea3 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -6,7 +6,7 @@
6 * 6 *
7 * ebtables.c,v 2.0, April, 2002 7 * ebtables.c,v 2.0, April, 2002
8 * 8 *
9 * This code is stongly inspired on the iptables code which is 9 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 */ 11 */
12#ifndef __LINUX_BRIDGE_EFF_H 12#ifndef __LINUX_BRIDGE_EFF_H
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
new file mode 100644
index 000000000000..d3a7f8597e82
--- /dev/null
+++ b/include/linux/netfilter_defs.h
@@ -0,0 +1,9 @@
1#ifndef __LINUX_NETFILTER_CORE_H_
2#define __LINUX_NETFILTER_CORE_H_
3
4#include <uapi/linux/netfilter.h>
5
6/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
7#define NF_MAX_HOOKS 8
8
9#endif
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
new file mode 100644
index 000000000000..cb0727fe2b3d
--- /dev/null
+++ b/include/linux/netfilter_ingress.h
@@ -0,0 +1,41 @@
1#ifndef _NETFILTER_INGRESS_H_
2#define _NETFILTER_INGRESS_H_
3
4#include <linux/netfilter.h>
5#include <linux/netdevice.h>
6
7#ifdef CONFIG_NETFILTER_INGRESS
8static inline int nf_hook_ingress_active(struct sk_buff *skb)
9{
10 return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
11 NFPROTO_NETDEV, NF_NETDEV_INGRESS);
12}
13
14static inline int nf_hook_ingress(struct sk_buff *skb)
15{
16 struct nf_hook_state state;
17
18 nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
19 NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
20 skb->dev, NULL, NULL);
21 return nf_hook_slow(skb, &state);
22}
23
24static inline void nf_hook_ingress_init(struct net_device *dev)
25{
26 INIT_LIST_HEAD(&dev->nf_hooks_ingress);
27}
28#else /* CONFIG_NETFILTER_INGRESS */
29static inline int nf_hook_ingress_active(struct sk_buff *skb)
30{
31 return 0;
32}
33
34static inline int nf_hook_ingress(struct sk_buff *skb)
35{
36 return 0;
37}
38
39static inline void nf_hook_ingress_init(struct net_device *dev) {}
40#endif /* CONFIG_NETFILTER_INGRESS */
41#endif /* _NETFILTER_INGRESS_H_ */
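The two ingress helpers above are meant to be paired in the receive path: the cheap nf_hook_ingress_active() check guards the nf_hook_slow() call made by nf_hook_ingress(). A hedged sketch of a caller; the function is illustrative, not the actual core RX-path change.

/* Illustrative caller of the ingress helpers defined above. */
#include <linux/netfilter_ingress.h>
#include <linux/skbuff.h>

static bool example_rx_passes_ingress(struct sk_buff *skb)
{
        /* fast check: static key and/or empty per-device hook list */
        if (!nf_hook_ingress_active(skb))
                return true;

        /* nf_hook_slow() returns 1 when the packet may continue */
        return nf_hook_ingress(skb) == 1;
}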
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 64dad1cc1a4b..8b7d28f3aada 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -25,6 +25,9 @@ void ipv6_netfilter_fini(void);
25struct nf_ipv6_ops { 25struct nf_ipv6_ops {
26 int (*chk_addr)(struct net *net, const struct in6_addr *addr, 26 int (*chk_addr)(struct net *net, const struct in6_addr *addr,
27 const struct net_device *dev, int strict); 27 const struct net_device *dev, int strict);
28 void (*route_input)(struct sk_buff *skb);
29 int (*fragment)(struct sock *sk, struct sk_buff *skb,
30 int (*output)(struct sock *, struct sk_buff *));
28}; 31};
29 32
30extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; 33extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 6835c1279df7..9120edb650a0 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -28,6 +28,8 @@ struct netlink_skb_parms {
28 __u32 dst_group; 28 __u32 dst_group;
29 __u32 flags; 29 __u32 flags;
30 struct sock *sk; 30 struct sock *sk;
31 bool nsid_is_set;
32 int nsid;
31}; 33};
32 34
33#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) 35#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 32201c269890..b8e72aad919c 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -500,6 +500,7 @@ enum {
500 NFSPROC4_CLNT_SEEK, 500 NFSPROC4_CLNT_SEEK,
501 NFSPROC4_CLNT_ALLOCATE, 501 NFSPROC4_CLNT_ALLOCATE,
502 NFSPROC4_CLNT_DEALLOCATE, 502 NFSPROC4_CLNT_DEALLOCATE,
503 NFSPROC4_CLNT_LAYOUTSTATS,
503}; 504};
504 505
505/* nfs41 types */ 506/* nfs41 types */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index b95f914ce083..874b77228fb9 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -219,6 +219,7 @@ struct nfs_inode {
219#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ 219#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */
220#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ 220#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
221#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ 221#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
222#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
222 223
223static inline struct nfs_inode *NFS_I(const struct inode *inode) 224static inline struct nfs_inode *NFS_I(const struct inode *inode)
224{ 225{
@@ -291,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
291 struct nfs_inode *nfsi = NFS_I(inode); 292 struct nfs_inode *nfsi = NFS_I(inode);
292 293
293 spin_lock(&inode->i_lock); 294 spin_lock(&inode->i_lock);
294 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; 295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
296 NFS_INO_REVAL_PAGECACHE |
297 NFS_INO_INVALID_ACCESS |
298 NFS_INO_INVALID_ACL;
295 if (S_ISDIR(inode->i_mode)) 299 if (S_ISDIR(inode->i_mode))
296 nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; 300 nfsi->cache_validity |= NFS_INO_INVALID_DATA;
297 spin_unlock(&inode->i_lock); 301 spin_unlock(&inode->i_lock);
298} 302}
299 303
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 5e1273d4de14..20bc8e51b161 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -220,7 +220,7 @@ struct nfs_server {
220#define NFS_CAP_SYMLINKS (1U << 2) 220#define NFS_CAP_SYMLINKS (1U << 2)
221#define NFS_CAP_ACLS (1U << 3) 221#define NFS_CAP_ACLS (1U << 3)
222#define NFS_CAP_ATOMIC_OPEN (1U << 4) 222#define NFS_CAP_ATOMIC_OPEN (1U << 4)
223#define NFS_CAP_CHANGE_ATTR (1U << 5) 223/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
224#define NFS_CAP_FILEID (1U << 6) 224#define NFS_CAP_FILEID (1U << 6)
225#define NFS_CAP_MODE (1U << 7) 225#define NFS_CAP_MODE (1U << 7)
226#define NFS_CAP_NLINK (1U << 8) 226#define NFS_CAP_NLINK (1U << 8)
@@ -237,5 +237,6 @@ struct nfs_server {
237#define NFS_CAP_SEEK (1U << 19) 237#define NFS_CAP_SEEK (1U << 19)
238#define NFS_CAP_ALLOCATE (1U << 20) 238#define NFS_CAP_ALLOCATE (1U << 20)
239#define NFS_CAP_DEALLOCATE (1U << 21) 239#define NFS_CAP_DEALLOCATE (1U << 21)
240#define NFS_CAP_LAYOUTSTATS (1U << 22)
240 241
241#endif 242#endif
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 3eb072dbce83..f2f650f136ee 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -67,7 +67,6 @@ struct nfs_rw_ops {
67 const fmode_t rw_mode; 67 const fmode_t rw_mode;
68 struct nfs_pgio_header *(*rw_alloc_header)(void); 68 struct nfs_pgio_header *(*rw_alloc_header)(void);
69 void (*rw_free_header)(struct nfs_pgio_header *); 69 void (*rw_free_header)(struct nfs_pgio_header *);
70 void (*rw_release)(struct nfs_pgio_header *);
71 int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *, 70 int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *,
72 struct inode *); 71 struct inode *);
73 void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *); 72 void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 93ab6071bbe9..7bbe50504211 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -316,6 +316,49 @@ struct nfs4_layoutreturn {
316 int rpc_status; 316 int rpc_status;
317}; 317};
318 318
319#define PNFS_LAYOUTSTATS_MAXSIZE 256
320
321struct nfs42_layoutstat_args;
322struct nfs42_layoutstat_devinfo;
323typedef void (*layoutstats_encode_t)(struct xdr_stream *,
324 struct nfs42_layoutstat_args *,
325 struct nfs42_layoutstat_devinfo *);
326
327/* Per file per deviceid layoutstats */
328struct nfs42_layoutstat_devinfo {
329 struct nfs4_deviceid dev_id;
330 __u64 offset;
331 __u64 length;
332 __u64 read_count;
333 __u64 read_bytes;
334 __u64 write_count;
335 __u64 write_bytes;
336 __u32 layout_type;
337 layoutstats_encode_t layoutstats_encode;
338 void *layout_private;
339};
340
341struct nfs42_layoutstat_args {
342 struct nfs4_sequence_args seq_args;
343 struct nfs_fh *fh;
344 struct inode *inode;
345 nfs4_stateid stateid;
346 int num_dev;
347 struct nfs42_layoutstat_devinfo *devinfo;
348};
349
350struct nfs42_layoutstat_res {
351 struct nfs4_sequence_res seq_res;
352 int num_dev;
353 int rpc_status;
354};
355
356struct nfs42_layoutstat_data {
357 struct inode *inode;
358 struct nfs42_layoutstat_args args;
359 struct nfs42_layoutstat_res res;
360};
361
319struct stateowner_id { 362struct stateowner_id {
320 __u64 create_time; 363 __u64 create_time;
321 __u32 uniquifier; 364 __u32 uniquifier;
@@ -984,17 +1027,14 @@ struct nfs4_readlink_res {
984 struct nfs4_sequence_res seq_res; 1027 struct nfs4_sequence_res seq_res;
985}; 1028};
986 1029
987#define NFS4_SETCLIENTID_NAMELEN (127)
988struct nfs4_setclientid { 1030struct nfs4_setclientid {
989 const nfs4_verifier * sc_verifier; 1031 const nfs4_verifier * sc_verifier;
990 unsigned int sc_name_len;
991 char sc_name[NFS4_SETCLIENTID_NAMELEN + 1];
992 u32 sc_prog; 1032 u32 sc_prog;
993 unsigned int sc_netid_len; 1033 unsigned int sc_netid_len;
994 char sc_netid[RPCBIND_MAXNETIDLEN + 1]; 1034 char sc_netid[RPCBIND_MAXNETIDLEN + 1];
995 unsigned int sc_uaddr_len; 1035 unsigned int sc_uaddr_len;
996 char sc_uaddr[RPCBIND_MAXUADDRLEN + 1]; 1036 char sc_uaddr[RPCBIND_MAXUADDRLEN + 1];
997 u32 sc_cb_ident; 1037 struct nfs_client *sc_clnt;
998 struct rpc_cred *sc_cred; 1038 struct rpc_cred *sc_cred;
999}; 1039};
1000 1040
@@ -1142,12 +1182,9 @@ struct nfs41_state_protection {
1142 struct nfs4_op_map allow; 1182 struct nfs4_op_map allow;
1143}; 1183};
1144 1184
1145#define NFS4_EXCHANGE_ID_LEN (48)
1146struct nfs41_exchange_id_args { 1185struct nfs41_exchange_id_args {
1147 struct nfs_client *client; 1186 struct nfs_client *client;
1148 nfs4_verifier *verifier; 1187 nfs4_verifier *verifier;
1149 unsigned int id_len;
1150 char id[NFS4_EXCHANGE_ID_LEN];
1151 u32 flags; 1188 u32 flags;
1152 struct nfs41_state_protection state_protect; 1189 struct nfs41_state_protection state_protect;
1153}; 1190};
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 3d46fb4708e0..f94da0e65dea 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -67,6 +67,7 @@ extern int nmi_watchdog_enabled;
67extern int soft_watchdog_enabled; 67extern int soft_watchdog_enabled;
68extern int watchdog_user_enabled; 68extern int watchdog_user_enabled;
69extern int watchdog_thresh; 69extern int watchdog_thresh;
70extern unsigned long *watchdog_cpumask_bits;
70extern int sysctl_softlockup_all_cpu_backtrace; 71extern int sysctl_softlockup_all_cpu_backtrace;
71struct ctl_table; 72struct ctl_table;
72extern int proc_watchdog(struct ctl_table *, int , 73extern int proc_watchdog(struct ctl_table *, int ,
@@ -77,6 +78,8 @@ extern int proc_soft_watchdog(struct ctl_table *, int ,
77 void __user *, size_t *, loff_t *); 78 void __user *, size_t *, loff_t *);
78extern int proc_watchdog_thresh(struct ctl_table *, int , 79extern int proc_watchdog_thresh(struct ctl_table *, int ,
79 void __user *, size_t *, loff_t *); 80 void __user *, size_t *, loff_t *);
81extern int proc_watchdog_cpumask(struct ctl_table *, int,
82 void __user *, size_t *, loff_t *);
80#endif 83#endif
81 84
82#ifdef CONFIG_HAVE_ACPI_APEI_NMI 85#ifdef CONFIG_HAVE_ACPI_APEI_NMI
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 9ac1a62fc6f5..b02f72bb8e32 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -4,15 +4,20 @@
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved. 7 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as 10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
13 * BSD LICENSE 18 * BSD LICENSE
14 * 19 *
15 * Copyright(c) 2012 Intel Corporation. All rights reserved. 20 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
16 * 21 *
17 * Redistribution and use in source and binary forms, with or without 22 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions 23 * modification, are permitted provided that the following conditions
@@ -40,49 +45,940 @@
40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 45 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 46 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 * 47 *
43 * Intel PCIe NTB Linux driver 48 * PCIe NTB Linux driver
44 * 49 *
45 * Contact Information: 50 * Contact Information:
46 * Jon Mason <jon.mason@intel.com> 51 * Allen Hubbe <Allen.Hubbe@emc.com>
47 */ 52 */
48 53
49struct ntb_transport_qp; 54#ifndef _NTB_H_
55#define _NTB_H_
50 56
51struct ntb_client { 57#include <linux/completion.h>
52 struct device_driver driver; 58#include <linux/device.h>
53 int (*probe)(struct pci_dev *pdev); 59
54 void (*remove)(struct pci_dev *pdev); 60struct ntb_client;
61struct ntb_dev;
62struct pci_dev;
63
64/**
65 * enum ntb_topo - NTB connection topology
66 * @NTB_TOPO_NONE: Topology is unknown or invalid.
67 * @NTB_TOPO_PRI: On primary side of local ntb.
68 * @NTB_TOPO_SEC: On secondary side of remote ntb.
69 * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
70 * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
71 */
72enum ntb_topo {
73 NTB_TOPO_NONE = -1,
74 NTB_TOPO_PRI,
75 NTB_TOPO_SEC,
76 NTB_TOPO_B2B_USD,
77 NTB_TOPO_B2B_DSD,
78};
79
80static inline int ntb_topo_is_b2b(enum ntb_topo topo)
81{
82 switch ((int)topo) {
83 case NTB_TOPO_B2B_USD:
84 case NTB_TOPO_B2B_DSD:
85 return 1;
86 }
87 return 0;
88}
89
90static inline char *ntb_topo_string(enum ntb_topo topo)
91{
92 switch (topo) {
93 case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
94 case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
95 case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
96 case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
97 case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
98 }
99 return "NTB_TOPO_INVALID";
100}
101
102/**
103 * enum ntb_speed - NTB link training speed
104 * @NTB_SPEED_AUTO: Request the max supported speed.
105 * @NTB_SPEED_NONE: Link is not trained to any speed.
106 * @NTB_SPEED_GEN1: Link is trained to gen1 speed.
107 * @NTB_SPEED_GEN2: Link is trained to gen2 speed.
108 * @NTB_SPEED_GEN3: Link is trained to gen3 speed.
109 */
110enum ntb_speed {
111 NTB_SPEED_AUTO = -1,
112 NTB_SPEED_NONE = 0,
113 NTB_SPEED_GEN1 = 1,
114 NTB_SPEED_GEN2 = 2,
115 NTB_SPEED_GEN3 = 3,
116};
117
118/**
119 * enum ntb_width - NTB link training width
120 * @NTB_WIDTH_AUTO: Request the max supported width.
121 * @NTB_WIDTH_NONE: Link is not trained to any width.
122 * @NTB_WIDTH_1: Link is trained to 1 lane width.
123 * @NTB_WIDTH_2: Link is trained to 2 lane width.
124 * @NTB_WIDTH_4: Link is trained to 4 lane width.
125 * @NTB_WIDTH_8: Link is trained to 8 lane width.
126 * @NTB_WIDTH_12: Link is trained to 12 lane width.
127 * @NTB_WIDTH_16: Link is trained to 16 lane width.
128 * @NTB_WIDTH_32: Link is trained to 32 lane width.
129 */
130enum ntb_width {
131 NTB_WIDTH_AUTO = -1,
132 NTB_WIDTH_NONE = 0,
133 NTB_WIDTH_1 = 1,
134 NTB_WIDTH_2 = 2,
135 NTB_WIDTH_4 = 4,
136 NTB_WIDTH_8 = 8,
137 NTB_WIDTH_12 = 12,
138 NTB_WIDTH_16 = 16,
139 NTB_WIDTH_32 = 32,
140};
141
142/**
143 * struct ntb_client_ops - ntb client operations
144 * @probe: Notify client of a new device.
145 * @remove: Notify client to remove a device.
146 */
147struct ntb_client_ops {
148 int (*probe)(struct ntb_client *client, struct ntb_dev *ntb);
149 void (*remove)(struct ntb_client *client, struct ntb_dev *ntb);
150};
151
152static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops)
153{
154 /* commented callbacks are not required: */
155 return
156 ops->probe &&
157 ops->remove &&
158 1;
159}
160
161/**
162 * struct ntb_ctx_ops - ntb driver context operations
163 * @link_event: See ntb_link_event().
164 * @db_event: See ntb_db_event().
165 */
166struct ntb_ctx_ops {
167 void (*link_event)(void *ctx);
168 void (*db_event)(void *ctx, int db_vector);
169};
170
171static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
172{
173 /* commented callbacks are not required: */
174 return
175 /* ops->link_event && */
176 /* ops->db_event && */
177 1;
178}
179
180/**
181 * struct ntb_dev_ops - ntb device operations
182 * @mw_count: See ntb_mw_count().
183 * @mw_get_range: See ntb_mw_get_range().
184 * @mw_set_trans: See ntb_mw_set_trans().
185 * @mw_clear_trans: See ntb_mw_clear_trans().
186 * @link_is_up: See ntb_link_is_up().
187 * @link_enable: See ntb_link_enable().
188 * @link_disable: See ntb_link_disable().
189 * @db_is_unsafe: See ntb_db_is_unsafe().
190 * @db_valid_mask: See ntb_db_valid_mask().
191 * @db_vector_count: See ntb_db_vector_count().
192 * @db_vector_mask: See ntb_db_vector_mask().
193 * @db_read: See ntb_db_read().
194 * @db_set: See ntb_db_set().
195 * @db_clear: See ntb_db_clear().
196 * @db_read_mask: See ntb_db_read_mask().
197 * @db_set_mask: See ntb_db_set_mask().
198 * @db_clear_mask: See ntb_db_clear_mask().
199 * @peer_db_addr: See ntb_peer_db_addr().
200 * @peer_db_read: See ntb_peer_db_read().
201 * @peer_db_set: See ntb_peer_db_set().
202 * @peer_db_clear: See ntb_peer_db_clear().
203 * @peer_db_read_mask: See ntb_peer_db_read_mask().
204 * @peer_db_set_mask: See ntb_peer_db_set_mask().
205 * @peer_db_clear_mask: See ntb_peer_db_clear_mask().
206 * @spad_is_unsafe: See ntb_spad_is_unsafe().
207 * @spad_count: See ntb_spad_count().
208 * @spad_read: See ntb_spad_read().
209 * @spad_write: See ntb_spad_write().
210 * @peer_spad_addr: See ntb_peer_spad_addr().
211 * @peer_spad_read: See ntb_peer_spad_read().
212 * @peer_spad_write: See ntb_peer_spad_write().
213 */
214struct ntb_dev_ops {
215 int (*mw_count)(struct ntb_dev *ntb);
216 int (*mw_get_range)(struct ntb_dev *ntb, int idx,
217 phys_addr_t *base, resource_size_t *size,
218 resource_size_t *align, resource_size_t *align_size);
219 int (*mw_set_trans)(struct ntb_dev *ntb, int idx,
220 dma_addr_t addr, resource_size_t size);
221 int (*mw_clear_trans)(struct ntb_dev *ntb, int idx);
222
223 int (*link_is_up)(struct ntb_dev *ntb,
224 enum ntb_speed *speed, enum ntb_width *width);
225 int (*link_enable)(struct ntb_dev *ntb,
226 enum ntb_speed max_speed, enum ntb_width max_width);
227 int (*link_disable)(struct ntb_dev *ntb);
228
229 int (*db_is_unsafe)(struct ntb_dev *ntb);
230 u64 (*db_valid_mask)(struct ntb_dev *ntb);
231 int (*db_vector_count)(struct ntb_dev *ntb);
232 u64 (*db_vector_mask)(struct ntb_dev *ntb, int db_vector);
233
234 u64 (*db_read)(struct ntb_dev *ntb);
235 int (*db_set)(struct ntb_dev *ntb, u64 db_bits);
236 int (*db_clear)(struct ntb_dev *ntb, u64 db_bits);
237
238 u64 (*db_read_mask)(struct ntb_dev *ntb);
239 int (*db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
240 int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
241
242 int (*peer_db_addr)(struct ntb_dev *ntb,
243 phys_addr_t *db_addr, resource_size_t *db_size);
244 u64 (*peer_db_read)(struct ntb_dev *ntb);
245 int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
246 int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);
247
248 u64 (*peer_db_read_mask)(struct ntb_dev *ntb);
249 int (*peer_db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
250 int (*peer_db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
251
252 int (*spad_is_unsafe)(struct ntb_dev *ntb);
253 int (*spad_count)(struct ntb_dev *ntb);
254
255 u32 (*spad_read)(struct ntb_dev *ntb, int idx);
256 int (*spad_write)(struct ntb_dev *ntb, int idx, u32 val);
257
258 int (*peer_spad_addr)(struct ntb_dev *ntb, int idx,
259 phys_addr_t *spad_addr);
260 u32 (*peer_spad_read)(struct ntb_dev *ntb, int idx);
261 int (*peer_spad_write)(struct ntb_dev *ntb, int idx, u32 val);
55}; 262};
56 263
57enum { 264static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
58 NTB_LINK_DOWN = 0, 265{
59 NTB_LINK_UP, 266 /* commented callbacks are not required: */
267 return
268 ops->mw_count &&
269 ops->mw_get_range &&
270 ops->mw_set_trans &&
271 /* ops->mw_clear_trans && */
272 ops->link_is_up &&
273 ops->link_enable &&
274 ops->link_disable &&
275 /* ops->db_is_unsafe && */
276 ops->db_valid_mask &&
277
278 /* both set, or both unset */
279 (!ops->db_vector_count == !ops->db_vector_mask) &&
280
281 ops->db_read &&
282 /* ops->db_set && */
283 ops->db_clear &&
284 /* ops->db_read_mask && */
285 ops->db_set_mask &&
286 ops->db_clear_mask &&
287 ops->peer_db_addr &&
288 /* ops->peer_db_read && */
289 ops->peer_db_set &&
290 /* ops->peer_db_clear && */
291 /* ops->peer_db_read_mask && */
292 /* ops->peer_db_set_mask && */
293 /* ops->peer_db_clear_mask && */
294 /* ops->spad_is_unsafe && */
295 ops->spad_count &&
296 ops->spad_read &&
297 ops->spad_write &&
298 ops->peer_spad_addr &&
299 /* ops->peer_spad_read && */
300 ops->peer_spad_write &&
301 1;
302}
303
304/**
305 * struct ntb_client - client interested in ntb devices
306 * @drv: Linux driver object.
307 * @ops: See &ntb_client_ops.
308 */
309struct ntb_client {
310 struct device_driver drv;
311 const struct ntb_client_ops ops;
60}; 312};
61 313
62int ntb_register_client(struct ntb_client *drvr); 314#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)
63void ntb_unregister_client(struct ntb_client *drvr); 315
64int ntb_register_client_dev(char *device_name); 316/**
65void ntb_unregister_client_dev(char *device_name); 317 * struct ntb_dev - ntb device
66 318 * @dev: Linux device object.
67struct ntb_queue_handlers { 319 * @pdev: Pci device entry of the ntb.
68 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 320 * @topo: Detected topology of the ntb.
69 void *data, int len); 321 * @ops: See &ntb_dev_ops.
70 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, 322 * @ctx: See &ntb_ctx_ops.
71 void *data, int len); 323 * @ctx_ops: See &ntb_ctx_ops.
72 void (*event_handler)(void *data, int status); 324 */
325struct ntb_dev {
326 struct device dev;
327 struct pci_dev *pdev;
328 enum ntb_topo topo;
329 const struct ntb_dev_ops *ops;
330 void *ctx;
331 const struct ntb_ctx_ops *ctx_ops;
332
333 /* private: */
334
335 /* synchronize setting, clearing, and calling ctx_ops */
336 spinlock_t ctx_lock;
337 /* block unregister until device is fully released */
338 struct completion released;
73}; 339};
74 340
75unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp); 341#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
76unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); 342
77struct ntb_transport_qp * 343/**
78ntb_transport_create_queue(void *data, struct pci_dev *pdev, 344 * ntb_register_client() - register a client for interest in ntb devices
79 const struct ntb_queue_handlers *handlers); 345 * @client: Client context.
80void ntb_transport_free_queue(struct ntb_transport_qp *qp); 346 *
81int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 347 * The client will be added to the list of clients interested in ntb devices.
82 unsigned int len); 348 * The client will be notified of any ntb devices that are not already
83int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 349 * associated with a client, or if ntb devices are registered later.
84 unsigned int len); 350 *
85void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len); 351 * Return: Zero if the client is registered, otherwise an error number.
86void ntb_transport_link_up(struct ntb_transport_qp *qp); 352 */
87void ntb_transport_link_down(struct ntb_transport_qp *qp); 353#define ntb_register_client(client) \
88bool ntb_transport_link_query(struct ntb_transport_qp *qp); 354 __ntb_register_client((client), THIS_MODULE, KBUILD_MODNAME)
355
356int __ntb_register_client(struct ntb_client *client, struct module *mod,
357 const char *mod_name);
358
359/**
360 * ntb_unregister_client() - unregister a client for interest in ntb devices
361 * @client: Client context.
362 *
363 * The client will be removed from the list of clients interested in ntb
364 * devices. If any ntb devices are associated with the client, the client will
365 * be notified to remove those devices.
366 */
367void ntb_unregister_client(struct ntb_client *client);
368
369#define module_ntb_client(__ntb_client) \
370 module_driver(__ntb_client, ntb_register_client, \
371 ntb_unregister_client)
372
373/**
374 * ntb_register_device() - register a ntb device
375 * @ntb: NTB device context.
376 *
377 * The device will be added to the list of ntb devices. If any clients are
378 * interested in ntb devices, each client will be notified of the ntb device,
379 * until at most one client accepts the device.
380 *
381 * Return: Zero if the device is registered, otherwise an error number.
382 */
383int ntb_register_device(struct ntb_dev *ntb);
384
385/**
386 * ntb_unregister_device() - unregister a ntb device
387 * @ntb: NTB device context.
388 *
389 * The device will be removed from the list of ntb devices. If the ntb device
390 * is associated with a client, the client will be notified to remove the
391 * device.
392 */
393void ntb_unregister_device(struct ntb_dev *ntb);
394
395/**
396 * ntb_set_ctx() - associate a driver context with an ntb device
397 * @ntb: NTB device context.
398 * @ctx: Driver context.
399 * @ctx_ops: Driver context operations.
400 *
401 * Associate a driver context and operations with a ntb device. The context is
402 * provided by the client driver, and the driver may associate a different
403 * context with each ntb device.
404 *
405 * Return: Zero if the context is associated, otherwise an error number.
406 */
407int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
408 const struct ntb_ctx_ops *ctx_ops);
409
410/**
411 * ntb_clear_ctx() - disassociate any driver context from an ntb device
412 * @ntb: NTB device context.
413 *
414 * Clear any association that may exist between a driver context and the ntb
415 * device.
416 */
417void ntb_clear_ctx(struct ntb_dev *ntb);
418
419/**
420 * ntb_link_event() - notify driver context of a change in link status
421 * @ntb: NTB device context.
422 *
423 * Notify the driver context that the link status may have changed. The driver
424 * should call ntb_link_is_up() to get the current status.
425 */
426void ntb_link_event(struct ntb_dev *ntb);
427
428/**
429 * ntb_db_event() - notify driver context of a doorbell event
430 * @ntb: NTB device context.
431 * @vector: Interrupt vector number.
432 *
433 * Notify the driver context of a doorbell event. If hardware supports
434 * multiple interrupt vectors for doorbells, the vector number indicates which
435 * vector received the interrupt. The vector number is relative to the first
436 * vector used for doorbells, starting at zero, and must be less than
437 * ntb_db_vector_count(). The driver may call ntb_db_read() to check which
438 * doorbell bits need service, and ntb_db_vector_mask() to determine which of
439 * those bits are associated with the vector number.
440 */
441void ntb_db_event(struct ntb_dev *ntb, int vector);
442
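The read/mask/clear pattern described above can be sketched as a context callback. This assumes the db_event member of the context ops defined earlier in this header receives the driver context and the vector number; struct my_ctx and my_service_doorbells() are hypothetical.

struct my_ctx {
	struct ntb_dev *ntb;
	/* ... other driver state ... */
};

/* Stub for illustration; a real driver would process the data here. */
static void my_service_doorbells(struct my_ctx *mc, u64 db_bits)
{
}

static void my_db_event(void *ctx, int vector)
{
	struct my_ctx *mc = ctx;	/* context passed to ntb_set_ctx() */
	u64 db_bits;

	/* Doorbell bits needing service, restricted to this vector. */
	db_bits = ntb_db_read(mc->ntb) & ntb_db_vector_mask(mc->ntb, vector);

	/* Re-arm the serviced bits, then act on them. */
	ntb_db_clear(mc->ntb, db_bits);
	my_service_doorbells(mc, db_bits);
}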
443/**
444 * ntb_mw_count() - get the number of memory windows
445 * @ntb: NTB device context.
446 *
447 * Hardware and topology may support a different number of memory windows.
448 *
449 * Return: the number of memory windows.
450 */
451static inline int ntb_mw_count(struct ntb_dev *ntb)
452{
453 return ntb->ops->mw_count(ntb);
454}
455
456/**
457 * ntb_mw_get_range() - get the range of a memory window
458 * @ntb: NTB device context.
459 * @idx: Memory window number.
460 * @base: OUT - the base address for mapping the memory window
461 * @size: OUT - the size for mapping the memory window
462 * @align: OUT - the base alignment for translating the memory window
463 * @align_size: OUT - the size alignment for translating the memory window
464 *
465 * Get the range of a memory window. NULL may be given for any output
466 * parameter if the value is not needed. The base and size may be used for
467 * mapping the memory window, to access the peer memory. The alignment and
468 * size may be used for translating the memory window, for the peer to access
469 * memory on the local system.
470 *
471 * Return: Zero on success, otherwise an error number.
472 */
473static inline int ntb_mw_get_range(struct ntb_dev *ntb, int idx,
474 phys_addr_t *base, resource_size_t *size,
475 resource_size_t *align, resource_size_t *align_size)
476{
477 return ntb->ops->mw_get_range(ntb, idx, base, size,
478 align, align_size);
479}
480
481/**
482 * ntb_mw_set_trans() - set the translation of a memory window
483 * @ntb: NTB device context.
484 * @idx: Memory window number.
485 * @addr:	The dma address of the local memory to expose to the peer.
486 * @size: The size of the local memory to expose to the peer.
487 *
488 * Set the translation of a memory window. The peer may access local memory
489 * through the window starting at the address, up to the size. The address
490 * must be aligned to the alignment specified by ntb_mw_get_range(). The size
491 * must be aligned to the size alignment specified by ntb_mw_get_range().
492 *
493 * Return: Zero on success, otherwise an error number.
494 */
495static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
496 dma_addr_t addr, resource_size_t size)
497{
498 return ntb->ops->mw_set_trans(ntb, idx, addr, size);
499}
500
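The two calls above are normally paired: query the window geometry, allocate a suitably aligned DMA buffer, then install the translation. A sketch, assuming the DMA-capable struct device is supplied by the caller and that the reported window size already satisfies the size alignment:

#include <linux/dma-mapping.h>
#include <linux/ntb.h>

static int my_setup_mw(struct ntb_dev *ntb, struct device *dma_dev, int idx)
{
	resource_size_t size, align, align_size;
	phys_addr_t base;
	dma_addr_t dma_addr;
	void *buf;
	int rc;

	rc = ntb_mw_get_range(ntb, idx, &base, &size, &align, &align_size);
	if (rc)
		return rc;

	buf = dma_alloc_coherent(dma_dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* The translated address must also satisfy the base alignment. */
	if (!IS_ALIGNED(dma_addr, align)) {
		dma_free_coherent(dma_dev, size, buf, dma_addr);
		return -ENOMEM;
	}

	return ntb_mw_set_trans(ntb, idx, dma_addr, size);
}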
501/**
502 * ntb_mw_clear_trans() - clear the translation of a memory window
503 * @ntb: NTB device context.
504 * @idx: Memory window number.
505 *
506 * Clear the translation of a memory window. The peer may no longer access
507 * local memory through the window.
508 *
509 * Return: Zero on success, otherwise an error number.
510 */
511static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
512{
513 if (!ntb->ops->mw_clear_trans)
514 return ntb->ops->mw_set_trans(ntb, idx, 0, 0);
515
516 return ntb->ops->mw_clear_trans(ntb, idx);
517}
518
519/**
520 * ntb_link_is_up() - get the current ntb link state
521 * @ntb: NTB device context.
522 * @speed: OUT - The link speed expressed as PCIe generation number.
523 * @width: OUT - The link width expressed as the number of PCIe lanes.
524 *
525 * Get the current state of the ntb link, and optionally the link speed and
526 * width. NULL may be given for either output parameter if the value is not
527 * needed. If the link is up, the speed and width report the negotiated PCIe
528 * generation and lane count.
529 *
530 * Return: One if the link is up, zero if the link is down, otherwise a
531 * negative value indicating the error number.
532 */
533static inline int ntb_link_is_up(struct ntb_dev *ntb,
534 enum ntb_speed *speed, enum ntb_width *width)
535{
536 return ntb->ops->link_is_up(ntb, speed, width);
537}
538
539/**
540 * ntb_link_enable() - enable the link on the secondary side of the ntb
541 * @ntb: NTB device context.
542 * @max_speed: The maximum link speed expressed as PCIe generation number.
543 * @max_width: The maximum link width expressed as the number of PCIe lanes.
544 *
545 * Enable the link on the secondary side of the ntb. This can only be done
546 * from the primary side of the ntb in primary or b2b topology. The ntb device
547 * should train the link to its maximum speed and width, or the requested speed
548 * and width, whichever is smaller, if supported.
549 *
550 * Return: Zero on success, otherwise an error number.
551 */
552static inline int ntb_link_enable(struct ntb_dev *ntb,
553 enum ntb_speed max_speed,
554 enum ntb_width max_width)
555{
556 return ntb->ops->link_enable(ntb, max_speed, max_width);
557}
558
559/**
560 * ntb_link_disable() - disable the link on the secondary side of the ntb
561 * @ntb: NTB device context.
562 *
563 * Disable the link on the secondary side of the ntb. This can only be
564 * done from the primary side of the ntb in primary or b2b topology. The ntb
565 * device should disable the link. Returning from this call must indicate that
566 * a barrier has passed: no more writes may pass in either direction across
567 * the link, unless this call returns an error number.
568 *
569 * Return: Zero on success, otherwise an error number.
570 */
571static inline int ntb_link_disable(struct ntb_dev *ntb)
572{
573 return ntb->ops->link_disable(ntb);
574}
575
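Taken together, a client typically installs its context and then enables the link from its probe path, and reacts to changes in the link_event callback. A sketch; NTB_SPEED_AUTO/NTB_WIDTH_AUTO and the my_* names (including the my_ctx context from the earlier sketch) are assumptions not shown in this hunk.

static void my_link_event(void *ctx)
{
	struct my_ctx *mc = ctx;
	enum ntb_speed speed;
	enum ntb_width width;

	if (ntb_link_is_up(mc->ntb, &speed, &width) > 0)
		pr_info("ntb link up: gen%d, x%d\n", speed, width);
	else
		pr_info("ntb link down\n");
}

static int my_start(struct ntb_dev *ntb, struct my_ctx *mc,
		    const struct ntb_ctx_ops *ops)
{
	int rc;

	rc = ntb_set_ctx(ntb, mc, ops);
	if (rc)
		return rc;

	/* Let the hardware negotiate the best speed and width it can. */
	return ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
}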
576/**
577 * ntb_db_is_unsafe() - check if it is safe to use hardware doorbell
578 * @ntb: NTB device context.
579 *
580 * It is possible for some ntb hardware to be affected by errata. Hardware
581 * drivers can advise clients to avoid using doorbells. Clients may ignore
582 * this advice, though caution is recommended.
583 *
584 * Return: Zero if it is safe to use doorbells, or one if it is not safe.
585 */
586static inline int ntb_db_is_unsafe(struct ntb_dev *ntb)
587{
588 if (!ntb->ops->db_is_unsafe)
589 return 0;
590
591 return ntb->ops->db_is_unsafe(ntb);
592}
593
594/**
595 * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
596 * @ntb: NTB device context.
597 *
598 * Hardware may support a different number or arrangement of doorbell bits.
599 *
600 * Return: A mask of doorbell bits supported by the ntb.
601 */
602static inline u64 ntb_db_valid_mask(struct ntb_dev *ntb)
603{
604 return ntb->ops->db_valid_mask(ntb);
605}
606
607/**
608 * ntb_db_vector_count() - get the number of doorbell interrupt vectors
609 * @ntb: NTB device context.
610 *
611 * Hardware may support a different number of interrupt vectors.
612 *
613 * Return: The number of doorbell interrupt vectors.
614 */
615static inline int ntb_db_vector_count(struct ntb_dev *ntb)
616{
617 if (!ntb->ops->db_vector_count)
618 return 1;
619
620 return ntb->ops->db_vector_count(ntb);
621}
622
623/**
624 * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
625 * @ntb: NTB device context.
626 * @vector: Doorbell vector number.
627 *
628 * Each interrupt vector may have a different number or arrangement of bits.
629 *
630 * Return: A mask of doorbell bits serviced by a vector.
631 */
632static inline u64 ntb_db_vector_mask(struct ntb_dev *ntb, int vector)
633{
634 if (!ntb->ops->db_vector_mask)
635 return ntb_db_valid_mask(ntb);
636
637 return ntb->ops->db_vector_mask(ntb, vector);
638}
639
640/**
641 * ntb_db_read() - read the local doorbell register
642 * @ntb: NTB device context.
643 *
644 * Read the local doorbell register, and return the bits that are set.
645 *
646 * Return: The bits currently set in the local doorbell register.
647 */
648static inline u64 ntb_db_read(struct ntb_dev *ntb)
649{
650 return ntb->ops->db_read(ntb);
651}
652
653/**
654 * ntb_db_set() - set bits in the local doorbell register
655 * @ntb: NTB device context.
656 * @db_bits: Doorbell bits to set.
657 *
658 * Set bits in the local doorbell register, which may generate a local doorbell
659 * interrupt. Bits that were already set must remain set.
660 *
661 * This is unusual, and hardware may not support it.
662 *
663 * Return: Zero on success, otherwise an error number.
664 */
665static inline int ntb_db_set(struct ntb_dev *ntb, u64 db_bits)
666{
667 if (!ntb->ops->db_set)
668 return -EINVAL;
669
670 return ntb->ops->db_set(ntb, db_bits);
671}
672
673/**
674 * ntb_db_clear() - clear bits in the local doorbell register
675 * @ntb: NTB device context.
676 * @db_bits: Doorbell bits to clear.
677 *
678 * Clear bits in the local doorbell register, arming the bits for the next
679 * doorbell.
680 *
681 * Return: Zero on success, otherwise an error number.
682 */
683static inline int ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
684{
685 return ntb->ops->db_clear(ntb, db_bits);
686}
687
688/**
689 * ntb_db_read_mask() - read the local doorbell mask
690 * @ntb: NTB device context.
691 *
692 * Read the local doorbell mask register, and return the bits that are set.
693 *
694 * This is unusual, though hardware is likely to support it.
695 *
696 * Return: The bits currently set in the local doorbell mask register.
697 */
698static inline u64 ntb_db_read_mask(struct ntb_dev *ntb)
699{
700 if (!ntb->ops->db_read_mask)
701 return 0;
702
703 return ntb->ops->db_read_mask(ntb);
704}
705
706/**
707 * ntb_db_set_mask() - set bits in the local doorbell mask
708 * @ntb: NTB device context.
709 * @db_bits: Doorbell mask bits to set.
710 *
711 * Set bits in the local doorbell mask register, preventing doorbell interrupts
712 * from being generated for those doorbell bits. Bits that were already set
713 * must remain set.
714 *
715 * Return: Zero on success, otherwise an error number.
716 */
717static inline int ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
718{
719 return ntb->ops->db_set_mask(ntb, db_bits);
720}
721
722/**
723 * ntb_db_clear_mask() - clear bits in the local doorbell mask
724 * @ntb: NTB device context.
725 * @db_bits: Doorbell bits to clear.
726 *
727 * Clear bits in the local doorbell mask register, allowing doorbell interrupts
728 * to be generated for those doorbell bits. If a doorbell bit is already
729 * set at the time the mask is cleared, and the corresponding mask bit is
730 * changed from set to clear, then the ntb driver must ensure that
731 * ntb_db_event() is called. If the hardware does not generate the interrupt
732 * on clearing the mask bit, then the driver must call ntb_db_event() anyway.
733 *
734 * Return: Zero on success, otherwise an error number.
735 */
736static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
737{
738 return ntb->ops->db_clear_mask(ntb, db_bits);
739}
740
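One common use of the mask, described above, is to keep further doorbell interrupts quiet while deferred work runs on bits that were already captured. A sketch, with a hypothetical my_handle_bits() helper:

/* Stub for illustration; a real driver would process queue data here. */
static void my_handle_bits(struct ntb_dev *ntb, u64 db_bits)
{
}

static void my_process_doorbells(struct ntb_dev *ntb, u64 db_bits)
{
	/* Silence these bits while the deferred work runs. */
	ntb_db_set_mask(ntb, db_bits);
	ntb_db_clear(ntb, db_bits);

	my_handle_bits(ntb, db_bits);

	/* Per the comment above, unmasking must (re)deliver ntb_db_event()
	 * if one of these bits was set again in the meantime.
	 */
	ntb_db_clear_mask(ntb, db_bits);
}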
741/**
742 * ntb_peer_db_addr() - address and size of the peer doorbell register
743 * @ntb: NTB device context.
744 * @db_addr: OUT - The address of the peer doorbell register.
745 * @db_size: OUT - The number of bytes to write the peer doorbell register.
746 *
747 * Return the address of the peer doorbell register. This may be used, for
748 * example, by drivers that offload memory copy operations to a dma engine.
749 * The drivers may wish to ring the peer doorbell at the completion of memory
750 * copy operations. For efficiency, and to simplify ordering of operations
751 * between the dma memory copies and the ringing doorbell, the driver may
752 * append one additional dma memory copy with the doorbell register as the
753 * destination, after the memory copy operations.
754 *
755 * Return: Zero on success, otherwise an error number.
756 */
757static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
758 phys_addr_t *db_addr,
759 resource_size_t *db_size)
760{
761 return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
762}
763
764/**
765 * ntb_peer_db_read() - read the peer doorbell register
766 * @ntb: NTB device context.
767 *
768 * Read the peer doorbell register, and return the bits that are set.
769 *
770 * This is unusual, and hardware may not support it.
771 *
772 * Return: The bits currently set in the peer doorbell register.
773 */
774static inline u64 ntb_peer_db_read(struct ntb_dev *ntb)
775{
776 if (!ntb->ops->peer_db_read)
777 return 0;
778
779 return ntb->ops->peer_db_read(ntb);
780}
781
782/**
783 * ntb_peer_db_set() - set bits in the peer doorbell register
784 * @ntb: NTB device context.
785 * @db_bits: Doorbell bits to set.
786 *
787 * Set bits in the peer doorbell register, which may generate a peer doorbell
788 * interrupt. Bits that were already set must remain set.
789 *
790 * Return: Zero on success, otherwise an error number.
791 */
792static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
793{
794 return ntb->ops->peer_db_set(ntb, db_bits);
795}
796
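In the common case where the CPU (rather than a dma engine) wrote the data, notifying the peer reduces to a single call once the writes are ordered; a minimal sketch, assuming one doorbell bit per queue:

#include <linux/bitops.h>
#include <linux/ntb.h>

static void my_ring_peer(struct ntb_dev *ntb, int qp_num)
{
	/* Writes to the memory window must be visible before the doorbell. */
	wmb();
	ntb_peer_db_set(ntb, BIT_ULL(qp_num));
}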
797/**
798 * ntb_peer_db_clear() - clear bits in the peer doorbell register
799 * @ntb: NTB device context.
800 * @db_bits: Doorbell bits to clear.
801 *
802 * Clear bits in the peer doorbell register, arming the bits for the next
803 * doorbell.
804 *
805 * This is unusual, and hardware may not support it.
806 *
807 * Return: Zero on success, otherwise an error number.
808 */
809static inline int ntb_peer_db_clear(struct ntb_dev *ntb, u64 db_bits)
810{
811	if (!ntb->ops->peer_db_clear)
812 return -EINVAL;
813
814 return ntb->ops->peer_db_clear(ntb, db_bits);
815}
816
817/**
818 * ntb_peer_db_read_mask() - read the peer doorbell mask
819 * @ntb: NTB device context.
820 *
821 * Read the peer doorbell mask register, and return the bits that are set.
822 *
823 * This is unusual, and hardware may not support it.
824 *
825 * Return: The bits currently set in the peer doorbell mask register.
826 */
827static inline u64 ntb_peer_db_read_mask(struct ntb_dev *ntb)
828{
829	if (!ntb->ops->peer_db_read_mask)
830 return 0;
831
832 return ntb->ops->peer_db_read_mask(ntb);
833}
834
835/**
836 * ntb_peer_db_set_mask() - set bits in the peer doorbell mask
837 * @ntb: NTB device context.
838 * @db_bits: Doorbell mask bits to set.
839 *
840 * Set bits in the peer doorbell mask register, preventing doorbell interrupts
841 * from being generated for those doorbell bits. Bits that were already set
842 * must remain set.
843 *
844 * This is unusual, and hardware may not support it.
845 *
846 * Return: Zero on success, otherwise an error number.
847 */
848static inline int ntb_peer_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
849{
850	if (!ntb->ops->peer_db_set_mask)
851 return -EINVAL;
852
853 return ntb->ops->peer_db_set_mask(ntb, db_bits);
854}
855
856/**
857 * ntb_peer_db_clear_mask() - clear bits in the peer doorbell mask
858 * @ntb: NTB device context.
859 * @db_bits: Doorbell bits to clear.
860 *
861 * Clear bits in the peer doorbell mask register, allowing doorbell interrupts
862 * to be generated for those doorbell bits. If the hardware does not
863 * generate the interrupt on clearing the mask bit, then the driver should not
864 * implement this function!
865 *
866 * This is unusual, and hardware may not support it.
867 *
868 * Return: Zero on success, otherwise an error number.
869 */
870static inline int ntb_peer_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
871{
872	if (!ntb->ops->peer_db_clear_mask)
873 return -EINVAL;
874
875 return ntb->ops->peer_db_clear_mask(ntb, db_bits);
876}
877
878/**
879 * ntb_spad_is_unsafe() - check if it is safe to use the hardware scratchpads
880 * @ntb: NTB device context.
881 *
882 * It is possible for some ntb hardware to be affected by errata. Hardware
883 * drivers can advise clients to avoid using scratchpads. Clients may ignore
884 * this advice, though caution is recommended.
885 *
886 * Return: Zero if it is safe to use scratchpads, or one if it is not safe.
887 */
888static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
889{
890 if (!ntb->ops->spad_is_unsafe)
891 return 0;
892
893 return ntb->ops->spad_is_unsafe(ntb);
894}
895
896/**
897 * ntb_spad_count() - get the number of scratchpads
898 * @ntb: NTB device context.
899 *
900 * Hardware and topology may support a different number of scratchpads.
901 *
902 * Return: the number of scratchpads.
903 */
904static inline int ntb_spad_count(struct ntb_dev *ntb)
905{
906 return ntb->ops->spad_count(ntb);
907}
908
909/**
910 * ntb_spad_read() - read the local scratchpad register
911 * @ntb: NTB device context.
912 * @idx: Scratchpad index.
913 *
914 * Read the local scratchpad register, and return the value.
915 *
916 * Return: The value of the local scratchpad register.
917 */
918static inline u32 ntb_spad_read(struct ntb_dev *ntb, int idx)
919{
920 return ntb->ops->spad_read(ntb, idx);
921}
922
923/**
924 * ntb_spad_write() - write the local scratchpad register
925 * @ntb: NTB device context.
926 * @idx: Scratchpad index.
927 * @val: Scratchpad value.
928 *
929 * Write the value to the local scratchpad register.
930 *
931 * Return: Zero on success, otherwise an error number.
932 */
933static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
934{
935 return ntb->ops->spad_write(ntb, idx, val);
936}
937
938/**
939 * ntb_peer_spad_addr() - address of the peer scratchpad register
940 * @ntb: NTB device context.
941 * @idx: Scratchpad index.
942 * @spad_addr: OUT - The address of the peer scratchpad register.
943 *
944 * Return the address of the peer scratchpad register. This may be used, for
945 * example, by drivers that offload memory copy operations to a dma engine.
946 *
947 * Return: Zero on success, otherwise an error number.
948 */
949static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
950 phys_addr_t *spad_addr)
951{
952 return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
953}
954
955/**
956 * ntb_peer_spad_read() - read the peer scratchpad register
957 * @ntb: NTB device context.
958 * @idx: Scratchpad index.
959 *
960 * Read the peer scratchpad register, and return the value.
961 *
962 * Return: The value of the peer scratchpad register.
963 */
964static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
965{
966 return ntb->ops->peer_spad_read(ntb, idx);
967}
968
969/**
970 * ntb_peer_spad_write() - write the peer scratchpad register
971 * @ntb: NTB device context.
972 * @idx: Scratchpad index.
973 * @val: Scratchpad value.
974 *
975 * Write the value to the peer scratchpad register.
976 *
977 * Return: Zero on success, otherwise an error number.
978 */
979static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int idx, u32 val)
980{
981 return ntb->ops->peer_spad_write(ntb, idx, val);
982}
983
984#endif
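The scratchpad calls above are typically used in pairs for a small handshake: each side publishes into the peer's registers and reads back what the peer published into its own. A sketch; the scratchpad index and version value are made up.

#define MY_VERSION_SPAD		0	/* hypothetical scratchpad index */
#define MY_PROTO_VERSION	1	/* hypothetical protocol version */

static int my_handshake(struct ntb_dev *ntb)
{
	u32 peer_version;

	/* Publish our version into the peer's scratchpad space... */
	ntb_peer_spad_write(ntb, MY_VERSION_SPAD, MY_PROTO_VERSION);

	/* ...and read what the peer published into ours. */
	peer_version = ntb_spad_read(ntb, MY_VERSION_SPAD);

	return peer_version == MY_PROTO_VERSION ? 0 : -EAGAIN;
}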
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
new file mode 100644
index 000000000000..2862861366a5
--- /dev/null
+++ b/include/linux/ntb_transport.h
@@ -0,0 +1,85 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
8 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * BSD LICENSE
15 *
16 * Copyright(c) 2012 Intel Corporation. All rights reserved.
17 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
21 * are met:
22 *
23 * * Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer.
25 *   * Redistributions in binary form must reproduce the above copyright
26 * notice, this list of conditions and the following disclaimer in
27 * the documentation and/or other materials provided with the
28 * distribution.
29 * * Neither the name of Intel Corporation nor the names of its
30 * contributors may be used to endorse or promote products derived
31 * from this software without specific prior written permission.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44 *
45 * PCIe NTB Transport Linux driver
46 *
47 * Contact Information:
48 * Jon Mason <jon.mason@intel.com>
49 */
50
51struct ntb_transport_qp;
52
53struct ntb_transport_client {
54 struct device_driver driver;
55 int (*probe)(struct device *client_dev);
56 void (*remove)(struct device *client_dev);
57};
58
59int ntb_transport_register_client(struct ntb_transport_client *drvr);
60void ntb_transport_unregister_client(struct ntb_transport_client *drvr);
61int ntb_transport_register_client_dev(char *device_name);
62void ntb_transport_unregister_client_dev(char *device_name);
63
64struct ntb_queue_handlers {
65 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
66 void *data, int len);
67 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
68 void *data, int len);
69 void (*event_handler)(void *data, int status);
70};
71
72unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
73unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
74struct ntb_transport_qp *
75ntb_transport_create_queue(void *data, struct device *client_dev,
76 const struct ntb_queue_handlers *handlers);
77void ntb_transport_free_queue(struct ntb_transport_qp *qp);
78int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
79 unsigned int len);
80int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
81 unsigned int len);
82void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
83void ntb_transport_link_up(struct ntb_transport_qp *qp);
84void ntb_transport_link_down(struct ntb_transport_qp *qp);
85bool ntb_transport_link_query(struct ntb_transport_qp *qp);
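A minimal consumer of this transport API would register a client whose probe creates a queue, installs the three handlers, and brings the queue link up. The sketch below is illustrative only; all my_* names are hypothetical, and the handler semantics (reposting the buffer on rx, link status in the event handler) are assumptions about typical usage rather than guarantees of this header.

static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			  void *data, int len)
{
	/* Consume len bytes at data, then hand the buffer back for reuse. */
	ntb_transport_rx_enqueue(qp, data, data, len);
}

static void my_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
			  void *data, int len)
{
	/* The buffer at data has been transmitted and may be recycled. */
}

static void my_event_handler(void *data, int status)
{
	pr_info("ntb transport link %s\n", status ? "up" : "down");
}

static const struct ntb_queue_handlers my_handlers = {
	.rx_handler	= my_rx_handler,
	.tx_handler	= my_tx_handler,
	.event_handler	= my_event_handler,
};

static int my_transport_probe(struct device *client_dev)
{
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(NULL, client_dev, &my_handlers);
	if (!qp)
		return -EIO;

	ntb_transport_link_up(qp);
	return 0;
}

static struct ntb_transport_client my_transport_client = {
	.driver.name	= KBUILD_MODNAME,
	.probe		= my_transport_probe,
	/* .remove omitted for brevity; it should free the queue. */
};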
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 8dbd05e70f09..c0d94ed8ce9a 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -74,7 +74,7 @@ struct nvme_dev {
74 struct blk_mq_tag_set tagset; 74 struct blk_mq_tag_set tagset;
75 struct blk_mq_tag_set admin_tagset; 75 struct blk_mq_tag_set admin_tagset;
76 u32 __iomem *dbs; 76 u32 __iomem *dbs;
77 struct pci_dev *pci_dev; 77 struct device *dev;
78 struct dma_pool *prp_page_pool; 78 struct dma_pool *prp_page_pool;
79 struct dma_pool *prp_small_pool; 79 struct dma_pool *prp_small_pool;
80 int instance; 80 int instance;
@@ -92,6 +92,7 @@ struct nvme_dev {
92 work_func_t reset_workfn; 92 work_func_t reset_workfn;
93 struct work_struct reset_work; 93 struct work_struct reset_work;
94 struct work_struct probe_work; 94 struct work_struct probe_work;
95 struct work_struct scan_work;
95 char name[12]; 96 char name[12];
96 char serial[20]; 97 char serial[20];
97 char model[40]; 98 char model[40];
@@ -146,25 +147,15 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
146 return (sector >> (ns->lba_shift - 9)); 147 return (sector >> (ns->lba_shift - 9));
147} 148}
148 149
149/** 150int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
150 * nvme_free_iod - frees an nvme_iod 151 void *buf, unsigned bufflen);
151 * @dev: The device that the I/O was submitted to 152int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
152 * @iod: The memory to free 153 void *buffer, void __user *ubuffer, unsigned bufflen,
153 */ 154 u32 *result, unsigned timeout);
154void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); 155int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
155 156int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
156int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t); 157 struct nvme_id_ns **id);
157struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, 158int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
158 unsigned long addr, unsigned length);
159void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
160 struct nvme_iod *iod);
161int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
162 struct nvme_command *, u32 *);
163int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
164int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
165 u32 *result);
166int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
167 dma_addr_t dma_addr);
168int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, 159int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
169 dma_addr_t dma_addr, u32 *result); 160 dma_addr_t dma_addr, u32 *result);
170int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, 161int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
diff --git a/include/linux/of.h b/include/linux/of.h
index b871ff9d81d7..edc068d19c79 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -120,6 +120,12 @@ extern struct device_node *of_aliases;
120extern struct device_node *of_stdout; 120extern struct device_node *of_stdout;
121extern raw_spinlock_t devtree_lock; 121extern raw_spinlock_t devtree_lock;
122 122
123/* flag descriptions (need to be visible even when !CONFIG_OF) */
124#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
125#define OF_DETACHED 2 /* node has been detached from the device tree */
126#define OF_POPULATED 3 /* device already created for the node */
127#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
128
123#ifdef CONFIG_OF 129#ifdef CONFIG_OF
124void of_core_init(void); 130void of_core_init(void);
125 131
@@ -128,7 +134,7 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
128 return fwnode && fwnode->type == FWNODE_OF; 134 return fwnode && fwnode->type == FWNODE_OF;
129} 135}
130 136
131static inline struct device_node *of_node(struct fwnode_handle *fwnode) 137static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
132{ 138{
133 return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; 139 return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
134} 140}
@@ -219,12 +225,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
219#define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) 225#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
220#endif 226#endif
221 227
222/* flag descriptions */
223#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
224#define OF_DETACHED 2 /* node has been detached from the device tree */
225#define OF_POPULATED 3 /* device already created for the node */
226#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
227
228#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) 228#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
229#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) 229#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
230 230
@@ -387,7 +387,7 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
387 return false; 387 return false;
388} 388}
389 389
390static inline struct device_node *of_node(struct fwnode_handle *fwnode) 390static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
391{ 391{
392 return NULL; 392 return NULL;
393} 393}
@@ -428,6 +428,11 @@ static inline struct device_node *of_find_node_opts_by_path(const char *path,
428 return NULL; 428 return NULL;
429} 429}
430 430
431static inline struct device_node *of_find_node_by_phandle(phandle handle)
432{
433 return NULL;
434}
435
431static inline struct device_node *of_get_parent(const struct device_node *node) 436static inline struct device_node *of_get_parent(const struct device_node *node)
432{ 437{
433 return NULL; 438 return NULL;
@@ -673,7 +678,10 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
673#if defined(CONFIG_OF) && defined(CONFIG_NUMA) 678#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
674extern int of_node_to_nid(struct device_node *np); 679extern int of_node_to_nid(struct device_node *np);
675#else 680#else
676static inline int of_node_to_nid(struct device_node *device) { return 0; } 681static inline int of_node_to_nid(struct device_node *device)
682{
683 return NUMA_NO_NODE;
684}
677#endif 685#endif
678 686
679static inline struct device_node *of_find_matching_node( 687static inline struct device_node *of_find_matching_node(
@@ -821,7 +829,7 @@ static inline int of_property_read_string_index(struct device_node *np,
821 * @propname: name of the property to be searched. 829 * @propname: name of the property to be searched.
822 * 830 *
823 * Search for a property in a device node. 831 * Search for a property in a device node.
824 * Returns true if the property exist false otherwise. 832 * Returns true if the property exists false otherwise.
825 */ 833 */
826static inline bool of_property_read_bool(const struct device_node *np, 834static inline bool of_property_read_bool(const struct device_node *np,
827 const char *propname) 835 const char *propname)
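As a usage note for the helper documented in this hunk, checking a boolean property is a one-liner; the property name below is made up.

#include <linux/of.h>

static bool my_widget_enabled(const struct device_node *np)
{
	/* True when the (hypothetical) property is present in the node. */
	return of_property_read_bool(np, "example,enable-widget");
}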
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 22801b10cef5..cc7dd687a89d 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -33,6 +33,8 @@ extern int of_device_add(struct platform_device *pdev);
33extern int of_device_register(struct platform_device *ofdev); 33extern int of_device_register(struct platform_device *ofdev);
34extern void of_device_unregister(struct platform_device *ofdev); 34extern void of_device_unregister(struct platform_device *ofdev);
35 35
36extern const void *of_device_get_match_data(const struct device *dev);
37
36extern ssize_t of_device_get_modalias(struct device *dev, 38extern ssize_t of_device_get_modalias(struct device *dev,
37 char *str, ssize_t len); 39 char *str, ssize_t len);
38 40
@@ -57,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
57#else /* CONFIG_OF */ 59#else /* CONFIG_OF */
58 60
59static inline int of_driver_match_device(struct device *dev, 61static inline int of_driver_match_device(struct device *dev,
60 struct device_driver *drv) 62 const struct device_driver *drv)
61{ 63{
62 return 0; 64 return 0;
63} 65}
@@ -65,6 +67,11 @@ static inline int of_driver_match_device(struct device *dev,
65static inline void of_device_uevent(struct device *dev, 67static inline void of_device_uevent(struct device *dev,
66 struct kobj_uevent_env *env) { } 68 struct kobj_uevent_env *env) { }
67 69
70static inline const void *of_device_get_match_data(const struct device *dev)
71{
72 return NULL;
73}
74
68static inline int of_device_get_modalias(struct device *dev, 75static inline int of_device_get_modalias(struct device *dev,
69 char *str, ssize_t len) 76 char *str, ssize_t len)
70{ 77{
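The new of_device_get_match_data() helper pairs with the .data pointers of an of_device_id table; a sketch, with a hypothetical configuration struct and compatible string. For the lookup to succeed, the same table must also be wired up through the driver's .of_match_table.

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_chip_cfg {
	unsigned int channels;
};

static const struct my_chip_cfg my_chip_a_cfg = { .channels = 4 };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = &my_chip_a_cfg },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_chip_cfg *cfg = of_device_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;

	dev_info(&pdev->dev, "%u channels\n", cfg->channels);
	return 0;
}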
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index 56bc026c143f..98ba7525929e 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -23,6 +23,9 @@ struct of_dma {
23 struct device_node *of_node; 23 struct device_node *of_node;
24 struct dma_chan *(*of_dma_xlate) 24 struct dma_chan *(*of_dma_xlate)
25 (struct of_phandle_args *, struct of_dma *); 25 (struct of_phandle_args *, struct of_dma *);
26 void *(*of_dma_route_allocate)
27 (struct of_phandle_args *, struct of_dma *);
28 struct dma_router *dma_router;
26 void *of_dma_data; 29 void *of_dma_data;
27}; 30};
28 31
@@ -37,12 +40,20 @@ extern int of_dma_controller_register(struct device_node *np,
37 (struct of_phandle_args *, struct of_dma *), 40 (struct of_phandle_args *, struct of_dma *),
38 void *data); 41 void *data);
39extern void of_dma_controller_free(struct device_node *np); 42extern void of_dma_controller_free(struct device_node *np);
43
44extern int of_dma_router_register(struct device_node *np,
45 void *(*of_dma_route_allocate)
46 (struct of_phandle_args *, struct of_dma *),
47 struct dma_router *dma_router);
48#define of_dma_router_free of_dma_controller_free
49
40extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, 50extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
41 const char *name); 51 const char *name);
42extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, 52extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
43 struct of_dma *ofdma); 53 struct of_dma *ofdma);
44extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, 54extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
45 struct of_dma *ofdma); 55 struct of_dma *ofdma);
56
46#else 57#else
47static inline int of_dma_controller_register(struct device_node *np, 58static inline int of_dma_controller_register(struct device_node *np,
48 struct dma_chan *(*of_dma_xlate) 59 struct dma_chan *(*of_dma_xlate)
@@ -56,6 +67,16 @@ static inline void of_dma_controller_free(struct device_node *np)
56{ 67{
57} 68}
58 69
70static inline int of_dma_router_register(struct device_node *np,
71 void *(*of_dma_route_allocate)
72 (struct of_phandle_args *, struct of_dma *),
73 struct dma_router *dma_router)
74{
75 return -ENODEV;
76}
77
78#define of_dma_router_free of_dma_controller_free
79
59static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, 80static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
60 const char *name) 81 const char *name)
61{ 82{
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 587ee507965d..df9ef3801812 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -37,7 +37,7 @@ extern bool of_fdt_is_big_endian(const void *blob,
37 unsigned long node); 37 unsigned long node);
38extern int of_fdt_match(const void *blob, unsigned long node, 38extern int of_fdt_match(const void *blob, unsigned long node,
39 const char *const *compat); 39 const char *const *compat);
40extern void of_fdt_unflatten_tree(unsigned long *blob, 40extern void of_fdt_unflatten_tree(const unsigned long *blob,
41 struct device_node **mynodes); 41 struct device_node **mynodes);
42 42
43/* TBD: Temporary export of fdt globals - remove when code fully merged */ 43/* TBD: Temporary export of fdt globals - remove when code fully merged */
@@ -64,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
64extern int early_init_dt_scan_memory(unsigned long node, const char *uname, 64extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
65 int depth, void *data); 65 int depth, void *data);
66extern void early_init_fdt_scan_reserved_mem(void); 66extern void early_init_fdt_scan_reserved_mem(void);
67extern void early_init_fdt_reserve_self(void);
67extern void early_init_dt_add_memory_arch(u64 base, u64 size); 68extern void early_init_dt_add_memory_arch(u64 base, u64 size);
68extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, 69extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
69 bool no_map); 70 bool no_map);
@@ -91,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset);
91extern void of_fdt_limit_memory(int limit); 92extern void of_fdt_limit_memory(int limit);
92#else /* CONFIG_OF_FLATTREE */ 93#else /* CONFIG_OF_FLATTREE */
93static inline void early_init_fdt_scan_reserved_mem(void) {} 94static inline void early_init_fdt_scan_reserved_mem(void) {}
95static inline void early_init_fdt_reserve_self(void) {}
94static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } 96static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
95static inline void unflatten_device_tree(void) {} 97static inline void unflatten_device_tree(void) {}
96static inline void unflatten_and_copy_device_tree(void) {} 98static inline void unflatten_and_copy_device_tree(void) {}
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 7bc92e050608..f8bcd0e21a26 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -45,6 +45,8 @@ int of_graph_parse_endpoint(const struct device_node *node,
45struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id); 45struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
46struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, 46struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
47 struct device_node *previous); 47 struct device_node *previous);
48struct device_node *of_graph_get_endpoint_by_regs(
49 const struct device_node *parent, int port_reg, int reg);
48struct device_node *of_graph_get_remote_port_parent( 50struct device_node *of_graph_get_remote_port_parent(
49 const struct device_node *node); 51 const struct device_node *node);
50struct device_node *of_graph_get_remote_port(const struct device_node *node); 52struct device_node *of_graph_get_remote_port(const struct device_node *node);
@@ -69,6 +71,12 @@ static inline struct device_node *of_graph_get_next_endpoint(
69 return NULL; 71 return NULL;
70} 72}
71 73
74static inline struct device_node *of_graph_get_endpoint_by_regs(
75 const struct device_node *parent, int port_reg, int reg)
76{
77 return NULL;
78}
79
72static inline struct device_node *of_graph_get_remote_port_parent( 80static inline struct device_node *of_graph_get_remote_port_parent(
73 const struct device_node *node) 81 const struct device_node *node)
74{ 82{
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 44b2f6f7bbd8..7deecb7bca5e 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -32,6 +32,8 @@ enum oom_scan_t {
32/* Thread is the potential origin of an oom condition; kill first on oom */ 32/* Thread is the potential origin of an oom condition; kill first on oom */
33#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1) 33#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)
34 34
35extern struct mutex oom_lock;
36
35static inline void set_current_oom_origin(void) 37static inline void set_current_oom_origin(void)
36{ 38{
37 current->signal->oom_flags |= OOM_FLAG_ORIGIN; 39 current->signal->oom_flags |= OOM_FLAG_ORIGIN;
@@ -47,9 +49,7 @@ static inline bool oom_task_origin(const struct task_struct *p)
47 return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN); 49 return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
48} 50}
49 51
50extern void mark_tsk_oom_victim(struct task_struct *tsk); 52extern void mark_oom_victim(struct task_struct *tsk);
51
52extern void unmark_oom_victim(void);
53 53
54extern unsigned long oom_badness(struct task_struct *p, 54extern unsigned long oom_badness(struct task_struct *p,
55 struct mem_cgroup *memcg, const nodemask_t *nodemask, 55 struct mem_cgroup *memcg, const nodemask_t *nodemask,
@@ -62,9 +62,6 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
62 struct mem_cgroup *memcg, nodemask_t *nodemask, 62 struct mem_cgroup *memcg, nodemask_t *nodemask,
63 const char *message); 63 const char *message);
64 64
65extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
66extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
67
68extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, 65extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
69 int order, const nodemask_t *nodemask, 66 int order, const nodemask_t *nodemask,
70 struct mem_cgroup *memcg); 67 struct mem_cgroup *memcg);
@@ -75,6 +72,9 @@ extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
75 72
76extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, 73extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
77 int order, nodemask_t *mask, bool force_kill); 74 int order, nodemask_t *mask, bool force_kill);
75
76extern void exit_oom_victim(void);
77
78extern int register_oom_notifier(struct notifier_block *nb); 78extern int register_oom_notifier(struct notifier_block *nb);
79extern int unregister_oom_notifier(struct notifier_block *nb); 79extern int unregister_oom_notifier(struct notifier_block *nb);
80 80
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34e040b34e9..41c93844fb1d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
631 1 << PG_private | 1 << PG_private_2 | \ 631 1 << PG_private | 1 << PG_private_2 | \
632 1 << PG_writeback | 1 << PG_reserved | \ 632 1 << PG_writeback | 1 << PG_reserved | \
633 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 633 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
634 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ 634 1 << PG_unevictable | __PG_MLOCKED | \
635 __PG_COMPOUND_LOCK) 635 __PG_COMPOUND_LOCK)
636 636
637/* 637/*
638 * Flags checked when a page is prepped for return by the page allocator. 638 * Flags checked when a page is prepped for return by the page allocator.
639 * Pages being prepped should not have any flags set. If they are set, 639 * Pages being prepped should not have these flags set. If they are set,
640 * there has been a kernel bug or struct page corruption. 640 * there has been a kernel bug or struct page corruption.
641 *
642 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
643 * alloc-free cycle to prevent from reusing the page.
641 */ 644 */
642#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) 645#define PAGE_FLAGS_CHECK_AT_PREP \
646 (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
643 647
644#define PAGE_FLAGS_PRIVATE \ 648#define PAGE_FLAGS_PRIVATE \
645 (1 << PG_private | 1 << PG_private_2) 649 (1 << PG_private | 1 << PG_private_2)
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index b48c3471c254..cacaabea8a09 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,6 +8,7 @@ extern struct page_ext_operations page_owner_ops;
8extern void __reset_page_owner(struct page *page, unsigned int order); 8extern void __reset_page_owner(struct page *page, unsigned int order);
9extern void __set_page_owner(struct page *page, 9extern void __set_page_owner(struct page *page,
10 unsigned int order, gfp_t gfp_mask); 10 unsigned int order, gfp_t gfp_mask);
11extern gfp_t __get_page_owner_gfp(struct page *page);
11 12
12static inline void reset_page_owner(struct page *page, unsigned int order) 13static inline void reset_page_owner(struct page *page, unsigned int order)
13{ 14{
@@ -25,6 +26,14 @@ static inline void set_page_owner(struct page *page,
25 26
26 __set_page_owner(page, order, gfp_mask); 27 __set_page_owner(page, order, gfp_mask);
27} 28}
29
30static inline gfp_t get_page_owner_gfp(struct page *page)
31{
32 if (likely(!page_owner_inited))
33 return 0;
34
35 return __get_page_owner_gfp(page);
36}
28#else 37#else
29static inline void reset_page_owner(struct page *page, unsigned int order) 38static inline void reset_page_owner(struct page *page, unsigned int order)
30{ 39{
@@ -33,6 +42,10 @@ static inline void set_page_owner(struct page *page,
33 unsigned int order, gfp_t gfp_mask) 42 unsigned int order, gfp_t gfp_mask)
34{ 43{
35} 44}
45static inline gfp_t get_page_owner_gfp(struct page *page)
46{
47 return 0;
48}
36 49
37#endif /* CONFIG_PAGE_OWNER */ 50#endif /* CONFIG_PAGE_OWNER */
38#endif /* __LINUX_PAGE_OWNER_H */ 51#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b3736f7065c..a6c78e00ea96 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -651,7 +651,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
651int add_to_page_cache_lru(struct page *page, struct address_space *mapping, 651int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
652 pgoff_t index, gfp_t gfp_mask); 652 pgoff_t index, gfp_t gfp_mask);
653extern void delete_from_page_cache(struct page *page); 653extern void delete_from_page_cache(struct page *page);
654extern void __delete_from_page_cache(struct page *page, void *shadow); 654extern void __delete_from_page_cache(struct page *page, void *shadow,
655 struct mem_cgroup *memcg);
655int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); 656int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
656 657
657/* 658/*
@@ -670,4 +671,10 @@ static inline int add_to_page_cache(struct page *page,
670 return error; 671 return error;
671} 672}
672 673
674static inline unsigned long dir_pages(struct inode *inode)
675{
676 return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
677 PAGE_CACHE_SHIFT;
678}
679
673#endif /* _LINUX_PAGEMAP_H */ 680#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/parport.h b/include/linux/parport.h
index c22f12547324..58e3c64c6b49 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -13,6 +13,7 @@
13#include <linux/wait.h> 13#include <linux/wait.h>
14#include <linux/irqreturn.h> 14#include <linux/irqreturn.h>
15#include <linux/semaphore.h> 15#include <linux/semaphore.h>
16#include <linux/device.h>
16#include <asm/ptrace.h> 17#include <asm/ptrace.h>
17#include <uapi/linux/parport.h> 18#include <uapi/linux/parport.h>
18 19
@@ -145,6 +146,8 @@ struct pardevice {
145 unsigned int flags; 146 unsigned int flags;
146 struct pardevice *next; 147 struct pardevice *next;
147 struct pardevice *prev; 148 struct pardevice *prev;
149 struct device dev;
150 bool devmodel;
148 struct parport_state *state; /* saved status over preemption */ 151 struct parport_state *state; /* saved status over preemption */
149 wait_queue_head_t wait_q; 152 wait_queue_head_t wait_q;
150 unsigned long int time; 153 unsigned long int time;
@@ -156,6 +159,8 @@ struct pardevice {
156 void * sysctl_table; 159 void * sysctl_table;
157}; 160};
158 161
162#define to_pardevice(n) container_of(n, struct pardevice, dev)
163
159/* IEEE1284 information */ 164/* IEEE1284 information */
160 165
161/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL 166/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL
@@ -195,7 +200,7 @@ struct parport {
195 * This may unfortunately be null if the 200 * This may unfortunately be null if the
196 * port has a legacy driver. 201 * port has a legacy driver.
197 */ 202 */
198 203 struct device bus_dev; /* to link with the bus */
199 struct parport *physport; 204 struct parport *physport;
200 /* If this is a non-default mux 205 /* If this is a non-default mux
201 parport, i.e. we're a clone of a real 206 parport, i.e. we're a clone of a real
@@ -245,15 +250,26 @@ struct parport {
245 struct parport *slaves[3]; 250 struct parport *slaves[3];
246}; 251};
247 252
253#define to_parport_dev(n) container_of(n, struct parport, bus_dev)
254
248#define DEFAULT_SPIN_TIME 500 /* us */ 255#define DEFAULT_SPIN_TIME 500 /* us */
249 256
250struct parport_driver { 257struct parport_driver {
251 const char *name; 258 const char *name;
252 void (*attach) (struct parport *); 259 void (*attach) (struct parport *);
253 void (*detach) (struct parport *); 260 void (*detach) (struct parport *);
261 void (*match_port)(struct parport *);
262 int (*probe)(struct pardevice *);
263 struct device_driver driver;
264 bool devmodel;
254 struct list_head list; 265 struct list_head list;
255}; 266};
256 267
268#define to_parport_driver(n) container_of(n, struct parport_driver, driver)
269
270int parport_bus_init(void);
271void parport_bus_exit(void);
272
257/* parport_register_port registers a new parallel port at the given 273/* parport_register_port registers a new parallel port at the given
258 address (if one does not already exist) and returns a pointer to it. 274 address (if one does not already exist) and returns a pointer to it.
259 This entails claiming the I/O region, IRQ and DMA. NULL is returned 275 This entails claiming the I/O region, IRQ and DMA. NULL is returned
@@ -272,10 +288,20 @@ void parport_announce_port (struct parport *port);
272extern void parport_remove_port(struct parport *port); 288extern void parport_remove_port(struct parport *port);
273 289
274/* Register a new high-level driver. */ 290/* Register a new high-level driver. */
275extern int parport_register_driver (struct parport_driver *); 291
292int __must_check __parport_register_driver(struct parport_driver *,
293 struct module *,
294 const char *mod_name);
295/*
296 * parport_register_driver must be a macro so that KBUILD_MODNAME can
297 * be expanded
298 */
299#define parport_register_driver(driver) \
300 __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
276 301
277/* Unregister a high-level driver. */ 302/* Unregister a high-level driver. */
278extern void parport_unregister_driver (struct parport_driver *); 303extern void parport_unregister_driver (struct parport_driver *);
304void parport_unregister_driver(struct parport_driver *);
279 305
280/* If parport_register_driver doesn't fit your needs, perhaps 306/* If parport_register_driver doesn't fit your needs, perhaps
281 * parport_find_xxx does. */ 307 * parport_find_xxx does. */
@@ -288,6 +314,15 @@ extern irqreturn_t parport_irq_handler(int irq, void *dev_id);
288/* Reference counting for ports. */ 314/* Reference counting for ports. */
289extern struct parport *parport_get_port (struct parport *); 315extern struct parport *parport_get_port (struct parport *);
290extern void parport_put_port (struct parport *); 316extern void parport_put_port (struct parport *);
317void parport_del_port(struct parport *);
318
319struct pardev_cb {
320 int (*preempt)(void *);
321 void (*wakeup)(void *);
322 void *private;
323 void (*irq_func)(void *);
324 unsigned int flags;
325};
291 326
292/* parport_register_device declares that a device is connected to a 327/* parport_register_device declares that a device is connected to a
293 port, and tells the kernel all it needs to know. 328 port, and tells the kernel all it needs to know.
@@ -301,6 +336,10 @@ struct pardevice *parport_register_device(struct parport *port,
301 void (*irq_func)(void *), 336 void (*irq_func)(void *),
302 int flags, void *handle); 337 int flags, void *handle);
303 338
339struct pardevice *
340parport_register_dev_model(struct parport *port, const char *name,
341 const struct pardev_cb *par_dev_cb, int cnt);
342
304/* parport_unregister unlinks a device from the chain. */ 343/* parport_unregister unlinks a device from the chain. */
305extern void parport_unregister_device(struct pardevice *dev); 344extern void parport_unregister_device(struct pardevice *dev);
306 345
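Under the device-model registration added here, a driver provides match_port/detach callbacks, sets devmodel, and registers through the KBUILD_MODNAME-expanding macro; a sketch, with all my_* names hypothetical and error handling trimmed:

#include <linux/module.h>
#include <linux/parport.h>

static struct pardevice *my_pardev;

static void my_match_port(struct parport *port)
{
	struct pardev_cb cb = { };	/* no preempt/wakeup/irq callbacks */

	my_pardev = parport_register_dev_model(port, "my_pp_dev", &cb, 0);
	if (!my_pardev)
		pr_err("my_pp_dev: could not attach to %s\n", port->name);
}

static void my_detach(struct parport *port)
{
	/* Simplified: a real driver would track which port it attached to. */
	if (my_pardev) {
		parport_unregister_device(my_pardev);
		my_pardev = NULL;
	}
}

static struct parport_driver my_pp_driver = {
	.name		= "my_pp_dev",
	.match_port	= my_match_port,
	.detach		= my_detach,
	.devmodel	= true,
};

static int __init my_pp_init(void)
{
	return parport_register_driver(&my_pp_driver);
}
module_init(my_pp_init);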
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h
index 3cc21c9cc1e8..9fade5dd2e86 100644
--- a/include/linux/pata_arasan_cf_data.h
+++ b/include/linux/pata_arasan_cf_data.h
@@ -4,7 +4,7 @@
4 * Arasan Compact Flash host controller platform data header file 4 * Arasan Compact Flash host controller platform data header file
5 * 5 *
6 * Copyright (C) 2011 ST Microelectronics 6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cb63a7b522ef..fcff8f865341 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2330,6 +2330,8 @@
2330#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea 2330#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea
2331#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb 2331#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb
2332 2332
2333#define PCI_VENDOR_ID_CAVIUM 0x177d
2334
2333#define PCI_VENDOR_ID_BELKIN 0x1799 2335#define PCI_VENDOR_ID_BELKIN 0x1799
2334#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f 2336#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
2335 2337
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1b82d44b0a02..2027809433b3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -300,6 +300,11 @@ struct pmu {
300 * Free pmu-private AUX data structures 300 * Free pmu-private AUX data structures
301 */ 301 */
302 void (*free_aux) (void *aux); /* optional */ 302 void (*free_aux) (void *aux); /* optional */
303
304 /*
305 * Filter events for PMU-specific reasons.
306 */
307 int (*filter_match) (struct perf_event *event); /* optional */
303}; 308};
304 309
305/** 310/**
@@ -479,7 +484,7 @@ struct perf_event {
479 void *overflow_handler_context; 484 void *overflow_handler_context;
480 485
481#ifdef CONFIG_EVENT_TRACING 486#ifdef CONFIG_EVENT_TRACING
482 struct ftrace_event_call *tp_event; 487 struct trace_event_call *tp_event;
483 struct event_filter *filter; 488 struct event_filter *filter;
484#ifdef CONFIG_FUNCTION_TRACER 489#ifdef CONFIG_FUNCTION_TRACER
485 struct ftrace_ops ftrace_ops; 490 struct ftrace_ops ftrace_ops;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 685809835b5c..a26c3f84b8dd 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -181,6 +181,9 @@ struct mii_bus {
181 /* PHY addresses to be ignored when probing */ 181 /* PHY addresses to be ignored when probing */
182 u32 phy_mask; 182 u32 phy_mask;
183 183
184 /* PHY addresses to ignore the TA/read failure */
185 u32 phy_ignore_ta_mask;
186
184 /* 187 /*
185 * Pointer to an array of interrupts, each PHY's 188 * Pointer to an array of interrupts, each PHY's
186 * interrupt at the index matching its address 189 * interrupt at the index matching its address
@@ -675,6 +678,17 @@ static inline bool phy_is_internal(struct phy_device *phydev)
675} 678}
676 679
677/** 680/**
681 * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
682 * is RGMII (all variants)
683 * @phydev: the phy_device struct
684 */
685static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
686{
687 return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
688 phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
689}
690
691/**
678 * phy_write_mmd - Convenience function for writing a register 692 * phy_write_mmd - Convenience function for writing a register
679 * on an MMD on a given PHY. 693 * on an MMD on a given PHY.
680 * @phydev: The phy_device struct 694 * @phydev: The phy_device struct
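The phy_interface_is_rgmii() helper added above is typically used to gate delay configuration that applies to every RGMII variant; a sketch with a hypothetical vendor register:

#include <linux/phy.h>

#define MY_DELAY_REG	0x1e	/* hypothetical vendor-specific register */
#define MY_DELAY_VAL	0x0f

static int my_phy_config_init(struct phy_device *phydev)
{
	/* Covers RGMII, RGMII_ID, RGMII_RXID and RGMII_TXID. */
	if (!phy_interface_is_rgmii(phydev))
		return 0;

	return phy_write(phydev, MY_DELAY_REG, MY_DELAY_VAL);
}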
diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h
new file mode 100644
index 000000000000..50aed92ea89c
--- /dev/null
+++ b/include/linux/phy/phy-sun4i-usb.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (c) 2015 Hans de Goede <hdegoede@redhat.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef PHY_SUN4I_USB_H_
15#define PHY_SUN4I_USB_H_
16
17#include "phy.h"
18
19/**
20 * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect
21 * @phy: reference to a sun4i usb phy
22 * @enabled: whether to enable or disable squelch detect
23 */
24void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled);
25
26#endif
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index a0197fa1b116..8cf05e341cff 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -133,6 +133,8 @@ struct phy *devm_phy_get(struct device *dev, const char *string);
133struct phy *devm_phy_optional_get(struct device *dev, const char *string); 133struct phy *devm_phy_optional_get(struct device *dev, const char *string);
134struct phy *devm_of_phy_get(struct device *dev, struct device_node *np, 134struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
135 const char *con_id); 135 const char *con_id);
136struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
137 int index);
136void phy_put(struct phy *phy); 138void phy_put(struct phy *phy);
137void devm_phy_put(struct device *dev, struct phy *phy); 139void devm_phy_put(struct device *dev, struct phy *phy);
138struct phy *of_phy_get(struct device_node *np, const char *con_id); 140struct phy *of_phy_get(struct device_node *np, const char *con_id);
@@ -261,6 +263,13 @@ static inline struct phy *devm_of_phy_get(struct device *dev,
261 return ERR_PTR(-ENOSYS); 263 return ERR_PTR(-ENOSYS);
262} 264}
263 265
266static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
267 struct device_node *np,
268 int index)
269{
270 return ERR_PTR(-ENOSYS);
271}
272
264static inline void phy_put(struct phy *phy) 273static inline void phy_put(struct phy *phy)
265{ 274{
266} 275}
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 18eccefea06e..d7e5d608faa7 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -142,7 +142,7 @@ static inline struct pinctrl * __must_check pinctrl_get_select(
142 s = pinctrl_lookup_state(p, name); 142 s = pinctrl_lookup_state(p, name);
143 if (IS_ERR(s)) { 143 if (IS_ERR(s)) {
144 pinctrl_put(p); 144 pinctrl_put(p);
145 return ERR_PTR(PTR_ERR(s)); 145 return ERR_CAST(s);
146 } 146 }
147 147
148 ret = pinctrl_select_state(p, s); 148 ret = pinctrl_select_state(p, s);
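For context on the hunk above, a small sketch (the forward_error() wrapper is invented) showing why ERR_CAST() is preferred over the old decode/re-encode dance:

	/* Illustrative sketch; forward_error() is made up for this example. */
	#include <linux/err.h>
	#include <linux/pinctrl/consumer.h>

	static struct pinctrl *forward_error(struct pinctrl_state *s)
	{
		/* old: decode the errno, then re-encode it as a new ERR_PTR */
		/*	return ERR_PTR(PTR_ERR(s));                           */

		/* new: same bits, a single cast, and the intent is explicit */
		return ERR_CAST(s);
	}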
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 66e4697516de..9ba59fcba549 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -127,7 +127,7 @@ struct pinctrl_ops {
127 */ 127 */
128struct pinctrl_desc { 128struct pinctrl_desc {
129 const char *name; 129 const char *name;
130 struct pinctrl_pin_desc const *pins; 130 const struct pinctrl_pin_desc *pins;
131 unsigned int npins; 131 unsigned int npins;
132 const struct pinctrl_ops *pctlops; 132 const struct pinctrl_ops *pctlops;
133 const struct pinmux_ops *pmxops; 133 const struct pinmux_ops *pmxops;
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 511bda9ed4bf..ace60d775b20 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -56,6 +56,9 @@ struct pinctrl_dev;
56 * depending on whether the GPIO is configured as input or output, 56 * depending on whether the GPIO is configured as input or output,
57 * a direction selector function may be implemented as a backing 57 * a direction selector function may be implemented as a backing
58 * to the GPIO controllers that need pin muxing. 58 * to the GPIO controllers that need pin muxing.
59 * @strict: do not allow simultaneous use of the same pin for GPIO and another
60 * function. Check both gpio_owner and mux_owner strictly before approving
61 * the pin request.
59 */ 62 */
60struct pinmux_ops { 63struct pinmux_ops {
61 int (*request) (struct pinctrl_dev *pctldev, unsigned offset); 64 int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
@@ -66,7 +69,7 @@ struct pinmux_ops {
66 int (*get_function_groups) (struct pinctrl_dev *pctldev, 69 int (*get_function_groups) (struct pinctrl_dev *pctldev,
67 unsigned selector, 70 unsigned selector,
68 const char * const **groups, 71 const char * const **groups,
69 unsigned * const num_groups); 72 unsigned *num_groups);
70 int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector, 73 int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
71 unsigned group_selector); 74 unsigned group_selector);
72 int (*gpio_request_enable) (struct pinctrl_dev *pctldev, 75 int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
@@ -79,6 +82,7 @@ struct pinmux_ops {
79 struct pinctrl_gpio_range *range, 82 struct pinctrl_gpio_range *range,
80 unsigned offset, 83 unsigned offset,
81 bool input); 84 bool input);
85 bool strict;
82}; 86};
83 87
84#endif /* CONFIG_PINMUX */ 88#endif /* CONFIG_PINMUX */
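A brief sketch of a pinmux driver opting into the new strict mode documented above; foo_set_mux() is a placeholder and a real driver would also fill the get_function_*() callbacks:

	/* Illustrative sketch; foo_set_mux() is a stub for this example. */
	#include <linux/pinctrl/pinmux.h>

	static int foo_set_mux(struct pinctrl_dev *pctldev, unsigned func_selector,
			       unsigned group_selector)
	{
		return 0;
	}

	static const struct pinmux_ops foo_pmx_ops = {
		.set_mux	= foo_set_mux,
		/* Refuse to mux a pin to a function while it is claimed as GPIO
		 * (and vice versa); the core checks gpio_owner and mux_owner. */
		.strict		= true,
	};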
diff --git a/include/linux/platform_data/dma-rcar-audmapp.h b/include/linux/platform_data/dma-rcar-audmapp.h
deleted file mode 100644
index 471fffebbeb4..000000000000
--- a/include/linux/platform_data/dma-rcar-audmapp.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * This is for Renesas R-Car Audio-DMAC-peri-peri.
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * This file is based on the include/linux/sh_dma.h
8 *
9 * Header for the new SH dmaengine driver
10 *
11 * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17#ifndef SH_AUDMAPP_H
18#define SH_AUDMAPP_H
19
20#include <linux/dmaengine.h>
21
22struct audmapp_slave_config {
23 int slave_id;
24 dma_addr_t src;
25 dma_addr_t dst;
26 u32 chcr;
27};
28
29struct audmapp_pdata {
30 struct audmapp_slave_config *slave;
31 int slave_num;
32};
33
34#endif /* SH_AUDMAPP_H */
diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h
new file mode 100644
index 000000000000..88b0db7bee74
--- /dev/null
+++ b/include/linux/platform_data/gpio-ath79.h
@@ -0,0 +1,19 @@
1/*
2 * Atheros AR7XXX/AR9XXX GPIO controller platform data
3 *
4 * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H
12#define __LINUX_PLATFORM_DATA_GPIO_ATH79_H
13
14struct ath79_gpio_platform_data {
15 unsigned ngpios;
16 bool oe_inverted;
17};
18
19#endif
diff --git a/include/linux/platform_data/keyboard-spear.h b/include/linux/platform_data/keyboard-spear.h
index 9248e3a7e333..5e3ff653900c 100644
--- a/include/linux/platform_data/keyboard-spear.h
+++ b/include/linux/platform_data/keyboard-spear.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010 ST Microelectronics 2 * Copyright (C) 2010 ST Microelectronics
3 * Rajeev Kumar<rajeev-dlh.kumar@st.com> 3 * Rajeev Kumar <rajeevkumar.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 044a124bfbbc..21b15f6fee25 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -8,11 +8,19 @@
8#ifndef __MACB_PDATA_H__ 8#ifndef __MACB_PDATA_H__
9#define __MACB_PDATA_H__ 9#define __MACB_PDATA_H__
10 10
11/**
12 * struct macb_platform_data - platform data for MACB Ethernet
13 * @phy_mask: phy mask passed when registering the MDIO bus
14 * within the driver
15 * @phy_irq_pin: PHY IRQ
16 * @is_rmii: using RMII interface?
17 * @rev_eth_addr: reverse Ethernet address byte order
18 */
11struct macb_platform_data { 19struct macb_platform_data {
12 u32 phy_mask; 20 u32 phy_mask;
13 int phy_irq_pin; /* PHY IRQ */ 21 int phy_irq_pin;
14 u8 is_rmii; /* using RMII interface? */ 22 u8 is_rmii;
15 u8 rev_eth_addr; /* reverse Ethernet address byte order */ 23 u8 rev_eth_addr;
16}; 24};
17 25
18#endif /* __MACB_PDATA_H__ */ 26#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 75f70f6ac137..e1571efa3f2b 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -43,7 +43,6 @@ struct esdhc_platform_data {
43 enum wp_types wp_type; 43 enum wp_types wp_type;
44 enum cd_types cd_type; 44 enum cd_types cd_type;
45 int max_bus_width; 45 int max_bus_width;
46 unsigned int f_max;
47 bool support_vsel; 46 bool support_vsel;
48 unsigned int delay_line; 47 unsigned int delay_line;
49}; 48};
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h
new file mode 100644
index 000000000000..ac91707dabcb
--- /dev/null
+++ b/include/linux/platform_data/nfcmrvl.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) 2015, Marvell International Ltd.
3 *
4 * This software file (the "File") is distributed by Marvell International
5 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
6 * (the "License"). You may use, redistribute and/or modify this File in
7 * accordance with the terms and conditions of the License, a copy of which
8 * is available on the worldwide web at
9 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
10 *
11 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
12 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
13 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
14 * this warranty disclaimer.
15 */
16
17#ifndef _NFCMRVL_PTF_H_
18#define _NFCMRVL_PTF_H_
19
20struct nfcmrvl_platform_data {
21 /*
22 * Generic
23 */
24
25 /* GPIO that is wired to RESET_N signal */
26 unsigned int reset_n_io;
27 /* Tell if transport is muxed in HCI one */
28 unsigned int hci_muxed;
29
30 /*
31 * UART specific
32 */
33
34 /* Tell if UART needs flow control at init */
35 unsigned int flow_control;
36 /* Tell if firmware supports break control for power management */
37 unsigned int break_control;
38};
39
40#endif /* _NFCMRVL_PTF_H_ */
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st-nci.h
index b023373d9874..d9d400a297bd 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st-nci.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver include for the ST21NFCB NFC chip. 2 * Driver include for ST NCI NFC chip family.
3 * 3 *
4 * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. 4 * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -16,14 +16,14 @@
16 * along with this program; if not, see <http://www.gnu.org/licenses/>. 16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19#ifndef _ST21NFCB_NCI_H_ 19#ifndef _ST_NCI_H_
20#define _ST21NFCB_NCI_H_ 20#define _ST_NCI_H_
21 21
22#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" 22#define ST_NCI_DRIVER_NAME "st_nci"
23 23
24struct st21nfcb_nfc_platform_data { 24struct st_nci_nfc_platform_data {
25 unsigned int gpio_reset; 25 unsigned int gpio_reset;
26 unsigned int irq_polarity; 26 unsigned int irq_polarity;
27}; 27};
28 28
29#endif /* _ST21NFCB_NCI_H_ */ 29#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
new file mode 100644
index 000000000000..d9d400a297bd
--- /dev/null
+++ b/include/linux/platform_data/st_nci.h
@@ -0,0 +1,29 @@
1/*
2 * Driver include for ST NCI NFC chip family.
3 *
4 * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _ST_NCI_H_
20#define _ST_NCI_H_
21
22#define ST_NCI_DRIVER_NAME "st_nci"
23
24struct st_nci_nfc_platform_data {
25 unsigned int gpio_reset;
26 unsigned int irq_polarity;
27};
28
29#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/usb-rcar-gen2-phy.h b/include/linux/platform_data/usb-rcar-gen2-phy.h
deleted file mode 100644
index dd3ba46c0d90..000000000000
--- a/include/linux/platform_data/usb-rcar-gen2-phy.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (C) 2013 Renesas Solutions Corp.
3 * Copyright (C) 2013 Cogent Embedded, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __USB_RCAR_GEN2_PHY_H
11#define __USB_RCAR_GEN2_PHY_H
12
13#include <linux/types.h>
14
15struct rcar_gen2_phy_platform_data {
16 /* USB channel 0 configuration */
17 bool chan0_pci:1; /* true: PCI USB host 0, false: USBHS */
18 /* USB channel 2 configuration */
19 bool chan2_pci:1; /* true: PCI USB host 2, false: USBSS */
20};
21
22#endif
diff --git a/include/linux/platform_data/video-msm_fb.h b/include/linux/platform_data/video-msm_fb.h
deleted file mode 100644
index 31449be3eadb..000000000000
--- a/include/linux/platform_data/video-msm_fb.h
+++ /dev/null
@@ -1,146 +0,0 @@
1/*
2 * Internal shared definitions for various MSM framebuffer parts.
3 *
4 * Copyright (C) 2007 Google Incorporated
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef _MSM_FB_H_
17#define _MSM_FB_H_
18
19#include <linux/device.h>
20
21struct mddi_info;
22
23struct msm_fb_data {
24 int xres; /* x resolution in pixels */
25 int yres; /* y resolution in pixels */
26 int width; /* disply width in mm */
27 int height; /* display height in mm */
28 unsigned output_format;
29};
30
31struct msmfb_callback {
32 void (*func)(struct msmfb_callback *);
33};
34
35enum {
36 MSM_MDDI_PMDH_INTERFACE,
37 MSM_MDDI_EMDH_INTERFACE,
38 MSM_EBI2_INTERFACE,
39};
40
41#define MSMFB_CAP_PARTIAL_UPDATES (1 << 0)
42
43struct msm_panel_data {
44 /* turns off the fb memory */
45 int (*suspend)(struct msm_panel_data *);
46 /* turns on the fb memory */
47 int (*resume)(struct msm_panel_data *);
48 /* turns off the panel */
49 int (*blank)(struct msm_panel_data *);
50 /* turns on the panel */
51 int (*unblank)(struct msm_panel_data *);
52 void (*wait_vsync)(struct msm_panel_data *);
53 void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *);
54 void (*clear_vsync)(struct msm_panel_data *);
55 /* from the enum above */
56 unsigned interface_type;
57 /* data to be passed to the fb driver */
58 struct msm_fb_data *fb_data;
59
60 /* capabilities supported by the panel */
61 uint32_t caps;
62};
63
64struct msm_mddi_client_data {
65 void (*suspend)(struct msm_mddi_client_data *);
66 void (*resume)(struct msm_mddi_client_data *);
67 void (*activate_link)(struct msm_mddi_client_data *);
68 void (*remote_write)(struct msm_mddi_client_data *, uint32_t val,
69 uint32_t reg);
70 uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
71 void (*auto_hibernate)(struct msm_mddi_client_data *, int);
72 /* custom data that needs to be passed from the board file to a
73 * particular client */
74 void *private_client_data;
75 struct resource *fb_resource;
76 /* from the list above */
77 unsigned interface_type;
78};
79
80struct msm_mddi_platform_data {
81 unsigned int clk_rate;
82 void (*power_client)(struct msm_mddi_client_data *, int on);
83
84 /* fixup the mfr name, product id */
85 void (*fixup)(uint16_t *mfr_name, uint16_t *product_id);
86
87 struct resource *fb_resource; /*optional*/
88 /* number of clients in the list that follows */
89 int num_clients;
90 /* array of client information of clients */
91 struct {
92 unsigned product_id; /* mfr id in top 16 bits, product id
93 * in lower 16 bits
94 */
95 char *name; /* the device name will be the platform
96 * device name registered for the client,
97 * it should match the name of the associated
98 * driver
99 */
100 unsigned id; /* id for mddi client device node, will also
101 * be used as device id of panel devices, if
102 * the client device will have multiple panels
103 * space must be left here for them
104 */
105 void *client_data; /* required private client data */
106 unsigned int clk_rate; /* optional: if the client requires a
107 * different mddi clk rate
108 */
109 } client_platform_data[];
110};
111
112struct mdp_blit_req;
113struct fb_info;
114struct mdp_device {
115 struct device dev;
116 void (*dma)(struct mdp_device *mpd, uint32_t addr,
117 uint32_t stride, uint32_t w, uint32_t h, uint32_t x,
118 uint32_t y, struct msmfb_callback *callback, int interface);
119 void (*dma_wait)(struct mdp_device *mdp);
120 int (*blit)(struct mdp_device *mdp, struct fb_info *fb,
121 struct mdp_blit_req *req);
122 void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
123};
124
125struct class_interface;
126int register_mdp_client(struct class_interface *class_intf);
127
128/**** private client data structs go below this line ***/
129
130struct msm_mddi_bridge_platform_data {
131 /* from board file */
132 int (*init)(struct msm_mddi_bridge_platform_data *,
133 struct msm_mddi_client_data *);
134 int (*uninit)(struct msm_mddi_bridge_platform_data *,
135 struct msm_mddi_client_data *);
136 /* passed to panel for use by the fb driver */
137 int (*blank)(struct msm_mddi_bridge_platform_data *,
138 struct msm_mddi_client_data *);
139 int (*unblank)(struct msm_mddi_bridge_platform_data *,
140 struct msm_mddi_client_data *);
141 struct msm_fb_data fb_data;
142};
143
144
145
146#endif
diff --git a/include/linux/platform_data/wkup_m3.h b/include/linux/platform_data/wkup_m3.h
new file mode 100644
index 000000000000..3f1d77effd71
--- /dev/null
+++ b/include/linux/platform_data/wkup_m3.h
@@ -0,0 +1,30 @@
1/*
2 * TI Wakeup M3 remote processor platform data
3 *
4 * Copyright (C) 2014-2015 Texas Instruments, Inc.
5 *
6 * Dave Gerlach <d-gerlach@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#ifndef _LINUX_PLATFORM_DATA_WKUP_M3_H
19#define _LINUX_PLATFORM_DATA_WKUP_M3_H
20
21struct platform_device;
22
23struct wkup_m3_platform_data {
24 const char *reset_name;
25
26 int (*assert_reset)(struct platform_device *pdev, const char *name);
27 int (*deassert_reset)(struct platform_device *pdev, const char *name);
28};
29
30#endif /* _LINUX_PLATFORM_DATA_WKUP_M3_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 58f1e75ba105..bba08f44cc97 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -222,6 +222,15 @@ static inline void platform_set_drvdata(struct platform_device *pdev,
222 module_driver(__platform_driver, platform_driver_register, \ 222 module_driver(__platform_driver, platform_driver_register, \
223 platform_driver_unregister) 223 platform_driver_unregister)
224 224
225/* builtin_platform_driver() - Helper macro for builtin drivers that
226 * don't do anything special in driver init. This eliminates some
227 * boilerplate. Each driver may only use this macro once, and
228 * calling it replaces device_initcall(). Note this is meant to be
229 * a parallel of module_platform_driver() above, but w/o _exit stuff.
230 */
231#define builtin_platform_driver(__platform_driver) \
232 builtin_driver(__platform_driver, platform_driver_register)
233
225/* module_platform_driver_probe() - Helper macro for drivers that don't do 234/* module_platform_driver_probe() - Helper macro for drivers that don't do
226 * anything special in module init/exit. This eliminates a lot of 235 * anything special in module init/exit. This eliminates a lot of
227 * boilerplate. Each module may only use this macro once, and 236 * boilerplate. Each module may only use this macro once, and
@@ -240,6 +249,20 @@ static void __exit __platform_driver##_exit(void) \
240} \ 249} \
241module_exit(__platform_driver##_exit); 250module_exit(__platform_driver##_exit);
242 251
252/* builtin_platform_driver_probe() - Helper macro for drivers that don't do
253 * anything special in device init. This eliminates some boilerplate. Each
254 * driver may only use this macro once, and using it replaces device_initcall.
255 * This is meant to be a parallel of module_platform_driver_probe above, but
256 * without the __exit parts.
257 */
258#define builtin_platform_driver_probe(__platform_driver, __platform_probe) \
259static int __init __platform_driver##_init(void) \
260{ \
261 return platform_driver_probe(&(__platform_driver), \
262 __platform_probe); \
263} \
264device_initcall(__platform_driver##_init); \
265
243#define platform_create_bundle(driver, probe, res, n_res, data, size) \ 266#define platform_create_bundle(driver, probe, res, n_res, data, size) \
244 __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) 267 __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE)
245extern struct platform_device *__platform_create_bundle( 268extern struct platform_device *__platform_create_bundle(
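A minimal sketch of a driver using the builtin_platform_driver() helper added above in place of an open-coded device_initcall(); the foo_* driver is invented for illustration:

	/* Illustrative sketch; the foo_* driver is made up for this example. */
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= {
			.name = "foo",
		},
	};

	/* Replaces the device_initcall()/platform_driver_register() boilerplate;
	 * there is intentionally no exit/unregister path for a builtin driver. */
	builtin_platform_driver(foo_driver);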
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 2d29c64f8fb1..35d599e7250d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -342,6 +342,18 @@ struct dev_pm_ops {
342#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 342#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
343#endif 343#endif
344 344
345#ifdef CONFIG_PM_SLEEP
346#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
347 .suspend_noirq = suspend_fn, \
348 .resume_noirq = resume_fn, \
349 .freeze_noirq = suspend_fn, \
350 .thaw_noirq = resume_fn, \
351 .poweroff_noirq = suspend_fn, \
352 .restore_noirq = resume_fn,
353#else
354#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
355#endif
356
345#ifdef CONFIG_PM 357#ifdef CONFIG_PM
346#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ 358#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
347 .runtime_suspend = suspend_fn, \ 359 .runtime_suspend = suspend_fn, \
@@ -529,6 +541,7 @@ enum rpm_request {
529}; 541};
530 542
531struct wakeup_source; 543struct wakeup_source;
544struct wake_irq;
532struct pm_domain_data; 545struct pm_domain_data;
533 546
534struct pm_subsys_data { 547struct pm_subsys_data {
@@ -568,6 +581,7 @@ struct dev_pm_info {
568 unsigned long timer_expires; 581 unsigned long timer_expires;
569 struct work_struct work; 582 struct work_struct work;
570 wait_queue_head_t wait_queue; 583 wait_queue_head_t wait_queue;
584 struct wake_irq *wakeirq;
571 atomic_t usage_count; 585 atomic_t usage_count;
572 atomic_t child_count; 586 atomic_t child_count;
573 unsigned int disable_depth:3; 587 unsigned int disable_depth:3;
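A sketch of a dev_pm_ops table using the new SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() macro; the foo_*_noirq() callbacks are invented for this example:

	/* Illustrative sketch; the foo_*_noirq() callbacks are made up. */
	#include <linux/device.h>
	#include <linux/pm.h>

	static int foo_suspend_noirq(struct device *dev)
	{
		return 0;
	}

	static int foo_resume_noirq(struct device *dev)
	{
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		/* Fills the suspend/resume, freeze/thaw and poweroff/restore
		 * _noirq slots when CONFIG_PM_SLEEP is set and expands to
		 * nothing otherwise, so the driver needs no #ifdef. */
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
	};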
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 0b0039634410..25266c600021 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -20,6 +20,16 @@ struct pm_clk_notifier_block {
20 20
21struct clk; 21struct clk;
22 22
23#ifdef CONFIG_PM
24extern int pm_clk_runtime_suspend(struct device *dev);
25extern int pm_clk_runtime_resume(struct device *dev);
26#define USE_PM_CLK_RUNTIME_OPS \
27 .runtime_suspend = pm_clk_runtime_suspend, \
28 .runtime_resume = pm_clk_runtime_resume,
29#else
30#define USE_PM_CLK_RUNTIME_OPS
31#endif
32
23#ifdef CONFIG_PM_CLK 33#ifdef CONFIG_PM_CLK
24static inline bool pm_clk_no_clocks(struct device *dev) 34static inline bool pm_clk_no_clocks(struct device *dev)
25{ 35{
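A hedged sketch of how USE_PM_CLK_RUNTIME_OPS might be dropped into a PM domain so runtime PM is backed by the clock list; foo_pm_domain is invented for illustration:

	/* Illustrative sketch; foo_pm_domain is made up for this example. */
	#include <linux/pm.h>
	#include <linux/pm_clock.h>

	static struct dev_pm_domain foo_pm_domain = {
		.ops = {
			/* Expands to .runtime_suspend/.runtime_resume wired to
			 * pm_clk_runtime_suspend()/pm_clk_runtime_resume() when
			 * CONFIG_PM is set, and to nothing otherwise. */
			USE_PM_CLK_RUNTIME_OPS
		},
	};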
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
new file mode 100644
index 000000000000..cd5b62db9084
--- /dev/null
+++ b/include/linux/pm_wakeirq.h
@@ -0,0 +1,51 @@
1/*
2 * pm_wakeirq.h - Device wakeirq helper functions
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _LINUX_PM_WAKEIRQ_H
15#define _LINUX_PM_WAKEIRQ_H
16
17#ifdef CONFIG_PM
18
19extern int dev_pm_set_wake_irq(struct device *dev, int irq);
20extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
21 int irq);
22extern void dev_pm_clear_wake_irq(struct device *dev);
23extern void dev_pm_enable_wake_irq(struct device *dev);
24extern void dev_pm_disable_wake_irq(struct device *dev);
25
26#else /* !CONFIG_PM */
27
28static inline int dev_pm_set_wake_irq(struct device *dev, int irq)
29{
30 return 0;
31}
32
33static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
34{
35 return 0;
36}
37
38static inline void dev_pm_clear_wake_irq(struct device *dev)
39{
40}
41
42static inline void dev_pm_enable_wake_irq(struct device *dev)
43{
44}
45
46static inline void dev_pm_disable_wake_irq(struct device *dev)
47{
48}
49
50#endif /* CONFIG_PM */
51#endif /* _LINUX_PM_WAKEIRQ_H */
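A rough sketch of the intended probe/remove flow for the new wakeirq helpers; the foo_* driver and the choice of a second IRQ resource as the dedicated wake-up line are assumptions for this example:

	/* Illustrative sketch; the foo_* driver is made up for this example. */
	#include <linux/platform_device.h>
	#include <linux/pm_wakeirq.h>
	#include <linux/pm_wakeup.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int irq, ret;

		device_init_wakeup(&pdev->dev, true);

		/* Dedicated wake-up interrupt, separate from the main IO IRQ. */
		irq = platform_get_irq(pdev, 1);
		if (irq < 0)
			return irq;

		ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);
		if (ret)
			return ret;

		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		dev_pm_clear_wake_irq(&pdev->dev);
		device_init_wakeup(&pdev->dev, false);
		return 0;
	}

	/* In a real driver these would be wired into a platform_driver's
	 * .probe and .remove callbacks. */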
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index a0f70808d7f4..a3447932df1f 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -28,9 +28,17 @@
28 28
29#include <linux/types.h> 29#include <linux/types.h>
30 30
31struct wake_irq;
32
31/** 33/**
32 * struct wakeup_source - Representation of wakeup sources 34 * struct wakeup_source - Representation of wakeup sources
33 * 35 *
36 * @name: Name of the wakeup source
37 * @entry: Wakeup source list entry
38 * @lock: Wakeup source lock
39 * @wakeirq: Optional device specific wakeirq
40 * @timer: Wakeup timer list
41 * @timer_expires: Wakeup timer expiration
34 * @total_time: Total time this wakeup source has been active. 42 * @total_time: Total time this wakeup source has been active.
35 * @max_time: Maximum time this wakeup source has been continuously active. 43 * @max_time: Maximum time this wakeup source has been continuously active.
36 * @last_time: Monotonic clock when the wakeup source's was touched last time. 44 * @last_time: Monotonic clock when the wakeup source's was touched last time.
@@ -47,6 +55,7 @@ struct wakeup_source {
47 const char *name; 55 const char *name;
48 struct list_head entry; 56 struct list_head entry;
49 spinlock_t lock; 57 spinlock_t lock;
58 struct wake_irq *wakeirq;
50 struct timer_list timer; 59 struct timer_list timer;
51 unsigned long timer_expires; 60 unsigned long timer_expires;
52 ktime_t total_time; 61 ktime_t total_time;
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
new file mode 100644
index 000000000000..d2114045a6c4
--- /dev/null
+++ b/include/linux/pmem.h
@@ -0,0 +1,152 @@
1/*
2 * Copyright(c) 2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#ifndef __PMEM_H__
14#define __PMEM_H__
15
16#include <linux/io.h>
17
18#ifdef CONFIG_ARCH_HAS_PMEM_API
19#include <asm/cacheflush.h>
20#else
21static inline void arch_wmb_pmem(void)
22{
23 BUG();
24}
25
26static inline bool __arch_has_wmb_pmem(void)
27{
28 return false;
29}
30
31static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
32 unsigned long size)
33{
34 return NULL;
35}
36
37static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
38 size_t n)
39{
40 BUG();
41}
42#endif
43
44/*
45 * Architectures that define ARCH_HAS_PMEM_API must provide
46 * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(),
47 * arch_wmb_pmem(), and __arch_has_wmb_pmem().
48 */
49
50static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
51{
52 memcpy(dst, (void __force const *) src, size);
53}
54
55static inline void memunmap_pmem(void __pmem *addr)
56{
57 iounmap((void __force __iomem *) addr);
58}
59
60/**
61 * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
62 *
63 * For a given cpu implementation within an architecture it is possible
64 * that wmb_pmem() resolves to a nop. In the case this returns
65 * false, pmem api users are unable to ensure durability and may want to
66 * fall back to a different data consistency model, or otherwise notify
67 * the user.
68 */
69static inline bool arch_has_wmb_pmem(void)
70{
71 if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
72 return __arch_has_wmb_pmem();
73 return false;
74}
75
76static inline bool arch_has_pmem_api(void)
77{
78 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
79}
80
81/*
82 * These defaults seek to offer decent performance and minimize the
83 * window between i/o completion and writes being durable on media.
84 * However, it is undefined / architecture specific whether
85 * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
86 * making data durable relative to i/o completion.
87 */
88static void default_memcpy_to_pmem(void __pmem *dst, const void *src,
89 size_t size)
90{
91 memcpy((void __force *) dst, src, size);
92}
93
94static void __pmem *default_memremap_pmem(resource_size_t offset,
95 unsigned long size)
96{
97 return (void __pmem __force *)ioremap_wt(offset, size);
98}
99
100/**
101 * memremap_pmem - map physical persistent memory for pmem api
102 * @offset: physical address of persistent memory
103 * @size: size of the mapping
104 *
105 * Establish a mapping of the architecture specific memory type expected
106 * by memcpy_to_pmem() and wmb_pmem(). For example, it may be
107 * the case that an uncacheable or writethrough mapping is sufficient,
108 * or a writeback mapping provided memcpy_to_pmem() and
109 * wmb_pmem() arrange for the data to be written through the
110 * cache to persistent media.
111 */
112static inline void __pmem *memremap_pmem(resource_size_t offset,
113 unsigned long size)
114{
115 if (arch_has_pmem_api())
116 return arch_memremap_pmem(offset, size);
117 return default_memremap_pmem(offset, size);
118}
119
120/**
121 * memcpy_to_pmem - copy data to persistent memory
122 * @dst: destination buffer for the copy
123 * @src: source buffer for the copy
124 * @n: length of the copy in bytes
125 *
126 * Perform a memory copy that results in the destination of the copy
127 * being effectively evicted from, or never written to, the processor
128 * cache hierarchy after the copy completes. After memcpy_to_pmem()
129 * data may still reside in cpu or platform buffers, so this operation
130 * must be followed by a wmb_pmem().
131 */
132static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
133{
134 if (arch_has_pmem_api())
135 arch_memcpy_to_pmem(dst, src, n);
136 else
137 default_memcpy_to_pmem(dst, src, n);
138}
139
140/**
141 * wmb_pmem - synchronize writes to persistent memory
142 *
143 * After a series of memcpy_to_pmem() operations this drains data from
144 * cpu write buffers and any platform (memory controller) buffers to
145 * ensure that written data is durable on persistent memory media.
146 */
147static inline void wmb_pmem(void)
148{
149 if (arch_has_pmem_api())
150 arch_wmb_pmem();
151}
152#endif /* __PMEM_H__ */
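A sketch of the write path the new pmem API is meant to support, map, copy, drain; the foo_flush_record() wrapper and its arguments are invented for this example:

	/* Illustrative sketch; foo_flush_record() is made up for this example. */
	#include <linux/errno.h>
	#include <linux/pmem.h>

	static int foo_flush_record(resource_size_t phys, const void *buf, size_t len)
	{
		void __pmem *pmem;

		pmem = memremap_pmem(phys, len);
		if (!pmem)
			return -ENOMEM;

		/* Copy with cache-bypass/write-through semantics ... */
		memcpy_to_pmem(pmem, buf, len);
		/* ... then drain cpu and platform buffers so the data is durable
		 * on media.  Only a durability guarantee if arch_has_wmb_pmem(). */
		wmb_pmem();

		memunmap_pmem(pmem);
		return 0;
	}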
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index cf112b4075c8..522757ac9cd4 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -215,6 +215,10 @@ struct max17042_platform_data {
215 * the datasheet although it can be changed by board designers. 215 * the datasheet although it can be changed by board designers.
216 */ 216 */
217 unsigned int r_sns; 217 unsigned int r_sns;
218 int vmin; /* in millivolts */
219 int vmax; /* in millivolts */
220 int temp_min; /* in tenths of degree Celsius */
221 int temp_max; /* in tenths of degree Celsius */
218}; 222};
219 223
220#endif /* __MAX17042_BATTERY_H_ */ 224#endif /* __MAX17042_BATTERY_H_ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 75a1dd8dc56e..ef9f1592185d 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -206,6 +206,11 @@ struct power_supply_desc {
206 int (*set_property)(struct power_supply *psy, 206 int (*set_property)(struct power_supply *psy,
207 enum power_supply_property psp, 207 enum power_supply_property psp,
208 const union power_supply_propval *val); 208 const union power_supply_propval *val);
209 /*
210 * property_is_writeable() will be called during registration
211 * of power supply. If this happens during device probe then it must
212 * not access internal data of device (because probe did not end).
213 */
209 int (*property_is_writeable)(struct power_supply *psy, 214 int (*property_is_writeable)(struct power_supply *psy,
210 enum power_supply_property psp); 215 enum power_supply_property psp);
211 void (*external_power_changed)(struct power_supply *psy); 216 void (*external_power_changed)(struct power_supply *psy);
@@ -237,6 +242,7 @@ struct power_supply {
237 /* private */ 242 /* private */
238 struct device dev; 243 struct device dev;
239 struct work_struct changed_work; 244 struct work_struct changed_work;
245 struct delayed_work deferred_register_work;
240 spinlock_t changed_lock; 246 spinlock_t changed_lock;
241 bool changed; 247 bool changed;
242 atomic_t use_cnt; 248 atomic_t use_cnt;
@@ -286,10 +292,15 @@ extern void power_supply_put(struct power_supply *psy);
286#ifdef CONFIG_OF 292#ifdef CONFIG_OF
287extern struct power_supply *power_supply_get_by_phandle(struct device_node *np, 293extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
288 const char *property); 294 const char *property);
295extern struct power_supply *devm_power_supply_get_by_phandle(
296 struct device *dev, const char *property);
289#else /* !CONFIG_OF */ 297#else /* !CONFIG_OF */
290static inline struct power_supply * 298static inline struct power_supply *
291power_supply_get_by_phandle(struct device_node *np, const char *property) 299power_supply_get_by_phandle(struct device_node *np, const char *property)
292{ return NULL; } 300{ return NULL; }
301static inline struct power_supply *
302devm_power_supply_get_by_phandle(struct device *dev, const char *property)
303{ return NULL; }
293#endif /* CONFIG_OF */ 304#endif /* CONFIG_OF */
294extern void power_supply_changed(struct power_supply *psy); 305extern void power_supply_changed(struct power_supply *psy);
295extern int power_supply_am_i_supplied(struct power_supply *psy); 306extern int power_supply_am_i_supplied(struct power_supply *psy);
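A small sketch of a consumer using the new resource-managed phandle lookup; the "battery" property name and foo_probe() are assumptions for this example:

	/* Illustrative sketch; the foo_* names and property are made up. */
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/power_supply.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct power_supply *psy;

		/* devm_ variant: no explicit power_supply_put() on the error
		 * and teardown paths. */
		psy = devm_power_supply_get_by_phandle(&pdev->dev, "battery");
		if (IS_ERR_OR_NULL(psy))
			return psy ? PTR_ERR(psy) : -ENODEV;

		return 0;
	}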
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 0f1534acaf60..84991f185173 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -293,6 +293,8 @@ struct preempt_notifier {
293 struct preempt_ops *ops; 293 struct preempt_ops *ops;
294}; 294};
295 295
296void preempt_notifier_inc(void);
297void preempt_notifier_dec(void);
296void preempt_notifier_register(struct preempt_notifier *notifier); 298void preempt_notifier_register(struct preempt_notifier *notifier);
297void preempt_notifier_unregister(struct preempt_notifier *notifier); 299void preempt_notifier_unregister(struct preempt_notifier *notifier);
298 300
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 9b30871c9149..a6298b27ac99 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -30,6 +30,8 @@ static inline const char *printk_skip_level(const char *buffer)
30 return buffer; 30 return buffer;
31} 31}
32 32
33#define CONSOLE_EXT_LOG_MAX 8192
34
33/* printk's without a loglevel use this.. */ 35/* printk's without a loglevel use this.. */
34#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT 36#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
35 37
@@ -120,7 +122,7 @@ static inline __printf(1, 2) __cold
120void early_printk(const char *s, ...) { } 122void early_printk(const char *s, ...) { }
121#endif 123#endif
122 124
123typedef int(*printk_func_t)(const char *fmt, va_list args); 125typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
124 126
125#ifdef CONFIG_PRINTK 127#ifdef CONFIG_PRINTK
126asmlinkage __printf(5, 0) 128asmlinkage __printf(5, 0)
@@ -164,7 +166,7 @@ char *log_buf_addr_get(void);
164u32 log_buf_len_get(void); 166u32 log_buf_len_get(void);
165void log_buf_kexec_setup(void); 167void log_buf_kexec_setup(void);
166void __init setup_log_buf(int early); 168void __init setup_log_buf(int early);
167void dump_stack_set_arch_desc(const char *fmt, ...); 169__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
168void dump_stack_print_info(const char *log_lvl); 170void dump_stack_print_info(const char *log_lvl);
169void show_regs_print_info(const char *log_lvl); 171void show_regs_print_info(const char *log_lvl);
170#else 172#else
@@ -215,7 +217,7 @@ static inline void setup_log_buf(int early)
215{ 217{
216} 218}
217 219
218static inline void dump_stack_set_arch_desc(const char *fmt, ...) 220static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...)
219{ 221{
220} 222}
221 223
diff --git a/include/linux/property.h b/include/linux/property.h
index de8bdf417a35..76ebde9c11d4 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -164,4 +164,6 @@ struct property_set {
164 164
165void device_add_property_set(struct device *dev, struct property_set *pset); 165void device_add_property_set(struct device *dev, struct property_set *pset);
166 166
167bool device_dma_is_coherent(struct device *dev);
168
167#endif /* _LINUX_PROPERTY_H_ */ 169#endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index dab545bb66b3..0485bab061fd 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -194,8 +194,9 @@ enum pxa_ssp_type {
194 PXA168_SSP, 194 PXA168_SSP,
195 PXA910_SSP, 195 PXA910_SSP,
196 CE4100_SSP, 196 CE4100_SSP,
197 LPSS_SSP,
198 QUARK_X1000_SSP, 197 QUARK_X1000_SSP,
198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
199 LPSS_BYT_SSP,
199}; 200};
200 201
201struct ssp_device { 202struct ssp_device {
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index d7a974d5f57c..6e7d5ec65838 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -1,4 +1,4 @@
1/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. 1/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2015 Linaro Ltd. 2 * Copyright (C) 2015 Linaro Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -16,6 +16,17 @@
16extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); 16extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
17extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); 17extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
18 18
19#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
20
21struct qcom_scm_hdcp_req {
22 u32 addr;
23 u32 val;
24};
25
26extern bool qcom_scm_hdcp_available(void);
27extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
28 u32 *resp);
29
19#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 30#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
20#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 31#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
21 32
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index fb31765e935a..830c4992088d 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -31,6 +31,7 @@
31 31
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/stddef.h> 33#include <linux/stddef.h>
34#include <linux/rcupdate.h>
34 35
35struct rb_node { 36struct rb_node {
36 unsigned long __rb_parent_color; 37 unsigned long __rb_parent_color;
@@ -73,11 +74,11 @@ extern struct rb_node *rb_first_postorder(const struct rb_root *);
73extern struct rb_node *rb_next_postorder(const struct rb_node *); 74extern struct rb_node *rb_next_postorder(const struct rb_node *);
74 75
75/* Fast replacement of a single node without remove/rebalance/add/rebalance */ 76/* Fast replacement of a single node without remove/rebalance/add/rebalance */
76extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, 77extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
77 struct rb_root *root); 78 struct rb_root *root);
78 79
79static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, 80static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
80 struct rb_node ** rb_link) 81 struct rb_node **rb_link)
81{ 82{
82 node->__rb_parent_color = (unsigned long)parent; 83 node->__rb_parent_color = (unsigned long)parent;
83 node->rb_left = node->rb_right = NULL; 84 node->rb_left = node->rb_right = NULL;
@@ -85,6 +86,15 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
85 *rb_link = node; 86 *rb_link = node;
86} 87}
87 88
89static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
90 struct rb_node **rb_link)
91{
92 node->__rb_parent_color = (unsigned long)parent;
93 node->rb_left = node->rb_right = NULL;
94
95 rcu_assign_pointer(*rb_link, node);
96}
97
88#define rb_entry_safe(ptr, type, member) \ 98#define rb_entry_safe(ptr, type, member) \
89 ({ typeof(ptr) ____ptr = (ptr); \ 99 ({ typeof(ptr) ____ptr = (ptr); \
90 ____ptr ? rb_entry(____ptr, type, member) : NULL; \ 100 ____ptr ? rb_entry(____ptr, type, member) : NULL; \
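A sketch of an insert path that publishes a node with the new rb_link_node_rcu() so concurrent RCU readers never observe a half-initialized node; struct foo and foo_insert() are invented for this example:

	/* Illustrative sketch; struct foo and foo_insert() are made up. */
	#include <linux/rbtree.h>

	struct foo {
		struct rb_node rb;
		unsigned long key;
	};

	/* Caller holds the write-side lock; readers walk the tree under RCU. */
	static void foo_insert(struct rb_root *root, struct foo *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct foo *cur = rb_entry(*link, struct foo, rb);

			parent = *link;
			link = (new->key < cur->key) ? &parent->rb_left
						     : &parent->rb_right;
		}

		/* rcu_assign_pointer() inside rb_link_node_rcu() orders the
		 * node initialization before the link becomes visible. */
		rb_link_node_rcu(&new->rb, parent, link);
		rb_insert_color(&new->rb, root);
	}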
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 378c5ee75f78..14d7b831b63a 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -123,11 +123,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
123{ 123{
124 if (parent) { 124 if (parent) {
125 if (parent->rb_left == old) 125 if (parent->rb_left == old)
126 parent->rb_left = new; 126 WRITE_ONCE(parent->rb_left, new);
127 else 127 else
128 parent->rb_right = new; 128 WRITE_ONCE(parent->rb_right, new);
129 } else 129 } else
130 root->rb_node = new; 130 WRITE_ONCE(root->rb_node, new);
131} 131}
132 132
133extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, 133extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@@ -137,7 +137,8 @@ static __always_inline struct rb_node *
137__rb_erase_augmented(struct rb_node *node, struct rb_root *root, 137__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
138 const struct rb_augment_callbacks *augment) 138 const struct rb_augment_callbacks *augment)
139{ 139{
140 struct rb_node *child = node->rb_right, *tmp = node->rb_left; 140 struct rb_node *child = node->rb_right;
141 struct rb_node *tmp = node->rb_left;
141 struct rb_node *parent, *rebalance; 142 struct rb_node *parent, *rebalance;
142 unsigned long pc; 143 unsigned long pc;
143 144
@@ -167,6 +168,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
167 tmp = parent; 168 tmp = parent;
168 } else { 169 } else {
169 struct rb_node *successor = child, *child2; 170 struct rb_node *successor = child, *child2;
171
170 tmp = child->rb_left; 172 tmp = child->rb_left;
171 if (!tmp) { 173 if (!tmp) {
172 /* 174 /*
@@ -180,6 +182,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
180 */ 182 */
181 parent = successor; 183 parent = successor;
182 child2 = successor->rb_right; 184 child2 = successor->rb_right;
185
183 augment->copy(node, successor); 186 augment->copy(node, successor);
184 } else { 187 } else {
185 /* 188 /*
@@ -201,19 +204,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
201 successor = tmp; 204 successor = tmp;
202 tmp = tmp->rb_left; 205 tmp = tmp->rb_left;
203 } while (tmp); 206 } while (tmp);
204 parent->rb_left = child2 = successor->rb_right; 207 child2 = successor->rb_right;
205 successor->rb_right = child; 208 WRITE_ONCE(parent->rb_left, child2);
209 WRITE_ONCE(successor->rb_right, child);
206 rb_set_parent(child, successor); 210 rb_set_parent(child, successor);
211
207 augment->copy(node, successor); 212 augment->copy(node, successor);
208 augment->propagate(parent, successor); 213 augment->propagate(parent, successor);
209 } 214 }
210 215
211 successor->rb_left = tmp = node->rb_left; 216 tmp = node->rb_left;
217 WRITE_ONCE(successor->rb_left, tmp);
212 rb_set_parent(tmp, successor); 218 rb_set_parent(tmp, successor);
213 219
214 pc = node->__rb_parent_color; 220 pc = node->__rb_parent_color;
215 tmp = __rb_parent(pc); 221 tmp = __rb_parent(pc);
216 __rb_change_child(node, successor, tmp, root); 222 __rb_change_child(node, successor, tmp, root);
223
217 if (child2) { 224 if (child2) {
218 successor->__rb_parent_color = pc; 225 successor->__rb_parent_color = pc;
219 rb_set_parent_color(child2, parent, RB_BLACK); 226 rb_set_parent_color(child2, parent, RB_BLACK);
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
new file mode 100644
index 000000000000..4f3432c61d12
--- /dev/null
+++ b/include/linux/rbtree_latch.h
@@ -0,0 +1,212 @@
1/*
2 * Latched RB-trees
3 *
4 * Copyright (C) 2015 Intel Corp., Peter Zijlstra <peterz@infradead.org>
5 *
6 * Since RB-trees have non-atomic modifications they're not immediately suited
7 * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for
8 * lockless lookups; we cannot guarantee they return a correct result.
9 *
10 * The simplest solution is a seqlock + RB-tree, this will allow lockless
11 * lookups; but has the constraint (inherent to the seqlock) that read sides
12 * cannot nest in write sides.
13 *
14 * If we need to allow unconditional lookups (say as required for NMI context
15 * usage) we need a more complex setup; this data structure provides this by
16 * employing the latch technique -- see @raw_write_seqcount_latch -- to
17 * implement a latched RB-tree which does allow for unconditional lookups by
18 * virtue of always having (at least) one stable copy of the tree.
19 *
20 * However, while we have the guarantee that there is at all times one stable
21 * copy, this does not guarantee an iteration will not observe modifications.
22 * What might have been a stable copy at the start of the iteration, need not
23 * remain so for the duration of the iteration.
24 *
25 * Therefore, this does require a lockless RB-tree iteration to be non-fatal;
26 * see the comment in lib/rbtree.c. Note however that we only require the first
27 * condition -- not seeing partial stores -- because the latch thing isolates
28 * us from loops. If we were to interrupt a modification the lookup would be
29 * pointed at the stable tree and complete while the modification was halted.
30 */
31
32#ifndef RB_TREE_LATCH_H
33#define RB_TREE_LATCH_H
34
35#include <linux/rbtree.h>
36#include <linux/seqlock.h>
37
38struct latch_tree_node {
39 struct rb_node node[2];
40};
41
42struct latch_tree_root {
43 seqcount_t seq;
44 struct rb_root tree[2];
45};
46
47/**
48 * latch_tree_ops - operators to define the tree order
49 * @less: used for insertion; provides the (partial) order between two elements.
50 * @comp: used for lookups; provides the order between the search key and an element.
51 *
52 * The operators are related like:
53 *
54 * comp(a->key,b) < 0 := less(a,b)
55 * comp(a->key,b) > 0 := less(b,a)
56 * comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
57 *
58 * If these operators define a partial order on the elements we make no
59 * guarantee on which of the elements matching the key is found. See
60 * latch_tree_find().
61 */
62struct latch_tree_ops {
63 bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b);
64 int (*comp)(void *key, struct latch_tree_node *b);
65};
66
67static __always_inline struct latch_tree_node *
68__lt_from_rb(struct rb_node *node, int idx)
69{
70 return container_of(node, struct latch_tree_node, node[idx]);
71}
72
73static __always_inline void
74__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx,
75 bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
76{
77 struct rb_root *root = &ltr->tree[idx];
78 struct rb_node **link = &root->rb_node;
79 struct rb_node *node = &ltn->node[idx];
80 struct rb_node *parent = NULL;
81 struct latch_tree_node *ltp;
82
83 while (*link) {
84 parent = *link;
85 ltp = __lt_from_rb(parent, idx);
86
87 if (less(ltn, ltp))
88 link = &parent->rb_left;
89 else
90 link = &parent->rb_right;
91 }
92
93 rb_link_node_rcu(node, parent, link);
94 rb_insert_color(node, root);
95}
96
97static __always_inline void
98__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx)
99{
100 rb_erase(&ltn->node[idx], &ltr->tree[idx]);
101}
102
103static __always_inline struct latch_tree_node *
104__lt_find(void *key, struct latch_tree_root *ltr, int idx,
105 int (*comp)(void *key, struct latch_tree_node *node))
106{
107 struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node);
108 struct latch_tree_node *ltn;
109 int c;
110
111 while (node) {
112 ltn = __lt_from_rb(node, idx);
113 c = comp(key, ltn);
114
115 if (c < 0)
116 node = rcu_dereference_raw(node->rb_left);
117 else if (c > 0)
118 node = rcu_dereference_raw(node->rb_right);
119 else
120 return ltn;
121 }
122
123 return NULL;
124}
125
126/**
127 * latch_tree_insert() - insert @node into the trees @root
128 * @node: nodes to insert
129 * @root: trees to insert @node into
130 * @ops: operators defining the node order
131 *
132 * It inserts @node into @root in an ordered fashion such that we can always
133 * observe one complete tree. See the comment for raw_write_seqcount_latch().
134 *
135 * The inserts use rcu_assign_pointer() to publish the element such that the
136 * tree structure is stored before we can observe the new @node.
137 *
138 * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
139 * serialized.
140 */
141static __always_inline void
142latch_tree_insert(struct latch_tree_node *node,
143 struct latch_tree_root *root,
144 const struct latch_tree_ops *ops)
145{
146 raw_write_seqcount_latch(&root->seq);
147 __lt_insert(node, root, 0, ops->less);
148 raw_write_seqcount_latch(&root->seq);
149 __lt_insert(node, root, 1, ops->less);
150}
151
152/**
153 * latch_tree_erase() - removes @node from the trees @root
154 * @node: node to remove
155 * @root: trees to remove @node from
156 * @ops: operators defining the node order
157 *
158 * Removes @node from the trees @root in an ordered fashion such that we can
159 * always observe one complete tree. See the comment for
160 * raw_write_seqcount_latch().
161 *
162 * It is assumed that @node will observe one RCU quiescent state before being
163 * reused or freed.
164 *
165 * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
166 * serialized.
167 */
168static __always_inline void
169latch_tree_erase(struct latch_tree_node *node,
170 struct latch_tree_root *root,
171 const struct latch_tree_ops *ops)
172{
173 raw_write_seqcount_latch(&root->seq);
174 __lt_erase(node, root, 0);
175 raw_write_seqcount_latch(&root->seq);
176 __lt_erase(node, root, 1);
177}
178
179/**
180 * latch_tree_find() - find the node matching @key in the trees @root
181 * @key: search key
182 * @root: trees to search for @key
183 * @ops: operators defining the node order
184 *
185 * Does a lockless lookup in the trees @root for the node matching @key.
186 *
187 * It is assumed that this is called while holding the appropriate RCU read
188 * side lock.
189 *
190 * If the operators define a partial order on the elements (there are multiple
191 * elements which have the same key value) it is undefined which of these
192 * elements will be found. Nor is it possible to iterate the tree to find
193 * further elements with the same key value.
194 *
195 * Returns: a pointer to the node matching @key or NULL.
196 */
197static __always_inline struct latch_tree_node *
198latch_tree_find(void *key, struct latch_tree_root *root,
199 const struct latch_tree_ops *ops)
200{
201 struct latch_tree_node *node;
202 unsigned int seq;
203
204 do {
205 seq = raw_read_seqcount_latch(&root->seq);
206 node = __lt_find(key, root, seq & 1, ops->comp);
207 } while (read_seqcount_retry(&root->seq, seq));
208
209 return node;
210}
211
212#endif /* RB_TREE_LATCH_H */
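To make the ops/lookup contract above concrete, a hedged sketch of a user embedding a latch_tree_node and doing a lockless lookup; struct foo, its address key and the foo_* helpers are invented for this example:

	/* Illustrative sketch; struct foo and the foo_* helpers are made up. */
	#include <linux/kernel.h>
	#include <linux/rbtree_latch.h>

	struct foo {
		unsigned long addr;
		struct latch_tree_node lt;
	};

	static __always_inline struct foo *lt_to_foo(struct latch_tree_node *n)
	{
		return container_of(n, struct foo, lt);
	}

	static bool foo_less(struct latch_tree_node *a, struct latch_tree_node *b)
	{
		return lt_to_foo(a)->addr < lt_to_foo(b)->addr;
	}

	static int foo_comp(void *key, struct latch_tree_node *n)
	{
		unsigned long addr = (unsigned long)key;

		if (addr < lt_to_foo(n)->addr)
			return -1;
		if (addr > lt_to_foo(n)->addr)
			return 1;
		return 0;
	}

	static const struct latch_tree_ops foo_tree_ops = {
		.less = foo_less,
		.comp = foo_comp,
	};

	static struct latch_tree_root foo_tree;

	/* Writers, serialized externally:
	 *	latch_tree_insert(&f->lt, &foo_tree, &foo_tree_ops);
	 *	latch_tree_erase(&f->lt, &foo_tree, &foo_tree_ops);
	 */

	/* Lockless lookup; safe even from NMI context. */
	static struct foo *foo_find(unsigned long addr)
	{
		struct latch_tree_node *n;

		n = latch_tree_find((void *)addr, &foo_tree, &foo_tree_ops);
		return n ? lt_to_foo(n) : NULL;
	}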
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 33a056bb886f..4cf5f51b4c9c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -633,21 +633,6 @@ static inline void rcu_preempt_sleep_check(void)
633#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) 633#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
634 634
635/** 635/**
636 * lockless_dereference() - safely load a pointer for later dereference
637 * @p: The pointer to load
638 *
639 * Similar to rcu_dereference(), but for situations where the pointed-to
640 * object's lifetime is managed by something other than RCU. That
641 * "something other" might be reference counting or simple immortality.
642 */
643#define lockless_dereference(p) \
644({ \
645 typeof(p) _________p1 = READ_ONCE(p); \
646 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
647 (_________p1); \
648})
649
650/**
651 * rcu_assign_pointer() - assign to RCU-protected pointer 636 * rcu_assign_pointer() - assign to RCU-protected pointer
652 * @p: pointer to assign to 637 * @p: pointer to assign to
653 * @v: value to assign (publish) 638 * @v: value to assign (publish)
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 116655d92269..59c55ea0f0b5 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -433,6 +433,8 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
433 unsigned int mask, unsigned int val, 433 unsigned int mask, unsigned int val,
434 bool *change); 434 bool *change);
435int regmap_get_val_bytes(struct regmap *map); 435int regmap_get_val_bytes(struct regmap *map);
436int regmap_get_max_register(struct regmap *map);
437int regmap_get_reg_stride(struct regmap *map);
436int regmap_async_complete(struct regmap *map); 438int regmap_async_complete(struct regmap *map);
437bool regmap_can_raw_write(struct regmap *map); 439bool regmap_can_raw_write(struct regmap *map);
438 440
@@ -676,6 +678,18 @@ static inline int regmap_get_val_bytes(struct regmap *map)
676 return -EINVAL; 678 return -EINVAL;
677} 679}
678 680
681static inline int regmap_get_max_register(struct regmap *map)
682{
683 WARN_ONCE(1, "regmap API is disabled");
684 return -EINVAL;
685}
686
687static inline int regmap_get_reg_stride(struct regmap *map)
688{
689 WARN_ONCE(1, "regmap API is disabled");
690 return -EINVAL;
691}
692
679static inline int regcache_sync(struct regmap *map) 693static inline int regcache_sync(struct regmap *map)
680{ 694{
681 WARN_ONCE(1, "regmap API is disabled"); 695 WARN_ONCE(1, "regmap API is disabled");
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fffa688ac3a7..4db9fbe4889d 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -91,6 +91,7 @@ struct regulator_linear_range {
91 * @set_current_limit: Configure a limit for a current-limited regulator. 91 * @set_current_limit: Configure a limit for a current-limited regulator.
92 * The driver should select the current closest to max_uA. 92 * The driver should select the current closest to max_uA.
93 * @get_current_limit: Get the configured limit for a current-limited regulator. 93 * @get_current_limit: Get the configured limit for a current-limited regulator.
94 * @set_input_current_limit: Configure an input limit.
94 * 95 *
95 * @set_mode: Set the configured operating mode for the regulator. 96 * @set_mode: Set the configured operating mode for the regulator.
96 * @get_mode: Get the configured operating mode for the regulator. 97 * @get_mode: Get the configured operating mode for the regulator.
@@ -111,6 +112,7 @@ struct regulator_linear_range {
111 * to stabilise after being set to a new value, in microseconds. 112 * to stabilise after being set to a new value, in microseconds.
112 * The function provides the from and to voltage selector, the 113 * The function provides the from and to voltage selector, the
113 * function should return the worst case. 114 * function should return the worst case.
115 * @set_soft_start: Enable soft start for the regulator.
114 * 116 *
115 * @set_suspend_voltage: Set the voltage for the regulator when the system 117 * @set_suspend_voltage: Set the voltage for the regulator when the system
116 * is suspended. 118 * is suspended.
@@ -121,6 +123,9 @@ struct regulator_linear_range {
121 * @set_suspend_mode: Set the operating mode for the regulator when the 123 * @set_suspend_mode: Set the operating mode for the regulator when the
122 * system is suspended. 124 * system is suspended.
123 * 125 *
126 * @set_pull_down: Configure the regulator to pull down when the regulator
127 * is disabled.
128 *
124 * This struct describes regulator operations which can be implemented by 129 * This struct describes regulator operations which can be implemented by
125 * regulator chip drivers. 130 * regulator chip drivers.
126 */ 131 */
@@ -142,6 +147,8 @@ struct regulator_ops {
142 int min_uA, int max_uA); 147 int min_uA, int max_uA);
143 int (*get_current_limit) (struct regulator_dev *); 148 int (*get_current_limit) (struct regulator_dev *);
144 149
150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
151
145 /* enable/disable regulator */ 152 /* enable/disable regulator */
146 int (*enable) (struct regulator_dev *); 153 int (*enable) (struct regulator_dev *);
147 int (*disable) (struct regulator_dev *); 154 int (*disable) (struct regulator_dev *);
@@ -158,6 +165,8 @@ struct regulator_ops {
158 unsigned int old_selector, 165 unsigned int old_selector,
159 unsigned int new_selector); 166 unsigned int new_selector);
160 167
168 int (*set_soft_start) (struct regulator_dev *);
169
161 /* report regulator status ... most other accessors report 170 /* report regulator status ... most other accessors report
162 * control inputs, this reports results of combining inputs 171 * control inputs, this reports results of combining inputs
163 * from Linux (and other sources) with the actual load. 172 * from Linux (and other sources) with the actual load.
@@ -187,6 +196,8 @@ struct regulator_ops {
187 196
188 /* set regulator suspend operating mode (defined in consumer.h) */ 197 /* set regulator suspend operating mode (defined in consumer.h) */
189 int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); 198 int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
199
200 int (*set_pull_down) (struct regulator_dev *);
190}; 201};
191 202
192/* 203/*
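A hedged sketch of a driver wiring up the new set_soft_start() and set_pull_down() callbacks; the chip register layout (FOO_CTRL and its bits), the foo_ prefix and the choice to keep the regmap in driver data are all hypothetical:

#define FOO_CTRL		0x01	/* hypothetical control register */
#define FOO_SOFTSTART_EN	BIT(0)
#define FOO_PULLDOWN_EN		BIT(1)

static int foo_set_soft_start(struct regulator_dev *rdev)
{
	struct regmap *map = rdev_get_drvdata(rdev);

	return regmap_update_bits(map, FOO_CTRL,
				  FOO_SOFTSTART_EN, FOO_SOFTSTART_EN);
}

static int foo_set_pull_down(struct regulator_dev *rdev)
{
	struct regmap *map = rdev_get_drvdata(rdev);

	return regmap_update_bits(map, FOO_CTRL,
				  FOO_PULLDOWN_EN, FOO_PULLDOWN_EN);
}

static const struct regulator_ops foo_ops = {
	/* ...voltage and enable callbacks as usual... */
	.set_soft_start	= foo_set_soft_start,
	.set_pull_down	= foo_set_pull_down,
};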
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b07562e082c4..b11be1260129 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -75,6 +75,8 @@ struct regulator_state {
75 * 75 *
76 * @min_uA: Smallest current consumers may set. 76 * @min_uA: Smallest current consumers may set.
77 * @max_uA: Largest current consumers may set. 77 * @max_uA: Largest current consumers may set.
78 * @ilim_uA: Maximum input current.
79 * @system_load: Load that isn't captured by any consumer requests.
78 * 80 *
79 * @valid_modes_mask: Mask of modes which may be configured by consumers. 81 * @valid_modes_mask: Mask of modes which may be configured by consumers.
80 * @valid_ops_mask: Operations which may be performed by consumers. 82 * @valid_ops_mask: Operations which may be performed by consumers.
@@ -86,6 +88,8 @@ struct regulator_state {
86 * applied. 88 * applied.
87 * @apply_uV: Apply the voltage constraint when initialising. 89 * @apply_uV: Apply the voltage constraint when initialising.
88 * @ramp_disable: Disable ramp delay when initialising or when setting voltage. 90 * @ramp_disable: Disable ramp delay when initialising or when setting voltage.
91 * @soft_start: Enable soft start so that voltage ramps slowly.
92 * @pull_down: Enable pull down when regulator is disabled.
89 * 93 *
90 * @input_uV: Input voltage for regulator when supplied by another regulator. 94 * @input_uV: Input voltage for regulator when supplied by another regulator.
91 * 95 *
@@ -111,6 +115,9 @@ struct regulation_constraints {
111 /* current output range (inclusive) - for current control */ 115 /* current output range (inclusive) - for current control */
112 int min_uA; 116 int min_uA;
113 int max_uA; 117 int max_uA;
118 int ilim_uA;
119
120 int system_load;
114 121
115 /* valid regulator operating modes for this machine */ 122 /* valid regulator operating modes for this machine */
116 unsigned int valid_modes_mask; 123 unsigned int valid_modes_mask;
@@ -138,6 +145,8 @@ struct regulation_constraints {
138 unsigned boot_on:1; /* bootloader/firmware enabled regulator */ 145 unsigned boot_on:1; /* bootloader/firmware enabled regulator */
139 unsigned apply_uV:1; /* apply uV constraint if min == max */ 146 unsigned apply_uV:1; /* apply uV constraint if min == max */
140 unsigned ramp_disable:1; /* disable ramp delay */ 147 unsigned ramp_disable:1; /* disable ramp delay */
148 unsigned soft_start:1; /* ramp voltage slowly */
149 unsigned pull_down:1; /* pull down resistor when regulator off */
141}; 150};
142 151
143/** 152/**
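A hedged sketch of board-level constraints exercising the new fields; the regulator name and all numeric values are illustrative, and system_load is assumed to be in microamps, matching the neighbouring *_uA fields:

static struct regulator_init_data foo_vcc_core_init = {
	.constraints = {
		.name		= "vcc_core",
		.min_uV		= 900000,
		.max_uV		= 1350000,
		.ilim_uA	= 3000000,	/* input current limit */
		.system_load	= 50000,	/* load not owned by any consumer */
		.soft_start	= 1,		/* ramp the output slowly */
		.pull_down	= 1,		/* discharge when disabled */
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};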
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index f8acc052e353..f6a8a16a0d4d 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -58,6 +58,9 @@
58 * control signal from EN input pin. If it is false then 58 * control signal from EN input pin. If it is false then
59 * voltage output will be enabled/disabled through EN bit of 59 * voltage output will be enabled/disabled through EN bit of
60 * device register. 60 * device register.
61 * @enable_gpio: Enable GPIO. If EN pin is controlled through GPIO from host
 62 * then the GPIO number can be provided. If the EN pin is not
 63 * GPIO-controlled, it should be -1.

61 * @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic. 64 * @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic.
62 * @dvs_def_state: Default state of dvs. 1 if it is high else 0. 65 * @dvs_def_state: Default state of dvs. 1 if it is high else 0.
63 */ 66 */
@@ -65,6 +68,7 @@ struct max8973_regulator_platform_data {
65 struct regulator_init_data *reg_init_data; 68 struct regulator_init_data *reg_init_data;
66 unsigned long control_flags; 69 unsigned long control_flags;
67 bool enable_ext_control; 70 bool enable_ext_control;
71 int enable_gpio;
68 int dvs_gpio; 72 int dvs_gpio;
69 unsigned dvs_def_state:1; 73 unsigned dvs_def_state:1;
70}; 74};
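A hedged sketch of platform data using the new enable_gpio field; the GPIO number is illustrative and foo_vcc_core_init refers to the constraints sketch above:

static struct max8973_regulator_platform_data foo_max8973_pdata = {
	.reg_init_data		= &foo_vcc_core_init,	/* see sketch above */
	.enable_ext_control	= false,
	.enable_gpio		= 37,	/* -1 if EN is not driven by a host GPIO */
	.dvs_gpio		= -1,	/* DVS pin tied to fixed logic */
};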
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 78b8a9b9d40a..9c4e1384f636 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -36,11 +36,11 @@
36#define REMOTEPROC_H 36#define REMOTEPROC_H
37 37
38#include <linux/types.h> 38#include <linux/types.h>
39#include <linux/klist.h>
40#include <linux/mutex.h> 39#include <linux/mutex.h>
41#include <linux/virtio.h> 40#include <linux/virtio.h>
42#include <linux/completion.h> 41#include <linux/completion.h>
43#include <linux/idr.h> 42#include <linux/idr.h>
43#include <linux/of.h>
44 44
45/** 45/**
46 * struct resource_table - firmware resource table header 46 * struct resource_table - firmware resource table header
@@ -330,11 +330,13 @@ struct rproc;
330 * @start: power on the device and boot it 330 * @start: power on the device and boot it
331 * @stop: power off the device 331 * @stop: power off the device
332 * @kick: kick a virtqueue (virtqueue id given as a parameter) 332 * @kick: kick a virtqueue (virtqueue id given as a parameter)
333 * @da_to_va: optional platform hook to perform address translations
333 */ 334 */
334struct rproc_ops { 335struct rproc_ops {
335 int (*start)(struct rproc *rproc); 336 int (*start)(struct rproc *rproc);
336 int (*stop)(struct rproc *rproc); 337 int (*stop)(struct rproc *rproc);
337 void (*kick)(struct rproc *rproc, int vqid); 338 void (*kick)(struct rproc *rproc, int vqid);
339 void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
338}; 340};
339 341
340/** 342/**
@@ -375,7 +377,7 @@ enum rproc_crash_type {
375 377
376/** 378/**
377 * struct rproc - represents a physical remote processor device 379 * struct rproc - represents a physical remote processor device
378 * @node: klist node of this rproc object 380 * @node: list node of this rproc object
379 * @domain: iommu domain 381 * @domain: iommu domain
380 * @name: human readable name of the rproc 382 * @name: human readable name of the rproc
381 * @firmware: name of firmware file to be loaded 383 * @firmware: name of firmware file to be loaded
@@ -407,7 +409,7 @@ enum rproc_crash_type {
407 * @has_iommu: flag to indicate if remote processor is behind an MMU 409 * @has_iommu: flag to indicate if remote processor is behind an MMU
408 */ 410 */
409struct rproc { 411struct rproc {
410 struct klist_node node; 412 struct list_head node;
411 struct iommu_domain *domain; 413 struct iommu_domain *domain;
412 const char *name; 414 const char *name;
413 const char *firmware; 415 const char *firmware;
@@ -481,6 +483,7 @@ struct rproc_vdev {
481 u32 rsc_offset; 483 u32 rsc_offset;
482}; 484};
483 485
486struct rproc *rproc_get_by_phandle(phandle phandle);
484struct rproc *rproc_alloc(struct device *dev, const char *name, 487struct rproc *rproc_alloc(struct device *dev, const char *name,
485 const struct rproc_ops *ops, 488 const struct rproc_ops *ops,
486 const char *firmware, int len); 489 const char *firmware, int len);
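A hedged sketch of the new da_to_va hook for a platform whose firmware runs from internal SRAM that the driver ioremapped at probe time; the foo_rproc structure and its fields are hypothetical, and a real driver would plug the function into struct rproc_ops alongside start/stop/kick:

struct foo_rproc {
	void __iomem *sram;	/* internal memory, ioremapped at probe time */
	u64 sram_da;		/* device address the firmware uses for it */
	size_t sram_len;
};

static void *foo_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct foo_rproc *priv = rproc->priv;

	if (len <= 0 || da < priv->sram_da ||
	    da + len > priv->sram_da + priv->sram_len)
		return NULL;	/* not ours; the core falls back to carveouts */

	return (__force void *)(priv->sram + (da - priv->sram_da));
}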
diff --git a/include/linux/reset/bcm63xx_pmb.h b/include/linux/reset/bcm63xx_pmb.h
new file mode 100644
index 000000000000..bb4af7b5eb36
--- /dev/null
+++ b/include/linux/reset/bcm63xx_pmb.h
@@ -0,0 +1,88 @@
1/*
2 * Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset)
3 *
4 * Copyright (C) 2015, Broadcom Corporation
5 * Author: Florian Fainelli <f.fainelli@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16#ifndef __BCM63XX_PMB_H
17#define __BCM63XX_PMB_H
18
19#include <linux/io.h>
20#include <linux/types.h>
21#include <linux/delay.h>
22#include <linux/err.h>
23
24/* PMB Master controller register */
25#define PMB_CTRL 0x00
26#define PMC_PMBM_START (1 << 31)
27#define PMC_PMBM_TIMEOUT (1 << 30)
28#define PMC_PMBM_SLAVE_ERR (1 << 29)
29#define PMC_PMBM_BUSY (1 << 28)
30#define PMC_PMBM_READ (0 << 20)
31#define PMC_PMBM_WRITE (1 << 20)
32#define PMB_WR_DATA 0x04
33#define PMB_TIMEOUT 0x08
34#define PMB_RD_DATA 0x0C
35
36#define PMB_BUS_ID_SHIFT 8
37
38/* Perform the low-level PMB master operation, shared between reads and
39 * writes.
40 */
41static inline int __bpcm_do_op(void __iomem *master, unsigned int addr,
42 u32 off, u32 op)
43{
44 unsigned int timeout = 1000;
45 u32 cmd;
46
47 cmd = (PMC_PMBM_START | op | (addr & 0xff) << 12 | off);
48 writel(cmd, master + PMB_CTRL);
49 do {
50 cmd = readl(master + PMB_CTRL);
51 if (!(cmd & PMC_PMBM_START))
52 return 0;
53
54 if (cmd & PMC_PMBM_SLAVE_ERR)
55 return -EIO;
56
57 if (cmd & PMC_PMBM_TIMEOUT)
58 return -ETIMEDOUT;
59
60 udelay(1);
61 } while (timeout-- > 0);
62
63 return -ETIMEDOUT;
64}
65
66static inline int bpcm_rd(void __iomem *master, unsigned int addr,
67 u32 off, u32 *val)
68{
69 int ret = 0;
70
71 ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_READ);
72 *val = readl(master + PMB_RD_DATA);
73
74 return ret;
75}
76
77static inline int bpcm_wr(void __iomem *master, unsigned int addr,
78 u32 off, u32 val)
79{
80 int ret = 0;
81
82 writel(val, master + PMB_WR_DATA);
83 ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_WRITE);
84
85 return ret;
86}
87
88#endif /* __BCM63XX_PMB_H */
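A hedged sketch of a caller using the bpcm_rd()/bpcm_wr() helpers above to power up one zone of a PMB slave; the BPCM_PWR_CTRL offset and bit are hypothetical and stand in for whatever the real BPCM block defines:

#define BPCM_PWR_CTRL		0x10	/* hypothetical per-zone register */
#define BPCM_PWR_ON		BIT(0)

static int foo_power_on_zone(void __iomem *master, unsigned int addr)
{
	u32 val;
	int ret;

	ret = bpcm_rd(master, addr, BPCM_PWR_CTRL, &val);
	if (ret)
		return ret;

	val |= BPCM_PWR_ON;

	return bpcm_wr(master, addr, BPCM_PWR_CTRL, val);
}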
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 6bda06f21930..cde976e86b48 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -298,7 +298,7 @@ struct rio_id_table {
298 * struct rio_net - RIO network info 298 * struct rio_net - RIO network info
299 * @node: Node in global list of RIO networks 299 * @node: Node in global list of RIO networks
300 * @devices: List of devices in this network 300 * @devices: List of devices in this network
301 * @switches: List of switches in this netowrk 301 * @switches: List of switches in this network
302 * @mports: List of master ports accessing this network 302 * @mports: List of master ports accessing this network
303 * @hport: Default port for accessing this network 303 * @hport: Default port for accessing this network
304 * @id: RIO network ID 304 * @id: RIO network ID
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 8dcf6825fa88..3359f0422c6b 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -24,6 +24,14 @@ extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm);
24ktime_t rtc_tm_to_ktime(struct rtc_time tm); 24ktime_t rtc_tm_to_ktime(struct rtc_time tm);
25struct rtc_time rtc_ktime_to_tm(ktime_t kt); 25struct rtc_time rtc_ktime_to_tm(ktime_t kt);
26 26
27/*
28 * rtc_tm_sub - Return the difference in seconds.
29 */
30static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
31{
32 return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
33}
34
27/** 35/**
28 * Deprecated. Use rtc_time64_to_tm(). 36 * Deprecated. Use rtc_time64_to_tm().
29 */ 37 */
@@ -101,8 +109,7 @@ struct rtc_timer {
101/* flags */ 109/* flags */
102#define RTC_DEV_BUSY 0 110#define RTC_DEV_BUSY 0
103 111
104struct rtc_device 112struct rtc_device {
105{
106 struct device dev; 113 struct device dev;
107 struct module *owner; 114 struct module *owner;
108 115
@@ -161,7 +168,6 @@ extern void devm_rtc_device_unregister(struct device *dev,
161 168
162extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); 169extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
163extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); 170extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
164extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
165extern int rtc_set_ntp_time(struct timespec64 now); 171extern int rtc_set_ntp_time(struct timespec64 now);
166int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); 172int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
167extern int rtc_read_alarm(struct rtc_device *rtc, 173extern int rtc_read_alarm(struct rtc_device *rtc,
@@ -198,10 +204,10 @@ int rtc_register(rtc_task_t *task);
198int rtc_unregister(rtc_task_t *task); 204int rtc_unregister(rtc_task_t *task);
199int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); 205int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
200 206
201void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data); 207void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
202int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer, 208int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
203 ktime_t expires, ktime_t period); 209 ktime_t expires, ktime_t period);
204int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer); 210void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
205void rtc_timer_do_work(struct work_struct *work); 211void rtc_timer_do_work(struct work_struct *work);
206 212
207static inline bool is_leap_year(unsigned int year) 213static inline bool is_leap_year(unsigned int year)
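A hedged sketch using the new rtc_tm_sub() helper to report how far in the future the currently programmed alarm lies; the function name is hypothetical and the return type follows rtc_tm_sub():

static time64_t foo_seconds_until_alarm(struct rtc_device *rtc)
{
	struct rtc_wkalrm alarm;
	struct rtc_time now;
	int ret;

	ret = rtc_read_time(rtc, &now);
	if (ret)
		return ret;

	ret = rtc_read_alarm(rtc, &alarm);
	if (ret)
		return ret;

	return rtc_tm_sub(&alarm.time, &now);
}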
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
index 2c92e1c8e055..aefd997262e4 100644
--- a/include/linux/rtc/sirfsoc_rtciobrg.h
+++ b/include/linux/rtc/sirfsoc_rtciobrg.h
@@ -9,10 +9,14 @@
9#ifndef _SIRFSOC_RTC_IOBRG_H_ 9#ifndef _SIRFSOC_RTC_IOBRG_H_
10#define _SIRFSOC_RTC_IOBRG_H_ 10#define _SIRFSOC_RTC_IOBRG_H_
11 11
12struct regmap_config;
13
12extern void sirfsoc_rtc_iobrg_besyncing(void); 14extern void sirfsoc_rtc_iobrg_besyncing(void);
13 15
14extern u32 sirfsoc_rtc_iobrg_readl(u32 addr); 16extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
15 17
16extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr); 18extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
19struct regmap *devm_regmap_init_iobg(struct device *dev,
20 const struct regmap_config *config);
17 21
18#endif 22#endif
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 7b8e260c4a27..39adaa9529eb 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -79,17 +79,9 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
79 79
80struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); 80struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
81 81
82#ifdef CONFIG_NET_CLS_ACT 82#ifdef CONFIG_NET_INGRESS
83void net_inc_ingress_queue(void); 83void net_inc_ingress_queue(void);
84void net_dec_ingress_queue(void); 84void net_dec_ingress_queue(void);
85#else
86static inline void net_inc_ingress_queue(void)
87{
88}
89
90static inline void net_dec_ingress_queue(void)
91{
92}
93#endif 85#endif
94 86
95extern void rtnetlink_init(void); 87extern void rtnetlink_init(void);
@@ -122,5 +114,9 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
122 114
123extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 115extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
124 struct net_device *dev, u16 mode, 116 struct net_device *dev, u16 mode,
125 u32 flags, u32 mask, int nlflags); 117 u32 flags, u32 mask, int nlflags,
118 u32 filter_mask,
119 int (*vlan_fill)(struct sk_buff *skb,
120 struct net_device *dev,
121 u32 filter_mask));
126#endif /* __LINUX_RTNETLINK_H */ 122#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index a0edb992c9c3..9b1ef0c820a7 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -2,13 +2,39 @@
2#define _LINUX_SCATTERLIST_H 2#define _LINUX_SCATTERLIST_H
3 3
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/types.h>
5#include <linux/bug.h> 6#include <linux/bug.h>
6#include <linux/mm.h> 7#include <linux/mm.h>
7
8#include <asm/types.h>
9#include <asm/scatterlist.h>
10#include <asm/io.h> 8#include <asm/io.h>
11 9
10struct scatterlist {
11#ifdef CONFIG_DEBUG_SG
12 unsigned long sg_magic;
13#endif
14 unsigned long page_link;
15 unsigned int offset;
16 unsigned int length;
17 dma_addr_t dma_address;
18#ifdef CONFIG_NEED_SG_DMA_LENGTH
19 unsigned int dma_length;
20#endif
21};
22
23/*
24 * These macros should be used after a dma_map_sg call has been done
25 * to get bus addresses of each of the SG entries and their lengths.
26 * You should only work with the number of sg entries dma_map_sg
27 * returns, or alternatively stop on the first sg_dma_len(sg) which
28 * is 0.
29 */
30#define sg_dma_address(sg) ((sg)->dma_address)
31
32#ifdef CONFIG_NEED_SG_DMA_LENGTH
33#define sg_dma_len(sg) ((sg)->dma_length)
34#else
35#define sg_dma_len(sg) ((sg)->length)
36#endif
37
12struct sg_table { 38struct sg_table {
13 struct scatterlist *sgl; /* the list */ 39 struct scatterlist *sgl; /* the list */
14 unsigned int nents; /* number of mapped entries */ 40 unsigned int nents; /* number of mapped entries */
@@ -18,10 +44,9 @@ struct sg_table {
18/* 44/*
19 * Notes on SG table design. 45 * Notes on SG table design.
20 * 46 *
21 * Architectures must provide an unsigned long page_link field in the 47 * We use the unsigned long page_link field in the scatterlist struct to place
22 * scatterlist struct. We use that to place the page pointer AND encode 48 * the page pointer AND encode information about the sg table as well. The two
23 * information about the sg table as well. The two lower bits are reserved 49 * lower bits are reserved for this information.
24 * for this information.
25 * 50 *
26 * If bit 0 is set, then the page_link contains a pointer to the next sg 51 * If bit 0 is set, then the page_link contains a pointer to the next sg
27 * table list. Otherwise the next entry is at sg + 1. 52 * table list. Otherwise the next entry is at sg + 1.
@@ -240,13 +265,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
240 unsigned long offset, unsigned long size, 265 unsigned long offset, unsigned long size,
241 gfp_t gfp_mask); 266 gfp_t gfp_mask);
242 267
268size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
269 size_t buflen, off_t skip, bool to_buffer);
270
243size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, 271size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
244 void *buf, size_t buflen); 272 const void *buf, size_t buflen);
245size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, 273size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
246 void *buf, size_t buflen); 274 void *buf, size_t buflen);
247 275
248size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, 276size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
249 void *buf, size_t buflen, off_t skip); 277 const void *buf, size_t buflen, off_t skip);
250size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, 278size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
251 void *buf, size_t buflen, off_t skip); 279 void *buf, size_t buflen, off_t skip);
252 280
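A hedged sketch of the pattern the new comment block describes: map a scatterlist, then walk only the entries dma_map_sg() returned, stopping at the first zero sg_dma_len(); the dev_dbg() call stands in for programming a real descriptor ring:

static int foo_map_and_log(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		if (!len)
			break;
		/* A real driver would write addr/len into its descriptor ring. */
		dev_dbg(dev, "segment %d: %pad, %u bytes\n", i, &addr, len);
	}

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}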
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6633e83e608a..04b5ada460b4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -58,6 +58,7 @@ struct sched_param {
58#include <linux/uidgid.h> 58#include <linux/uidgid.h>
59#include <linux/gfp.h> 59#include <linux/gfp.h>
60#include <linux/magic.h> 60#include <linux/magic.h>
61#include <linux/cgroup-defs.h>
61 62
62#include <asm/processor.h> 63#include <asm/processor.h>
63 64
@@ -191,8 +192,6 @@ struct task_group;
191#ifdef CONFIG_SCHED_DEBUG 192#ifdef CONFIG_SCHED_DEBUG
192extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); 193extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
193extern void proc_sched_set_task(struct task_struct *p); 194extern void proc_sched_set_task(struct task_struct *p);
194extern void
195print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
196#endif 195#endif
197 196
198/* 197/*
@@ -755,18 +754,6 @@ struct signal_struct {
755 unsigned audit_tty_log_passwd; 754 unsigned audit_tty_log_passwd;
756 struct tty_audit_buf *tty_audit_buf; 755 struct tty_audit_buf *tty_audit_buf;
757#endif 756#endif
758#ifdef CONFIG_CGROUPS
759 /*
760 * group_rwsem prevents new tasks from entering the threadgroup and
 761 * member tasks from exiting, more specifically, setting of
762 * PF_EXITING. fork and exit paths are protected with this rwsem
763 * using threadgroup_change_begin/end(). Users which require
764 * threadgroup to remain stable should use threadgroup_[un]lock()
765 * which also takes care of exec path. Currently, cgroup is the
766 * only user.
767 */
768 struct rw_semaphore group_rwsem;
769#endif
770 757
771 oom_flags_t oom_flags; 758 oom_flags_t oom_flags;
772 short oom_score_adj; /* OOM kill score adjustment */ 759 short oom_score_adj; /* OOM kill score adjustment */
@@ -849,7 +836,7 @@ extern struct user_struct root_user;
849struct backing_dev_info; 836struct backing_dev_info;
850struct reclaim_state; 837struct reclaim_state;
851 838
852#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 839#ifdef CONFIG_SCHED_INFO
853struct sched_info { 840struct sched_info {
854 /* cumulative counters */ 841 /* cumulative counters */
855 unsigned long pcount; /* # of times run on this cpu */ 842 unsigned long pcount; /* # of times run on this cpu */
@@ -859,7 +846,7 @@ struct sched_info {
859 unsigned long long last_arrival,/* when we last ran on a cpu */ 846 unsigned long long last_arrival,/* when we last ran on a cpu */
860 last_queued; /* when we were last queued to run */ 847 last_queued; /* when we were last queued to run */
861}; 848};
862#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ 849#endif /* CONFIG_SCHED_INFO */
863 850
864#ifdef CONFIG_TASK_DELAY_ACCT 851#ifdef CONFIG_TASK_DELAY_ACCT
865struct task_delay_info { 852struct task_delay_info {
@@ -1408,7 +1395,7 @@ struct task_struct {
1408 int rcu_tasks_idle_cpu; 1395 int rcu_tasks_idle_cpu;
1409#endif /* #ifdef CONFIG_TASKS_RCU */ 1396#endif /* #ifdef CONFIG_TASKS_RCU */
1410 1397
1411#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1398#ifdef CONFIG_SCHED_INFO
1412 struct sched_info sched_info; 1399 struct sched_info sched_info;
1413#endif 1400#endif
1414 1401
@@ -1535,8 +1522,6 @@ struct task_struct {
1535/* hung task detection */ 1522/* hung task detection */
1536 unsigned long last_switch_count; 1523 unsigned long last_switch_count;
1537#endif 1524#endif
1538/* CPU-specific state of this task */
1539 struct thread_struct thread;
1540/* filesystem information */ 1525/* filesystem information */
1541 struct fs_struct *fs; 1526 struct fs_struct *fs;
1542/* open file information */ 1527/* open file information */
@@ -1791,8 +1776,22 @@ struct task_struct {
1791 unsigned long task_state_change; 1776 unsigned long task_state_change;
1792#endif 1777#endif
1793 int pagefault_disabled; 1778 int pagefault_disabled;
1779/* CPU-specific state of this task */
1780 struct thread_struct thread;
1781/*
1782 * WARNING: on x86, 'thread_struct' contains a variable-sized
1783 * structure. It *MUST* be at the end of 'task_struct'.
1784 *
1785 * Do not put anything below here!
1786 */
1794}; 1787};
1795 1788
1789#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1790extern int arch_task_struct_size __read_mostly;
1791#else
1792# define arch_task_struct_size (sizeof(struct task_struct))
1793#endif
1794
1796/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1795/* Future-safe accessor for struct task_struct's cpus_allowed. */
1797#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) 1796#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1798 1797
@@ -2432,7 +2431,6 @@ extern void sched_dead(struct task_struct *p);
2432 2431
2433extern void proc_caches_init(void); 2432extern void proc_caches_init(void);
2434extern void flush_signals(struct task_struct *); 2433extern void flush_signals(struct task_struct *);
2435extern void __flush_signals(struct task_struct *);
2436extern void ignore_signals(struct task_struct *); 2434extern void ignore_signals(struct task_struct *);
2437extern void flush_signal_handlers(struct task_struct *, int force_default); 2435extern void flush_signal_handlers(struct task_struct *, int force_default);
2438extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); 2436extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
@@ -2556,8 +2554,22 @@ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2556/* Remove the current tasks stale references to the old mm_struct */ 2554/* Remove the current tasks stale references to the old mm_struct */
2557extern void mm_release(struct task_struct *, struct mm_struct *); 2555extern void mm_release(struct task_struct *, struct mm_struct *);
2558 2556
2557#ifdef CONFIG_HAVE_COPY_THREAD_TLS
2558extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
2559 struct task_struct *, unsigned long);
2560#else
2559extern int copy_thread(unsigned long, unsigned long, unsigned long, 2561extern int copy_thread(unsigned long, unsigned long, unsigned long,
2560 struct task_struct *); 2562 struct task_struct *);
2563
2564/* Architectures that haven't opted into copy_thread_tls get the tls argument
2565 * via pt_regs, so ignore the tls argument passed via C. */
2566static inline int copy_thread_tls(
2567 unsigned long clone_flags, unsigned long sp, unsigned long arg,
2568 struct task_struct *p, unsigned long tls)
2569{
2570 return copy_thread(clone_flags, sp, arg, p);
2571}
2572#endif
2561extern void flush_thread(void); 2573extern void flush_thread(void);
2562extern void exit_thread(void); 2574extern void exit_thread(void);
2563 2575
@@ -2576,6 +2588,7 @@ extern int do_execveat(int, struct filename *,
2576 const char __user * const __user *, 2588 const char __user * const __user *,
2577 const char __user * const __user *, 2589 const char __user * const __user *,
2578 int); 2590 int);
2591extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
2579extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); 2592extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2580struct task_struct *fork_idle(int); 2593struct task_struct *fork_idle(int);
2581extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 2594extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
@@ -2710,53 +2723,33 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
2710 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2723 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2711} 2724}
2712 2725
2713#ifdef CONFIG_CGROUPS
2714static inline void threadgroup_change_begin(struct task_struct *tsk)
2715{
2716 down_read(&tsk->signal->group_rwsem);
2717}
2718static inline void threadgroup_change_end(struct task_struct *tsk)
2719{
2720 up_read(&tsk->signal->group_rwsem);
2721}
2722
2723/** 2726/**
2724 * threadgroup_lock - lock threadgroup 2727 * threadgroup_change_begin - mark the beginning of changes to a threadgroup
2725 * @tsk: member task of the threadgroup to lock 2728 * @tsk: task causing the changes
2726 *
2727 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
2728 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2729 * change ->group_leader/pid. This is useful for cases where the threadgroup
2730 * needs to stay stable across blockable operations.
2731 *
2732 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2733 * synchronization. While held, no new task will be added to threadgroup
2734 * and no existing live task will have its PF_EXITING set.
2735 * 2729 *
2736 * de_thread() does threadgroup_change_{begin|end}() when a non-leader 2730 * All operations which modify a threadgroup - a new thread joining the
2737 * sub-thread becomes a new leader. 2731 * group, death of a member thread (the assertion of PF_EXITING) and
2732 * exec(2) dethreading the process and replacing the leader - are wrapped
2733 * by threadgroup_change_{begin|end}(). This is to provide a place which
2734 * subsystems needing threadgroup stability can hook into for
2735 * synchronization.
2738 */ 2736 */
2739static inline void threadgroup_lock(struct task_struct *tsk) 2737static inline void threadgroup_change_begin(struct task_struct *tsk)
2740{ 2738{
2741 down_write(&tsk->signal->group_rwsem); 2739 might_sleep();
2740 cgroup_threadgroup_change_begin(tsk);
2742} 2741}
2743 2742
2744/** 2743/**
2745 * threadgroup_unlock - unlock threadgroup 2744 * threadgroup_change_end - mark the end of changes to a threadgroup
2746 * @tsk: member task of the threadgroup to unlock 2745 * @tsk: task causing the changes
2747 * 2746 *
2748 * Reverse threadgroup_lock(). 2747 * See threadgroup_change_begin().
2749 */ 2748 */
2750static inline void threadgroup_unlock(struct task_struct *tsk) 2749static inline void threadgroup_change_end(struct task_struct *tsk)
2751{ 2750{
2752 up_write(&tsk->signal->group_rwsem); 2751 cgroup_threadgroup_change_end(tsk);
2753} 2752}
2754#else
2755static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2756static inline void threadgroup_change_end(struct task_struct *tsk) {}
2757static inline void threadgroup_lock(struct task_struct *tsk) {}
2758static inline void threadgroup_unlock(struct task_struct *tsk) {}
2759#endif
2760 2753
2761#ifndef __HAVE_THREAD_FUNCTIONS 2754#ifndef __HAVE_THREAD_FUNCTIONS
2762 2755
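A hedged sketch of what an architecture opting into HAVE_COPY_THREAD_TLS provides: a copy_thread_tls() that receives the clone TLS pointer as a plain C argument instead of digging it out of pt_regs. The thread_struct field name is hypothetical and the register/stack setup a real port performs is elided:

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		    unsigned long arg, struct task_struct *p,
		    unsigned long tls)
{
	/* ...set up the child's kernel stack and registers as before... */

	if (clone_flags & CLONE_SETTLS)
		p->thread.tp_value = tls;	/* hypothetical thread_struct field */

	return 0;
}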
diff --git a/include/linux/scif.h b/include/linux/scif.h
new file mode 100644
index 000000000000..44f4f3898bbe
--- /dev/null
+++ b/include/linux/scif.h
@@ -0,0 +1,993 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * BSD LICENSE
21 *
22 * Copyright(c) 2014 Intel Corporation.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name of Intel Corporation nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 *
50 * Intel SCIF driver.
51 *
52 */
53#ifndef __SCIF_H__
54#define __SCIF_H__
55
56#include <linux/types.h>
57#include <linux/poll.h>
58#include <linux/scif_ioctl.h>
59
60#define SCIF_ACCEPT_SYNC 1
61#define SCIF_SEND_BLOCK 1
62#define SCIF_RECV_BLOCK 1
63
64enum {
65 SCIF_PROT_READ = (1 << 0),
66 SCIF_PROT_WRITE = (1 << 1)
67};
68
69enum {
70 SCIF_MAP_FIXED = 0x10,
71 SCIF_MAP_KERNEL = 0x20,
72};
73
74enum {
75 SCIF_FENCE_INIT_SELF = (1 << 0),
76 SCIF_FENCE_INIT_PEER = (1 << 1),
77 SCIF_SIGNAL_LOCAL = (1 << 4),
78 SCIF_SIGNAL_REMOTE = (1 << 5)
79};
80
81enum {
82 SCIF_RMA_USECPU = (1 << 0),
83 SCIF_RMA_USECACHE = (1 << 1),
84 SCIF_RMA_SYNC = (1 << 2),
85 SCIF_RMA_ORDERED = (1 << 3)
86};
87
88/* End of SCIF Admin Reserved Ports */
89#define SCIF_ADMIN_PORT_END 1024
90
91/* End of SCIF Reserved Ports */
92#define SCIF_PORT_RSVD 1088
93
94typedef struct scif_endpt *scif_epd_t;
95
96#define SCIF_OPEN_FAILED ((scif_epd_t)-1)
97#define SCIF_REGISTER_FAILED ((off_t)-1)
98#define SCIF_MMAP_FAILED ((void *)-1)
99
100/**
101 * scif_open() - Create an endpoint
102 *
103 * Return:
104 * Upon successful completion, scif_open() returns an endpoint descriptor to
105 * be used in subsequent SCIF functions calls to refer to that endpoint;
106 * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is
107 * returned and errno is set to indicate the error; in kernel mode a NULL
108 * scif_epd_t is returned.
109 *
110 * Errors:
111 * ENOMEM - Insufficient kernel memory was available
112 */
113scif_epd_t scif_open(void);
114
115/**
116 * scif_bind() - Bind an endpoint to a port
117 * @epd: endpoint descriptor
118 * @pn: port number
119 *
120 * scif_bind() binds endpoint epd to port pn, where pn is a port number on the
121 * local node. If pn is zero, a port number greater than or equal to
122 * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to
123 * exactly one local port. Ports less than 1024 when requested can only be bound
124 * by system (or root) processes or by processes executed by privileged users.
125 *
126 * Return:
127 * Upon successful completion, scif_bind() returns the port number to which epd
128 * is bound; otherwise in user mode -1 is returned and errno is set to
129 * indicate the error; in kernel mode the negative of one of the following
130 * errors is returned.
131 *
132 * Errors:
133 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
134 * EINVAL - the endpoint or the port is already bound
135 * EISCONN - The endpoint is already connected
136 * ENOSPC - No port number available for assignment
137 * EACCES - The port requested is protected and the user is not the superuser
138 */
139int scif_bind(scif_epd_t epd, u16 pn);
140
141/**
142 * scif_listen() - Listen for connections on an endpoint
143 * @epd: endpoint descriptor
144 * @backlog: maximum pending connection requests
145 *
146 * scif_listen() marks the endpoint epd as a listening endpoint - that is, as
147 * an endpoint that will be used to accept incoming connection requests. Once
148 * so marked, the endpoint is said to be in the listening state and may not be
149 * used as the endpoint of a connection.
150 *
151 * The endpoint, epd, must have been bound to a port.
152 *
153 * The backlog argument defines the maximum length to which the queue of
154 * pending connections for epd may grow. If a connection request arrives when
155 * the queue is full, the client may receive an error with an indication that
156 * the connection was refused.
157 *
158 * Return:
159 * Upon successful completion, scif_listen() returns 0; otherwise in user mode
160 * -1 is returned and errno is set to indicate the error; in kernel mode the
161 * negative of one of the following errors is returned.
162 *
163 * Errors:
164 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
165 * EINVAL - the endpoint is not bound to a port
166 * EISCONN - The endpoint is already connected or listening
167 */
168int scif_listen(scif_epd_t epd, int backlog);
169
170/**
171 * scif_connect() - Initiate a connection on a port
172 * @epd: endpoint descriptor
173 * @dst: global id of port to which to connect
174 *
175 * The scif_connect() function requests the connection of endpoint epd to remote
176 * port dst. If the connection is successful, a peer endpoint, bound to dst, is
177 * created on node dst.node. On successful return, the connection is complete.
178 *
179 * If the endpoint epd has not already been bound to a port, scif_connect()
180 * will bind it to an unused local port.
181 *
182 * A connection is terminated when an endpoint of the connection is closed,
183 * either explicitly by scif_close(), or when a process that owns one of the
184 * endpoints of the connection is terminated.
185 *
186 * In user space, scif_connect() supports an asynchronous connection mode
187 * if the application has set the O_NONBLOCK flag on the endpoint via the
188 * fcntl() system call. Setting this flag will result in the calling process
 189 * not waiting during scif_connect().
190 *
191 * Return:
192 * Upon successful completion, scif_connect() returns the port ID to which the
193 * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is
194 * set to indicate the error; in kernel mode the negative of one of the
195 * following errors is returned.
196 *
197 * Errors:
198 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
199 * ECONNREFUSED - The destination was not listening for connections or refused
200 * the connection request
201 * EINVAL - dst.port is not a valid port ID
202 * EISCONN - The endpoint is already connected
203 * ENOMEM - No buffer space is available
204 * ENODEV - The destination node does not exist, or the node is lost or existed,
205 * but is not currently in the network since it may have crashed
206 * ENOSPC - No port number available for assignment
207 * EOPNOTSUPP - The endpoint is listening and cannot be connected
208 */
209int scif_connect(scif_epd_t epd, struct scif_port_id *dst);
210
211/**
212 * scif_accept() - Accept a connection on an endpoint
213 * @epd: endpoint descriptor
214 * @peer: global id of port to which connected
215 * @newepd: new connected endpoint descriptor
216 * @flags: flags
217 *
218 * The scif_accept() call extracts the first connection request from the queue
219 * of pending connections for the port on which epd is listening. scif_accept()
220 * creates a new endpoint, bound to the same port as epd, and allocates a new
221 * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new
222 * endpoint is connected to the endpoint through which the connection was
223 * requested. epd is unaffected by this call, and remains in the listening
224 * state.
225 *
226 * On successful return, peer holds the global port identifier (node id and
227 * local port number) of the port which requested the connection.
228 *
229 * A connection is terminated when an endpoint of the connection is closed,
230 * either explicitly by scif_close(), or when a process that owns one of the
231 * endpoints of the connection is terminated.
232 *
233 * The number of connections that can (subsequently) be accepted on epd is only
234 * limited by system resources (memory).
235 *
236 * The flags argument is formed by OR'ing together zero or more of the
237 * following values.
238 * SCIF_ACCEPT_SYNC - block until a connection request is presented. If
239 * SCIF_ACCEPT_SYNC is not in flags, and no pending
240 * connections are present on the queue, scif_accept()
241 * fails with an EAGAIN error
242 *
243 * In user mode, the select() and poll() functions can be used to determine
244 * when there is a connection request. In kernel mode, the scif_poll()
245 * function may be used for this purpose. A readable event will be delivered
246 * when a connection is requested.
247 *
248 * Return:
249 * Upon successful completion, scif_accept() returns 0; otherwise in user mode
250 * -1 is returned and errno is set to indicate the error; in kernel mode the
251 * negative of one of the following errors is returned.
252 *
253 * Errors:
254 * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be
255 * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete
256 * its connection request
257 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
258 * EINTR - Interrupted function
259 * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is
260 * NULL, or newepd is NULL
261 * ENODEV - The requesting node is lost or existed, but is not currently in the
262 * network since it may have crashed
263 * ENOMEM - Not enough space
264 * ENOENT - Secondary part of epd registration failed
265 */
266int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t
267 *newepd, int flags);
268
269/**
270 * scif_close() - Close an endpoint
271 * @epd: endpoint descriptor
272 *
273 * scif_close() closes an endpoint and performs necessary teardown of
274 * facilities associated with that endpoint.
275 *
276 * If epd is a listening endpoint then it will no longer accept connection
277 * requests on the port to which it is bound. Any pending connection requests
278 * are rejected.
279 *
280 * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs
281 * which are in-process through epd or its peer endpoint will complete before
282 * scif_close() returns. Registered windows of the local and peer endpoints are
283 * released as if scif_unregister() was called against each window.
284 *
285 * Closing a SCIF endpoint does not affect local registered memory mapped by
286 * a SCIF endpoint on a remote node. The local memory remains mapped by the peer
 287 * SCIF endpoint until explicitly removed by calling munmap(..) by the peer.
288 *
289 * If the peer endpoint's receive queue is not empty at the time that epd is
290 * closed, then the peer endpoint can be passed as the endpoint parameter to
291 * scif_recv() until the receive queue is empty.
292 *
293 * epd is freed and may no longer be accessed.
294 *
295 * Return:
296 * Upon successful completion, scif_close() returns 0; otherwise in user mode
297 * -1 is returned and errno is set to indicate the error; in kernel mode the
298 * negative of one of the following errors is returned.
299 *
300 * Errors:
301 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
302 */
303int scif_close(scif_epd_t epd);
304
305/**
306 * scif_send() - Send a message
307 * @epd: endpoint descriptor
308 * @msg: message buffer address
309 * @len: message length
310 * @flags: blocking mode flags
311 *
312 * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data
313 * are copied from memory starting at address msg. On successful execution the
314 * return value of scif_send() is the number of bytes that were sent, and is
315 * zero if no bytes were sent because len was zero. scif_send() may be called
316 * only when the endpoint is in a connected state.
317 *
318 * If a scif_send() call is non-blocking, then it sends only those bytes which
319 * can be sent without waiting, up to a maximum of len bytes.
320 *
321 * If a scif_send() call is blocking, then it normally returns after sending
322 * all len bytes. If a blocking call is interrupted or the connection is
323 * reset, the call is considered successful if some bytes were sent or len is
324 * zero, otherwise the call is considered unsuccessful.
325 *
326 * In user mode, the select() and poll() functions can be used to determine
327 * when the send queue is not full. In kernel mode, the scif_poll() function
328 * may be used for this purpose.
329 *
330 * It is recommended that scif_send()/scif_recv() only be used for short
331 * control-type message communication between SCIF endpoints. The SCIF RMA
332 * APIs are expected to provide better performance for transfer sizes of
333 * 1024 bytes or longer for the current MIC hardware and software
334 * implementation.
335 *
336 * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK
337 * is passed as the flags argument.
338 *
339 * Return:
340 * Upon successful completion, scif_send() returns the number of bytes sent;
341 * otherwise in user mode -1 is returned and errno is set to indicate the
342 * error; in kernel mode the negative of one of the following errors is
343 * returned.
344 *
345 * Errors:
346 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
347 * ECONNRESET - Connection reset by peer
348 * EFAULT - An invalid address was specified for a parameter
349 * EINVAL - flags is invalid, or len is negative
350 * ENODEV - The remote node is lost or existed, but is not currently in the
351 * network since it may have crashed
352 * ENOMEM - Not enough space
353 * ENOTCONN - The endpoint is not connected
354 */
355int scif_send(scif_epd_t epd, void *msg, int len, int flags);
356
357/**
358 * scif_recv() - Receive a message
359 * @epd: endpoint descriptor
360 * @msg: message buffer address
361 * @len: message buffer length
362 * @flags: blocking mode flags
363 *
364 * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of
365 * data are copied to memory starting at address msg. On successful execution
366 * the return value of scif_recv() is the number of bytes that were received,
367 * and is zero if no bytes were received because len was zero. scif_recv() may
368 * be called only when the endpoint is in a connected state.
369 *
370 * If a scif_recv() call is non-blocking, then it receives only those bytes
371 * which can be received without waiting, up to a maximum of len bytes.
372 *
373 * If a scif_recv() call is blocking, then it normally returns after receiving
374 * all len bytes. If the blocking call was interrupted due to a disconnection,
 375 * subsequent calls to scif_recv() will copy all bytes received up to the point
376 * of disconnection.
377 *
378 * In user mode, the select() and poll() functions can be used to determine
379 * when data is available to be received. In kernel mode, the scif_poll()
380 * function may be used for this purpose.
381 *
382 * It is recommended that scif_send()/scif_recv() only be used for short
383 * control-type message communication between SCIF endpoints. The SCIF RMA
384 * APIs are expected to provide better performance for transfer sizes of
385 * 1024 bytes or longer for the current MIC hardware and software
386 * implementation.
387 *
388 * scif_recv() will block until the entire message is received if
389 * SCIF_RECV_BLOCK is passed as the flags argument.
390 *
391 * Return:
392 * Upon successful completion, scif_recv() returns the number of bytes
393 * received; otherwise in user mode -1 is returned and errno is set to
394 * indicate the error; in kernel mode the negative of one of the following
395 * errors is returned.
396 *
397 * Errors:
398 * EAGAIN - The destination node is returning from a low power state
399 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
400 * ECONNRESET - Connection reset by peer
401 * EFAULT - An invalid address was specified for a parameter
402 * EINVAL - flags is invalid, or len is negative
403 * ENODEV - The remote node is lost or existed, but is not currently in the
404 * network since it may have crashed
405 * ENOMEM - Not enough space
406 * ENOTCONN - The endpoint is not connected
407 */
408int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
409
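Taking the connection and messaging calls documented above together, a hedged sketch of a minimal kernel-mode client; the destination node/port and the fixed-size message are illustrative, and error unwinding is kept to the bare minimum:

static int foo_scif_ping(void)
{
	struct scif_port_id dst = { .node = 1, .port = 2049 };	/* illustrative */
	char buf[8] = "ping";
	scif_epd_t epd;
	int ret;

	epd = scif_open();
	if (!epd)
		return -ENOMEM;

	ret = scif_connect(epd, &dst);		/* binds epd to a free local port */
	if (ret < 0)
		goto out;

	ret = scif_send(epd, buf, sizeof(buf), SCIF_SEND_BLOCK);
	if (ret < 0)
		goto out;

	ret = scif_recv(epd, buf, sizeof(buf), SCIF_RECV_BLOCK);
out:
	scif_close(epd);
	return ret < 0 ? ret : 0;
}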
410/**
411 * scif_register() - Mark a memory region for remote access.
412 * @epd: endpoint descriptor
413 * @addr: starting virtual address
414 * @len: length of range
415 * @offset: offset of window
416 * @prot_flags: read/write protection flags
417 * @map_flags: mapping flags
418 *
419 * The scif_register() function opens a window, a range of whole pages of the
420 * registered address space of the endpoint epd, starting at offset po and
421 * continuing for len bytes. The value of po, further described below, is a
422 * function of the parameters offset and len, and the value of map_flags. Each
423 * page of the window represents the physical memory page which backs the
424 * corresponding page of the range of virtual address pages starting at addr
425 * and continuing for len bytes. addr and len are constrained to be multiples
426 * of the page size. A successful scif_register() call returns po.
427 *
428 * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
429 * exactly, and offset is constrained to be a multiple of the page size. The
430 * mapping established by scif_register() will not replace any existing
431 * registration; an error is returned if any page within the range [offset,
432 * offset + len - 1] intersects an existing window.
433 *
434 * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
435 * implementation-defined manner to arrive at po. The po value so chosen will
436 * be an area of the registered address space that the implementation deems
437 * suitable for a mapping of len bytes. An offset value of 0 is interpreted as
438 * granting the implementation complete freedom in selecting po, subject to
439 * constraints described below. A non-zero value of offset is taken to be a
440 * suggestion of an offset near which the mapping should be placed. When the
441 * implementation selects a value for po, it does not replace any extant
442 * window. In all cases, po will be a multiple of the page size.
443 *
444 * The physical pages which are so represented by a window are available for
445 * access in calls to mmap(), scif_readfrom(), scif_writeto(),
446 * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
447 * physical pages represented by the window will not be reused by the memory
448 * subsystem for any other purpose. Note that the same physical page may be
449 * represented by multiple windows.
450 *
451 * Subsequent operations which change the memory pages to which virtual
452 * addresses are mapped (such as mmap(), munmap()) have no effect on
453 * existing window.
454 *
455 * If the process will fork(), it is recommended that the registered
456 * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
457 * problems due to copy-on-write semantics.
458 *
459 * The prot_flags argument is formed by OR'ing together one or more of the
460 * following values.
461 * SCIF_PROT_READ - allow read operations from the window
462 * SCIF_PROT_WRITE - allow write operations to the window
463 *
464 * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a
465 * fixed offset.
466 *
467 * Return:
468 * Upon successful completion, scif_register() returns the offset at which the
469 * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that
470 * is (off_t *)-1) is returned and errno is set to indicate the error; in
471 * kernel mode the negative of one of the following errors is returned.
472 *
473 * Errors:
474 * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range
475 * [offset, offset + len -1] are already registered
476 * EAGAIN - The mapping could not be performed due to lack of resources
477 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
478 * ECONNRESET - Connection reset by peer
479 * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
480 * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is
481 * set in flags, and offset is not a multiple of the page size, or addr is not a
482 * multiple of the page size, or len is not a multiple of the page size, or is
483 * 0, or offset is negative
484 * ENODEV - The remote node is lost or existed, but is not currently in the
485 * network since it may have crashed
486 * ENOMEM - Not enough space
 487 * ENOTCONN - The endpoint is not connected
488 */
489off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
490 int prot_flags, int map_flags);
491
492/**
493 * scif_unregister() - Mark a memory region for remote access.
494 * @epd: endpoint descriptor
495 * @offset: start of range to unregister
496 * @len: length of range to unregister
497 *
498 * The scif_unregister() function closes those previously registered windows
499 * which are entirely within the range [offset, offset + len - 1]. It is an
500 * error to specify a range which intersects only a subrange of a window.
501 *
502 * On a successful return, pages within the window may no longer be specified
503 * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(),
504 * scif_vwriteto(), scif_get_pages, and scif_fence_signal(). The window,
505 * however, continues to exist until all previous references against it are
506 * removed. A window is referenced if there is a mapping to it created by
507 * mmap(), or if scif_get_pages() was called against the window
508 * (and the pages have not been returned via scif_put_pages()). A window is
509 * also referenced while an RMA, in which some range of the window is a source
510 * or destination, is in progress. Finally a window is referenced while some
511 * offset in that window was specified to scif_fence_signal(), and the RMAs
512 * marked by that call to scif_fence_signal() have not completed. While a
513 * window is in this state, its registered address space pages are not
514 * available for use in a new registered window.
515 *
516 * When all such references to the window have been removed, its references to
517 * all the physical pages which it represents are removed. Similarly, the
518 * registered address space pages of the window become available for
519 * registration in a new window.
520 *
521 * Return:
522 * Upon successful completion, scif_unregister() returns 0; otherwise in user
523 * mode -1 is returned and errno is set to indicate the error; in kernel mode
524 * the negative of one of the following errors is returned. In the event of an
525 * error, no windows are unregistered.
526 *
527 * Errors:
528 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
529 * ECONNRESET - Connection reset by peer
530 * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a
531 * window, or offset is negative
532 * ENODEV - The remote node is lost or has exited and is no longer in the
533 * network (it may have crashed)
534 * ENOTCONN - The endpoint is not connected
535 * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the
536 * registered address space of epd
537 */
538int scif_unregister(scif_epd_t epd, off_t offset, size_t len);
539
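
Continuing the sketch above, the matching teardown; again assuming the
user-space <scif.h> prototypes, with win/buf/len describing a window created as
in the registration sketch (all names illustrative):

#include <scif.h>
#include <stdio.h>
#include <stdlib.h>

/* Tear down a window created by scif_register(); the range must cover the
 * entire window, since partial unregistration is an error (EINVAL). */
static int unregister_window(scif_epd_t epd, off_t win, void *buf, size_t len)
{
	if (scif_unregister(epd, win, len) < 0) {
		perror("scif_unregister");
		return -1;
	}
	/* The local pages can be released once no mappings or in-flight RMAs
	 * still reference the window. */
	free(buf);
	return 0;
}
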
540/**
541 * scif_readfrom() - Copy from a remote address space
542 * @epd: endpoint descriptor
543 * @loffset: offset in local registered address space to
544 * which to copy
545 * @len: length of range to copy
546 * @roffset: offset in remote registered address space
547 * from which to copy
548 * @rma_flags: transfer mode flags
549 *
550 * scif_readfrom() copies len bytes from the remote registered address space of
551 * the peer of endpoint epd, starting at the offset roffset to the local
552 * registered address space of epd, starting at the offset loffset.
553 *
554 * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
555 * roffset + len - 1] must be within some registered window or windows of the
556 * local and remote nodes. A range may intersect multiple registered windows,
557 * but only if those windows are contiguous in the registered address space.
558 *
559 * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
560 * programmed read/writes. Otherwise the data is copied using DMA. If
561 * rma_flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after
562 * the transfer is complete. Otherwise, the transfer may be performed
563 * asynchronously. The order in which any two asynchronous RMA operations
564 * complete is non-deterministic. The synchronization functions, scif_fence_mark()/
565 * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
566 * the completion of asynchronous RMA operations on the same endpoint.
567 *
568 * The DMA transfer of individual bytes is not guaranteed to complete in
569 * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
570 * cacheline or partial cacheline of the source range will become visible on
571 * the destination node after all other transferred data in the source
572 * range has become visible on the destination node.
573 *
574 * The optimal DMA performance will likely be realized if both
575 * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
576 * performance will likely be realized if loffset and roffset are not
577 * cacheline aligned but are separated by some multiple of 64. The lowest level
578 * of performance is likely if loffset and roffset are not separated by a
579 * multiple of 64.
580 *
581 * The rma_flags argument is formed by ORing together zero or more of the
582 * following values.
583 * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
584 * engine.
585 * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
586 *                   transfer has completed. In the current implementation,
587 *                   passing this flag causes the caller to busy wait,
588 *                   consuming CPU cycles while the DMA transfer is in
589 *                   progress; this gives the best performance by avoiding
589 *                   interrupt latency.
590 * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
591 * the source range becomes visible on the destination node
592 * after all other transferred data in the source range has
593 * become visible on the destination
594 *
595 * Return:
596 * Upon successful completion, scif_readfrom() returns 0; otherwise in user
597 * mode -1 is returned and errno is set to indicate the error; in kernel mode
598 * the negative of one of the following errors is returned.
599 *
600 * Errors:
601 * EACCES - Attempt to write to a read-only range
602 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
603 * ECONNRESET - Connection reset by peer
604 * EINVAL - rma_flags is invalid
605 * ENODEV - The remote node is lost or has exited and is no longer in the
606 * network (it may have crashed)
607 * ENOTCONN - The endpoint is not connected
608 * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
609 * address space of epd, or the range [roffset, roffset + len - 1] is invalid
610 * for the registered address space of the peer of epd, or loffset or roffset
611 * is negative
612 */
613int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t
614 roffset, int rma_flags);
615
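
A hedged sketch of the simplest use of scif_readfrom(): a blocking DMA pull
between two already-registered windows. It assumes user-space <scif.h>, a
connected endpoint epd, and that [loff, loff + len - 1] and
[roff, roff + len - 1] fall inside local and remote windows set up beforehand:

#include <scif.h>
#include <stdio.h>

/* Pull 'len' bytes from the peer's window at 'roff' into the local window at
 * 'loff', blocking until the DMA transfer has completed. */
static int pull_sync(scif_epd_t epd, off_t loff, off_t roff, size_t len)
{
	if (scif_readfrom(epd, loff, len, roff, SCIF_RMA_SYNC) < 0) {
		perror("scif_readfrom");
		return -1;
	}
	return 0;
}
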
616/**
617 * scif_writeto() - Copy to a remote address space
618 * @epd: endpoint descriptor
619 * @loffset: offset in local registered address space
620 * from which to copy
621 * @len: length of range to copy
622 * @roffset: offset in remote registered address space to
623 * which to copy
624 * @rma_flags: transfer mode flags
625 *
626 * scif_writeto() copies len bytes from the local registered address space of
627 * epd, starting at the offset loffset to the remote registered address space
628 * of the peer of endpoint epd, starting at the offset roffset.
629 *
630 * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
631 * roffset + len - 1] must be within some registered window or windows of the
632 * local and remote nodes. A range may intersect multiple registered windows,
633 * but only if those windows are contiguous in the registered address space.
634 *
635 * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
636 * programmed read/writes. Otherwise the data is copied using DMA. If
637 * rma_flags includes SCIF_RMA_SYNC, then scif_writeto() will return after
638 * the transfer is complete. Otherwise, the transfer may be performed
639 * asynchronously. The order in which any two asynchronous RMA operations
640 * complete is non-deterministic. The synchronization functions, scif_fence_mark()/
641 * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
642 * the completion of asynchronous RMA operations on the same endpoint.
643 *
644 * The DMA transfer of individual bytes is not guaranteed to complete in
645 * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
646 * cacheline or partial cacheline of the source range will become visible on
647 * the destination node after all other transferred data in the source
648 * range has become visible on the destination node.
649 *
650 * The optimal DMA performance will likely be realized if both
651 * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
652 * performance will likely be realized if loffset and roffset are not cacheline
653 * aligned but are separated by some multiple of 64. The lowest level of
654 * performance is likely if loffset and roffset are not separated by a multiple
655 * of 64.
656 *
657 * The rma_flags argument is formed by ORing together zero or more of the
658 * following values.
659 * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
660 * engine.
661 * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
662 *                   transfer has completed. In the current implementation,
663 *                   passing this flag causes the caller to busy wait,
664 *                   consuming CPU cycles while the DMA transfer is in
665 *                   progress; this gives the best performance by avoiding
665 *                   interrupt latency.
666 * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
667 * the source range becomes visible on the destination node
668 * after all other transferred data in the source range has
669 * become visible on the destination
670 *
671 * Return:
672 * Upon successful completion, scif_writeto() returns 0; otherwise in user
673 * mode -1 is returned and errno is set to indicate the error; in kernel mode
674 * the negative of one of the following errors is returned.
675 *
676 * Errors:
677 * EACCES - Attempt to write to a read-only range
678 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
679 * ECONNRESET - Connection reset by peer
680 * EINVAL - rma_flags is invalid
681 * ENODEV - The remote node is lost or has exited and is no longer in the
682 * network (it may have crashed)
683 * ENOTCONN - The endpoint is not connected
684 * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
685 * address space of epd, or the range [roffset, roffset + len - 1] is invalid
686 * for the registered address space of the peer of epd, or loffset or roffset
687 * is negative
688 */
689int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t
690 roffset, int rma_flags);
691
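
A sketch of the mirror-image push with scif_writeto(), combining SCIF_RMA_SYNC
with SCIF_RMA_ORDERED so that the final cacheline of the payload (for example a
ready flag the peer polls) cannot become visible before the rest; assumptions
are the same as in the scif_readfrom() sketch above:

#include <scif.h>
#include <stdio.h>

/* Push 'len' bytes from the local window at 'loff' to the peer's window at
 * 'roff'.  SCIF_RMA_ORDERED keeps the last cacheline (e.g. a ready flag at the
 * end of the payload) from becoming visible before the rest of the data;
 * SCIF_RMA_SYNC makes the call block until the transfer is done. */
static int push_ordered(scif_epd_t epd, off_t loff, off_t roff, size_t len)
{
	if (scif_writeto(epd, loff, len, roff,
			 SCIF_RMA_SYNC | SCIF_RMA_ORDERED) < 0) {
		perror("scif_writeto");
		return -1;
	}
	return 0;
}
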
692/**
693 * scif_vreadfrom() - Copy from a remote address space
694 * @epd: endpoint descriptor
695 * @addr: address to which to copy
696 * @len: length of range to copy
697 * @roffset: offset in remote registered address space
698 * from which to copy
699 * @rma_flags: transfer mode flags
700 *
701 * scif_vreadfrom() copies len bytes from the remote registered address
702 * space of the peer of endpoint epd, starting at the offset roffset, to local
703 * memory, starting at addr.
704 *
705 * The specified range [roffset, roffset + len - 1] must be within some
706 * registered window or windows of the remote nodes. The range may
707 * intersect multiple registered windows, but only if those windows are
708 * contiguous in the registered address space.
709 *
710 * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
711 * programmed read/writes. Otherwise the data is copied using DMA. If
712 * rma_flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after
713 * the transfer is complete. Otherwise, the transfer may be performed
714 * asynchronously. The order in which any two asynchronous RMA operations
715 * complete is non-deterministic. The synchronization functions, scif_fence_mark()/
716 * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
717 * the completion of asynchronous RMA operations on the same endpoint.
718 *
719 * The DMA transfer of individual bytes is not guaranteed to complete in
720 * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
721 * cacheline or partial cacheline of the source range will become visible on
722 * the destination node after all other transferred data in the source
723 * range has become visible on the destination node.
724 *
725 * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
726 * the specified local memory range may remain in a pinned state even after
727 * the specified transfer completes. This may reduce overhead if some or all of
728 * the same virtual address range is referenced in a subsequent call of
729 * scif_vreadfrom() or scif_vwriteto().
730 *
731 * The optimal DMA performance will likely be realized if both
732 * addr and roffset are cacheline aligned (are a multiple of 64). Lower
733 * performance will likely be realized if addr and roffset are not
734 * cacheline aligned but are separated by some multiple of 64. The lowest level
735 * of performance is likely if addr and roffset are not separated by a
736 * multiple of 64.
737 *
738 * The rma_flags argument is formed by ORing together zero or more of the
739 * following values.
740 * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
741 * engine.
742 * SCIF_RMA_USECACHE - enable registration caching
743 * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
744 *                   transfer has completed. In the current implementation,
745 *                   passing this flag causes the caller to busy wait,
746 *                   consuming CPU cycles while the DMA transfer is in
747 *                   progress; this gives the best performance by avoiding
747 *                   interrupt latency.
748 * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
749 * the source range becomes visible on the destination node
750 * after all other transferred data in the source range has
751 * become visible on the destination
752 *
753 * Return:
754 * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user
755 * mode -1 is returned and errno is set to indicate the error; in kernel mode
756 * the negative of one of the following errors is returned.
757 *
758 * Errors:
759 * EACCES - Attempt to write to a read-only range
760 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
761 * ECONNRESET - Connection reset by peer
762 * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
763 * EINVAL - rma_flags is invalid
764 * ENODEV - The remote node is lost or has exited and is no longer in the
765 * network (it may have crashed)
766 * ENOTCONN - The endpoint is not connected
767 * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
768 * registered address space of epd
769 */
770int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset,
771 int rma_flags);
772
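
A sketch of scif_vreadfrom() pulling remote data into ordinary, unregistered
local memory, with SCIF_RMA_USECACHE requesting that the local pages stay
pinned for possible reuse. User-space <scif.h>, a connected epd and a valid
remote window at roff are assumed:

#include <scif.h>
#include <stdio.h>

/* Read from the peer's registered window straight into ordinary local memory;
 * no local scif_register() is needed.  SCIF_RMA_USECACHE asks the
 * implementation to keep the local pages pinned so that a repeat call over the
 * same buffer may avoid the pinning overhead. */
static int vread_cached(scif_epd_t epd, void *buf, size_t len, off_t roff)
{
	if (scif_vreadfrom(epd, buf, len, roff,
			   SCIF_RMA_SYNC | SCIF_RMA_USECACHE) < 0) {
		perror("scif_vreadfrom");
		return -1;
	}
	return 0;
}
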
773/**
774 * scif_vwriteto() - Copy to a remote address space
775 * @epd: endpoint descriptor
776 * @addr: address from which to copy
777 * @len: length of range to copy
778 * @roffset: offset in remote registered address space to
779 * which to copy
780 * @rma_flags: transfer mode flags
781 *
782 * scif_vwriteto() copies len bytes from the local memory, starting at addr, to
783 * the remote registered address space of the peer of endpoint epd, starting at
784 * the offset roffset.
785 *
786 * The specified range [roffset, roffset + len - 1] must be within some
787 * registered window or windows of the remote nodes. The range may intersect
788 * multiple registered windows, but only if those windows are contiguous in the
789 * registered address space.
790 *
791 * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
792 * programmed read/writes. Otherwise the data is copied using DMA. If
793 * rma_flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after
794 * the transfer is complete. Otherwise, the transfer may be performed
795 * asynchronously. The order in which any two asynchronous RMA operations
796 * complete is non-deterministic. The synchronization functions, scif_fence_mark()/
797 * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
798 * the completion of asynchronous RMA operations on the same endpoint.
799 *
800 * The DMA transfer of individual bytes is not guaranteed to complete in
801 * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
802 * cacheline or partial cacheline of the source range will become visible on
803 * the destination node after all other transferred data in the source
804 * range has become visible on the destination node.
805 *
806 * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
807 * the specified local memory range may remain in a pinned state even after
808 * the specified transfer completes. This may reduce overhead if some or all of
809 * the same virtual address range is referenced in a subsequent call of
810 * scif_vreadfrom() or scif_vwriteto().
811 *
812 * The optimal DMA performance will likely be realized if both
813 * addr and roffset are cacheline aligned (are a multiple of 64). Lower
814 * performance will likely be realized if addr and roffset are not cacheline
815 * aligned but are separated by some multiple of 64. The lowest level of
816 * performance is likely if addr and roffset are not separated by a multiple of
817 * 64.
818 *
819 * The rma_flags argument is formed by ORing together zero or more of the
820 * following values.
821 * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
822 * engine.
823 * SCIF_RMA_USECACHE - allow registration caching
824 * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
825 *                   transfer has completed. In the current implementation,
826 *                   passing this flag causes the caller to busy wait,
827 *                   consuming CPU cycles while the DMA transfer is in
828 *                   progress; this gives the best performance by avoiding
828 *                   interrupt latency.
829 * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
830 * the source range becomes visible on the destination node
831 * after all other transferred data in the source range has
832 * become visible on the destination
833 *
834 * Return:
835 * Upon successful completion, scif_vwriteto() returns 0; otherwise in user
836 * mode -1 is returned and errno is set to indicate the error; in kernel mode
837 * the negative of one of the following errors is returned.
838 *
839 * Errors:
840 * EACCES - Attempt to write to a read-only range
841 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
842 * ECONNRESET - Connection reset by peer
843 * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
844 * EINVAL - rma_flags is invalid
845 * ENODEV - The remote node is lost or has exited and is no longer in the
846 * network (it may have crashed)
847 * ENOTCONN - The endpoint is not connected
848 * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
849 * registered address space of epd
850 */
851int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset,
852 int rma_flags);
853
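
A sketch of scif_vwriteto() that also applies the alignment advice above by
allocating the local source on a 64-byte boundary; it assumes user-space
<scif.h>, a connected epd, a remote window at roff (ideally also a multiple of
64), and illustrative helper names:

#include <scif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Push a local string to the peer's window at 'roff'.  Allocating the source
 * cacheline aligned (64 bytes) and keeping roff a multiple of 64 gives the DMA
 * engine its best case, as described above. */
static int vwrite_aligned(scif_epd_t epd, const char *msg, off_t roff)
{
	size_t len = strlen(msg) + 1;
	void *buf = NULL;

	if (posix_memalign(&buf, 64, len))
		return -1;
	memcpy(buf, msg, len);

	if (scif_vwriteto(epd, buf, len, roff, SCIF_RMA_SYNC) < 0) {
		perror("scif_vwriteto");
		free(buf);
		return -1;
	}
	free(buf);
	return 0;
}
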
854/**
855 * scif_fence_mark() - Mark previously issued RMAs
856 * @epd: endpoint descriptor
857 * @flags: control flags
858 * @mark: marked value returned as output.
859 *
860 * scif_fence_mark() returns after marking the current set of all uncompleted
861 * RMAs initiated through the endpoint epd or the current set of all
862 * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are
863 * marked with a value returned at mark. The application may subsequently call
864 * scif_fence_wait(), passing the value returned at mark, to await completion
865 * of all RMAs so marked.
866 *
867 * The flags argument has exactly one of the following values.
868 * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
869 * epd are marked
870 * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
871 * of endpoint epd are marked
872 *
873 * Return:
874 * Upon successful completion, scif_fence_mark() returns 0; otherwise in user
875 * mode -1 is returned and errno is set to indicate the error; in kernel mode
876 * the negative of one of the following errors is returned.
877 *
878 * Errors:
879 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
880 * ECONNRESET - Connection reset by peer
881 * EINVAL - flags is invalid
882 * ENODEV - The remote node is lost or has exited and is no longer in the
883 * network (it may have crashed)
884 * ENOTCONN - The endpoint is not connected
885 * ENOMEM - Insufficient kernel memory was available
886 */
887int scif_fence_mark(scif_epd_t epd, int flags, int *mark);
888
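
A sketch pairing an asynchronous RMA with scif_fence_mark(); the returned mark
is consumed by scif_fence_wait() (see the sketch after that function).
User-space <scif.h>, a connected epd and valid window offsets are assumed:

#include <scif.h>
#include <stdio.h>

/* Issue an asynchronous RMA and obtain a mark covering every RMA that this
 * endpoint has initiated and not yet completed. */
static int push_async_and_mark(scif_epd_t epd, off_t loff, off_t roff,
			       size_t len, int *mark)
{
	/* No SCIF_RMA_SYNC: scif_writeto() may return before the DMA is done. */
	if (scif_writeto(epd, loff, len, roff, 0) < 0) {
		perror("scif_writeto");
		return -1;
	}
	if (scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, mark) < 0) {
		perror("scif_fence_mark");
		return -1;
	}
	return 0;
}
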
889/**
890 * scif_fence_wait() - Wait for completion of marked RMAs
891 * @epd: endpoint descriptor
892 * @mark: mark request
893 *
894 * scif_fence_wait() returns after all RMAs marked with mark have completed.
895 * The value passed in mark must have been obtained in a previous call to
896 * scif_fence_mark().
897 *
898 * Return:
899 * Upon successful completion, scif_fence_wait() returns 0; otherwise in user
900 * mode -1 is returned and errno is set to indicate the error; in kernel mode
901 * the negative of one of the following errors is returned.
902 *
903 * Errors:
904 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
905 * ECONNRESET - Connection reset by peer
906 * ENODEV - The remote node is lost or has exited and is no longer in the
907 * network (it may have crashed)
908 * ENOTCONN - The endpoint is not connected
909 * ENOMEM - Insufficient kernel memory was available
910 */
911int scif_fence_wait(scif_epd_t epd, int mark);
912
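
Completing the previous sketch: block until every RMA covered by the mark
obtained above has finished (same assumptions about <scif.h> and epd):

#include <scif.h>
#include <stdio.h>

/* Block until every RMA covered by 'mark' (obtained from scif_fence_mark() on
 * the same endpoint) has completed. */
static int wait_for_mark(scif_epd_t epd, int mark)
{
	if (scif_fence_wait(epd, mark) < 0) {
		perror("scif_fence_wait");
		return -1;
	}
	return 0;
}
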
913/**
914 * scif_fence_signal() - Request a memory update on completion of RMAs
915 * @epd: endpoint descriptor
916 * @loff: local offset
917 * @lval: local value to write to loff
918 * @roff: remote offset
919 * @rval: remote value to write to roff
920 * @flags: flags
921 *
922 * scif_fence_signal() returns after marking the current set of all uncompleted
923 * RMAs initiated through the endpoint epd or marking the current set of all
924 * uncompleted RMAs initiated through the peer of endpoint epd.
925 *
926 * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the
927 * marked set, lval is written to memory at the address corresponding to offset
928 * loff in the local registered address space of epd. loff must be within a
929 * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion
930 * of the RMAs in the marked set, rval is written to memory at the address
931 * corresponding to offset roff in the remote registered address space of epd.
932 * roff must be within a remote registered window of the peer of epd. Note
933 * that any specified offset must be DWORD (4 byte / 32 bit) aligned.
934 *
935 * The flags argument is formed by OR'ing together the following.
936 * Exactly one of the following values.
937 * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
938 * epd are marked
939 * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
940 * of endpoint epd are marked
941 * One or more of the following values.
942 * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to
943 * memory at the address corresponding to offset loff in the local
944 * registered address space of epd.
945 * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to
946 * memory at the address corresponding to offset roff in the remote
947 * registered address space of epd.
948 *
949 * Return:
950 * Upon successful completion, scif_fence_signal() returns 0; otherwise in
951 * user mode -1 is returned and errno is set to indicate the error; in kernel
952 * mode the negative of one of the following errors is returned.
953 *
954 * Errors:
955 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
956 * ECONNRESET - Connection reset by peer
957 * EINVAL - flags is invalid, or loff or roff are not DWORD aligned
958 * ENODEV - The remote node is lost or has exited and is no longer in the
959 * network (it may have crashed)
960 * ENOTCONN - The endpoint is not connected
961 * ENXIO - loff is invalid for the registered address space of epd, or roff is
962 * invalid for the registered address space of the peer of epd
963 */
964int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff,
965 u64 rval, int flags);
966
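
A hedged sketch of a local completion doorbell built on scif_fence_signal():
once the RMAs already issued through epd complete, SCIF writes doorbell_val
into the local window at doorbell_off, which another thread can poll. It
assumes user-space <scif.h> (where the kernel's u64 corresponds to uint64_t), a
connected epd, and a DWORD-aligned offset inside a local registered window;
roff/rval are passed as zero here because SCIF_SIGNAL_REMOTE is not requested:

#include <scif.h>
#include <stdio.h>
#include <stdint.h>

/* Ask SCIF to write 'doorbell_val' into the local registered window at
 * 'doorbell_off' (which must be 4-byte aligned) once all RMAs already
 * initiated through 'epd' have completed.  roff/rval are unused because
 * SCIF_SIGNAL_REMOTE is not set. */
static int arm_local_doorbell(scif_epd_t epd, off_t doorbell_off,
			      uint64_t doorbell_val)
{
	if (scif_fence_signal(epd, doorbell_off, doorbell_val, 0, 0,
			      SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_LOCAL) < 0) {
		perror("scif_fence_signal");
		return -1;
	}
	return 0;
}
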
967/**
968 * scif_get_node_ids() - Return information about online nodes
969 * @nodes: array in which to return online node IDs
970 * @len: number of entries in the nodes array
971 * @self: address to place the node ID of the local node
972 *
973 * scif_get_node_ids() fills in the nodes array with up to len node IDs of the
974 * nodes in the SCIF network. If there is not enough space in nodes, as
975 * indicated by the len parameter, only len node IDs are returned in nodes. The
976 * return value of scif_get_node_ids() is the total number of nodes currently in
977 * the SCIF network. By checking the return value against the len parameter,
978 * the user may determine if enough space for nodes was allocated.
979 *
980 * The node ID of the local node is returned at self.
981 *
982 * Return:
983 * Upon successful completion, scif_get_node_ids() returns the actual number of
984 * online nodes in the SCIF network including 'self'; otherwise in user mode
985 * -1 is returned and errno is set to indicate the error; in kernel mode no
986 * errors are returned.
987 *
988 * Errors:
989 * EFAULT - Bad address
990 */
991int scif_get_node_ids(u16 *nodes, int len, u16 *self);
992
993#endif /* __SCIF_H__ */
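
Finally, a sketch of node enumeration; it assumes a user-space binding that
mirrors the kernel prototype documented above (with uint16_t standing in for
u16), and MAX_NODES is an illustrative bound rather than anything defined by
this header:

#include <scif.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_NODES 32	/* illustrative upper bound, not from the header */

static void list_scif_nodes(void)
{
	uint16_t nodes[MAX_NODES], self;
	int total, shown, i;

	total = scif_get_node_ids(nodes, MAX_NODES, &self);
	if (total < 0) {
		perror("scif_get_node_ids");
		return;
	}
	shown = total < MAX_NODES ? total : MAX_NODES;
	printf("local node %u, %d node(s) online\n", (unsigned)self, total);
	for (i = 0; i < shown; i++)
		printf("  node %u\n", (unsigned)nodes[i]);
	if (total > MAX_NODES)
		printf("  (array too small; %d IDs not shown)\n",
		       total - MAX_NODES);
}
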
diff --git a/include/linux/security.h b/include/linux/security.h
index 52febde52479..79d85ddf8093 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/mm.h>
30 31
31struct linux_binprm; 32struct linux_binprm;
32struct cred; 33struct cred;
@@ -53,9 +54,6 @@ struct xattr;
53struct xfrm_sec_ctx; 54struct xfrm_sec_ctx;
54struct mm_struct; 55struct mm_struct;
55 56
56/* Maximum number of letters for an LSM name string */
57#define SECURITY_NAME_MAX 10
58
59/* If capable should audit the security request */ 57/* If capable should audit the security request */
60#define SECURITY_CAP_NOAUDIT 0 58#define SECURITY_CAP_NOAUDIT 0
61#define SECURITY_CAP_AUDIT 1 59#define SECURITY_CAP_AUDIT 1
@@ -68,10 +66,7 @@ struct audit_krule;
68struct user_namespace; 66struct user_namespace;
69struct timezone; 67struct timezone;
70 68
71/* 69/* These functions are in security/commoncap.c */
72 * These functions are in security/capability.c and are used
73 * as the default capabilities functions
74 */
75extern int cap_capable(const struct cred *cred, struct user_namespace *ns, 70extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
76 int cap, int audit); 71 int cap, int audit);
77extern int cap_settime(const struct timespec *ts, const struct timezone *tz); 72extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
@@ -113,10 +108,6 @@ struct xfrm_state;
113struct xfrm_user_sec_ctx; 108struct xfrm_user_sec_ctx;
114struct seq_file; 109struct seq_file;
115 110
116extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
117
118void reset_security_ops(void);
119
120#ifdef CONFIG_MMU 111#ifdef CONFIG_MMU
121extern unsigned long mmap_min_addr; 112extern unsigned long mmap_min_addr;
122extern unsigned long dac_mmap_min_addr; 113extern unsigned long dac_mmap_min_addr;
@@ -187,1583 +178,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
187 opts->num_mnt_opts = 0; 178 opts->num_mnt_opts = 0;
188} 179}
189 180
190/**
191 * struct security_operations - main security structure
192 *
193 * Security module identifier.
194 *
195 * @name:
196 * A string that acts as a unique identifier for the LSM with max number
197 * of characters = SECURITY_NAME_MAX.
198 *
199 * Security hooks for program execution operations.
200 *
201 * @bprm_set_creds:
202 * Save security information in the bprm->security field, typically based
203 * on information about the bprm->file, for later use by the apply_creds
204 * hook. This hook may also optionally check permissions (e.g. for
205 * transitions between security domains).
206 * This hook may be called multiple times during a single execve, e.g. for
207 * interpreters. The hook can tell whether it has already been called by
208 * checking to see if @bprm->security is non-NULL. If so, then the hook
209 * may decide either to retain the security information saved earlier or
210 * to replace it.
211 * @bprm contains the linux_binprm structure.
212 * Return 0 if the hook is successful and permission is granted.
213 * @bprm_check_security:
214 * This hook mediates the point when a search for a binary handler will
215 * begin. It allows a check the @bprm->security value which is set in the
216 * preceding set_creds call. The primary difference from set_creds is
217 * that the argv list and envp list are reliably available in @bprm. This
218 * hook may be called multiple times during a single execve; and in each
219 * pass set_creds is called first.
220 * @bprm contains the linux_binprm structure.
221 * Return 0 if the hook is successful and permission is granted.
222 * @bprm_committing_creds:
223 * Prepare to install the new security attributes of a process being
224 * transformed by an execve operation, based on the old credentials
225 * pointed to by @current->cred and the information set in @bprm->cred by
226 * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
227 * This hook is a good place to perform state changes on the process such
228 * as closing open file descriptors to which access will no longer be
229 * granted when the attributes are changed. This is called immediately
230 * before commit_creds().
231 * @bprm_committed_creds:
232 * Tidy up after the installation of the new security attributes of a
233 * process being transformed by an execve operation. The new credentials
234 * have, by this point, been set to @current->cred. @bprm points to the
235 * linux_binprm structure. This hook is a good place to perform state
236 * changes on the process such as clearing out non-inheritable signal
237 * state. This is called immediately after commit_creds().
238 * @bprm_secureexec:
239 * Return a boolean value (0 or 1) indicating whether a "secure exec"
240 * is required. The flag is passed in the auxiliary table
241 * on the initial stack to the ELF interpreter to indicate whether libc
242 * should enable secure mode.
243 * @bprm contains the linux_binprm structure.
244 *
245 * Security hooks for filesystem operations.
246 *
247 * @sb_alloc_security:
248 * Allocate and attach a security structure to the sb->s_security field.
249 * The s_security field is initialized to NULL when the structure is
250 * allocated.
251 * @sb contains the super_block structure to be modified.
252 * Return 0 if operation was successful.
253 * @sb_free_security:
254 * Deallocate and clear the sb->s_security field.
255 * @sb contains the super_block structure to be modified.
256 * @sb_statfs:
257 * Check permission before obtaining filesystem statistics for the @mnt
258 * mountpoint.
259 * @dentry is a handle on the superblock for the filesystem.
260 * Return 0 if permission is granted.
261 * @sb_mount:
262 * Check permission before an object specified by @dev_name is mounted on
263 * the mount point named by @nd. For an ordinary mount, @dev_name
264 * identifies a device if the file system type requires a device. For a
265 * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
266 * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
267 * pathname of the object being mounted.
268 * @dev_name contains the name for object being mounted.
269 * @path contains the path for mount point object.
270 * @type contains the filesystem type.
271 * @flags contains the mount flags.
272 * @data contains the filesystem-specific data.
273 * Return 0 if permission is granted.
274 * @sb_copy_data:
275 * Allow mount option data to be copied prior to parsing by the filesystem,
276 * so that the security module can extract security-specific mount
277 * options cleanly (a filesystem may modify the data e.g. with strsep()).
278 * This also allows the original mount data to be stripped of security-
279 * specific options to avoid having to make filesystems aware of them.
280 * @type the type of filesystem being mounted.
281 * @orig the original mount data copied from userspace.
282 * @copy copied data which will be passed to the security module.
283 * Returns 0 if the copy was successful.
284 * @sb_remount:
285 * Extracts security system specific mount options and verifies no changes
286 * are being made to those options.
287 * @sb superblock being remounted
288 * @data contains the filesystem-specific data.
289 * Return 0 if permission is granted.
290 * @sb_umount:
291 * Check permission before the @mnt file system is unmounted.
292 * @mnt contains the mounted file system.
293 * @flags contains the unmount flags, e.g. MNT_FORCE.
294 * Return 0 if permission is granted.
295 * @sb_pivotroot:
296 * Check permission before pivoting the root filesystem.
297 * @old_path contains the path for the new location of the current root (put_old).
298 * @new_path contains the path for the new root (new_root).
299 * Return 0 if permission is granted.
300 * @sb_set_mnt_opts:
301 * Set the security relevant mount options used for a superblock
302 * @sb the superblock to set security mount options for
303 * @opts binary data structure containing all lsm mount data
304 * @sb_clone_mnt_opts:
305 * Copy all security options from a given superblock to another
306 * @oldsb old superblock which contain information to clone
307 * @newsb new superblock which needs filled in
308 * @sb_parse_opts_str:
309 * Parse a string of security data filling in the opts structure
310 * @options string containing all mount options known by the LSM
311 * @opts binary data structure usable by the LSM
312 * @dentry_init_security:
313 * Compute a context for a dentry as the inode is not yet available
314 * since NFSv4 has no label backed by an EA anyway.
315 * @dentry dentry to use in calculating the context.
316 * @mode mode used to determine resource type.
317 * @name name of the last path component used to create file
318 * @ctx pointer to place the pointer to the resulting context in.
319 * @ctxlen point to place the length of the resulting context.
320 *
321 *
322 * Security hooks for inode operations.
323 *
324 * @inode_alloc_security:
325 * Allocate and attach a security structure to @inode->i_security. The
326 * i_security field is initialized to NULL when the inode structure is
327 * allocated.
328 * @inode contains the inode structure.
329 * Return 0 if operation was successful.
330 * @inode_free_security:
331 * @inode contains the inode structure.
332 * Deallocate the inode security structure and set @inode->i_security to
333 * NULL.
334 * @inode_init_security:
335 * Obtain the security attribute name suffix and value to set on a newly
336 * created inode and set up the incore security field for the new inode.
337 * This hook is called by the fs code as part of the inode creation
338 * transaction and provides for atomic labeling of the inode, unlike
339 * the post_create/mkdir/... hooks called by the VFS. The hook function
340 * is expected to allocate the name and value via kmalloc, with the caller
341 * being responsible for calling kfree after using them.
342 * If the security module does not use security attributes or does
343 * not wish to put a security attribute on this particular inode,
344 * then it should return -EOPNOTSUPP to skip this processing.
345 * @inode contains the inode structure of the newly created inode.
346 * @dir contains the inode structure of the parent directory.
347 * @qstr contains the last path component of the new object
348 * @name will be set to the allocated name suffix (e.g. selinux).
349 * @value will be set to the allocated attribute value.
350 * @len will be set to the length of the value.
351 * Returns 0 if @name and @value have been successfully set,
352 * -EOPNOTSUPP if no security attribute is needed, or
353 * -ENOMEM on memory allocation failure.
354 * @inode_create:
355 * Check permission to create a regular file.
356 * @dir contains inode structure of the parent of the new file.
357 * @dentry contains the dentry structure for the file to be created.
358 * @mode contains the file mode of the file to be created.
359 * Return 0 if permission is granted.
360 * @inode_link:
361 * Check permission before creating a new hard link to a file.
362 * @old_dentry contains the dentry structure for an existing link to the file.
363 * @dir contains the inode structure of the parent directory of the new link.
364 * @new_dentry contains the dentry structure for the new link.
365 * Return 0 if permission is granted.
366 * @path_link:
367 * Check permission before creating a new hard link to a file.
368 * @old_dentry contains the dentry structure for an existing link
369 * to the file.
370 * @new_dir contains the path structure of the parent directory of
371 * the new link.
372 * @new_dentry contains the dentry structure for the new link.
373 * Return 0 if permission is granted.
374 * @inode_unlink:
375 * Check the permission to remove a hard link to a file.
376 * @dir contains the inode structure of parent directory of the file.
377 * @dentry contains the dentry structure for file to be unlinked.
378 * Return 0 if permission is granted.
379 * @path_unlink:
380 * Check the permission to remove a hard link to a file.
381 * @dir contains the path structure of parent directory of the file.
382 * @dentry contains the dentry structure for file to be unlinked.
383 * Return 0 if permission is granted.
384 * @inode_symlink:
385 * Check the permission to create a symbolic link to a file.
386 * @dir contains the inode structure of parent directory of the symbolic link.
387 * @dentry contains the dentry structure of the symbolic link.
388 * @old_name contains the pathname of file.
389 * Return 0 if permission is granted.
390 * @path_symlink:
391 * Check the permission to create a symbolic link to a file.
392 * @dir contains the path structure of parent directory of
393 * the symbolic link.
394 * @dentry contains the dentry structure of the symbolic link.
395 * @old_name contains the pathname of file.
396 * Return 0 if permission is granted.
397 * @inode_mkdir:
398 * Check permissions to create a new directory in the existing directory
399 * associated with inode structure @dir.
400 * @dir contains the inode structure of parent of the directory to be created.
401 * @dentry contains the dentry structure of new directory.
402 * @mode contains the mode of new directory.
403 * Return 0 if permission is granted.
404 * @path_mkdir:
405 * Check permissions to create a new directory in the existing directory
406 * associated with path structure @path.
407 * @dir contains the path structure of parent of the directory
408 * to be created.
409 * @dentry contains the dentry structure of new directory.
410 * @mode contains the mode of new directory.
411 * Return 0 if permission is granted.
412 * @inode_rmdir:
413 * Check the permission to remove a directory.
414 * @dir contains the inode structure of parent of the directory to be removed.
415 * @dentry contains the dentry structure of directory to be removed.
416 * Return 0 if permission is granted.
417 * @path_rmdir:
418 * Check the permission to remove a directory.
419 * @dir contains the path structure of parent of the directory to be
420 * removed.
421 * @dentry contains the dentry structure of directory to be removed.
422 * Return 0 if permission is granted.
423 * @inode_mknod:
424 * Check permissions when creating a special file (or a socket or a fifo
425 * file created via the mknod system call). Note that if mknod operation
426 * is being done for a regular file, then the create hook will be called
427 * and not this hook.
428 * @dir contains the inode structure of parent of the new file.
429 * @dentry contains the dentry structure of the new file.
430 * @mode contains the mode of the new file.
431 * @dev contains the device number.
432 * Return 0 if permission is granted.
433 * @path_mknod:
434 * Check permissions when creating a file. Note that this hook is called
435 * even if mknod operation is being done for a regular file.
436 * @dir contains the path structure of parent of the new file.
437 * @dentry contains the dentry structure of the new file.
438 * @mode contains the mode of the new file.
439 * @dev contains the undecoded device number. Use new_decode_dev() to get
440 * the decoded device number.
441 * Return 0 if permission is granted.
442 * @inode_rename:
443 * Check for permission to rename a file or directory.
444 * @old_dir contains the inode structure for parent of the old link.
445 * @old_dentry contains the dentry structure of the old link.
446 * @new_dir contains the inode structure for parent of the new link.
447 * @new_dentry contains the dentry structure of the new link.
448 * Return 0 if permission is granted.
449 * @path_rename:
450 * Check for permission to rename a file or directory.
451 * @old_dir contains the path structure for parent of the old link.
452 * @old_dentry contains the dentry structure of the old link.
453 * @new_dir contains the path structure for parent of the new link.
454 * @new_dentry contains the dentry structure of the new link.
455 * Return 0 if permission is granted.
456 * @path_chmod:
457 * Check for permission to change DAC's permission of a file or directory.
458 * @dentry contains the dentry structure.
459 * @mnt contains the vfsmnt structure.
460 * @mode contains DAC's mode.
461 * Return 0 if permission is granted.
462 * @path_chown:
463 * Check for permission to change owner/group of a file or directory.
464 * @path contains the path structure.
465 * @uid contains new owner's ID.
466 * @gid contains new group's ID.
467 * Return 0 if permission is granted.
468 * @path_chroot:
469 * Check for permission to change root directory.
470 * @path contains the path structure.
471 * Return 0 if permission is granted.
472 * @inode_readlink:
473 * Check the permission to read the symbolic link.
474 * @dentry contains the dentry structure for the file link.
475 * Return 0 if permission is granted.
476 * @inode_follow_link:
477 * Check permission to follow a symbolic link when looking up a pathname.
478 * @dentry contains the dentry structure for the link.
479 * @inode contains the inode, which itself is not stable in RCU-walk
480 * @rcu indicates whether we are in RCU-walk mode.
481 * Return 0 if permission is granted.
482 * @inode_permission:
483 * Check permission before accessing an inode. This hook is called by the
484 * existing Linux permission function, so a security module can use it to
485 * provide additional checking for existing Linux permission checks.
486 * Notice that this hook is called when a file is opened (as well as many
487 * other operations), whereas the file_security_ops permission hook is
488 * called when the actual read/write operations are performed.
489 * @inode contains the inode structure to check.
490 * @mask contains the permission mask.
491 * Return 0 if permission is granted.
492 * @inode_setattr:
493 * Check permission before setting file attributes. Note that the kernel
494 * call to notify_change is performed from several locations, whenever
495 * file attributes change (such as when a file is truncated, chown/chmod
496 * operations, transferring disk quotas, etc).
497 * @dentry contains the dentry structure for the file.
498 * @attr is the iattr structure containing the new file attributes.
499 * Return 0 if permission is granted.
500 * @path_truncate:
501 * Check permission before truncating a file.
502 * @path contains the path structure for the file.
503 * Return 0 if permission is granted.
504 * @inode_getattr:
505 * Check permission before obtaining file attributes.
506 * @mnt is the vfsmount where the dentry was looked up
507 * @dentry contains the dentry structure for the file.
508 * Return 0 if permission is granted.
509 * @inode_setxattr:
510 * Check permission before setting the extended attributes
511 * @value identified by @name for @dentry.
512 * Return 0 if permission is granted.
513 * @inode_post_setxattr:
514 * Update inode security field after successful setxattr operation.
515 * @value identified by @name for @dentry.
516 * @inode_getxattr:
517 * Check permission before obtaining the extended attributes
518 * identified by @name for @dentry.
519 * Return 0 if permission is granted.
520 * @inode_listxattr:
521 * Check permission before obtaining the list of extended attribute
522 * names for @dentry.
523 * Return 0 if permission is granted.
524 * @inode_removexattr:
525 * Check permission before removing the extended attribute
526 * identified by @name for @dentry.
527 * Return 0 if permission is granted.
528 * @inode_getsecurity:
529 * Retrieve a copy of the extended attribute representation of the
530 * security label associated with @name for @inode via @buffer. Note that
531 * @name is the remainder of the attribute name after the security prefix
532 * has been removed. @alloc is used to specify if the call should return a
533 * value via the buffer or just the value length. Return size of buffer on
534 * success.
535 * @inode_setsecurity:
536 * Set the security label associated with @name for @inode from the
537 * extended attribute value @value. @size indicates the size of the
538 * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
539 * Note that @name is the remainder of the attribute name after the
540 * security. prefix has been removed.
541 * Return 0 on success.
542 * @inode_listsecurity:
543 * Copy the extended attribute names for the security labels
544 * associated with @inode into @buffer. The maximum size of @buffer
545 * is specified by @buffer_size. @buffer may be NULL to request
546 * the size of the buffer required.
547 * Returns number of bytes used/required on success.
548 * @inode_need_killpriv:
549 * Called when an inode has been changed.
550 * @dentry is the dentry being changed.
551 * Return <0 on error to abort the inode change operation.
552 * Return 0 if inode_killpriv does not need to be called.
553 * Return >0 if inode_killpriv does need to be called.
554 * @inode_killpriv:
555 * The setuid bit is being removed. Remove similar security labels.
556 * Called with the dentry->d_inode->i_mutex held.
557 * @dentry is the dentry being changed.
558 * Return 0 on success. If error is returned, then the operation
559 * causing setuid bit removal is failed.
560 * @inode_getsecid:
561 * Get the secid associated with the node.
562 * @inode contains a pointer to the inode.
563 * @secid contains a pointer to the location where result will be saved.
564 * In case of failure, @secid will be set to zero.
565 *
566 * Security hooks for file operations
567 *
568 * @file_permission:
569 * Check file permissions before accessing an open file. This hook is
570 * called by various operations that read or write files. A security
571 * module can use this hook to perform additional checking on these
572 * operations, e.g. to revalidate permissions on use to support privilege
573 * bracketing or policy changes. Notice that this hook is used when the
574 * actual read/write operations are performed, whereas the
575 * inode_security_ops hook is called when a file is opened (as well as
576 * many other operations).
577 * Caveat: Although this hook can be used to revalidate permissions for
578 * various system call operations that read or write files, it does not
579 * address the revalidation of permissions for memory-mapped files.
580 * Security modules must handle this separately if they need such
581 * revalidation.
582 * @file contains the file structure being accessed.
583 * @mask contains the requested permissions.
584 * Return 0 if permission is granted.
585 * @file_alloc_security:
586 * Allocate and attach a security structure to the file->f_security field.
587 * The security field is initialized to NULL when the structure is first
588 * created.
589 * @file contains the file structure to secure.
590 * Return 0 if the hook is successful and permission is granted.
591 * @file_free_security:
592 * Deallocate and free any security structures stored in file->f_security.
593 * @file contains the file structure being modified.
594 * @file_ioctl:
595 * @file contains the file structure.
596 * @cmd contains the operation to perform.
597 * @arg contains the operational arguments.
598 * Check permission for an ioctl operation on @file. Note that @arg
599 * sometimes represents a user space pointer; in other cases, it may be a
600 * simple integer value. When @arg represents a user space pointer, it
601 * should never be used by the security module.
602 * Return 0 if permission is granted.
603 * @mmap_addr:
604 * Check permissions for a mmap operation at @addr.
605 * @addr contains virtual address that will be used for the operation.
606 * Return 0 if permission is granted.
607 * @mmap_file:
608 * Check permissions for a mmap operation. The @file may be NULL, e.g.
609 * if mapping anonymous memory.
610 * @file contains the file structure for file to map (may be NULL).
611 * @reqprot contains the protection requested by the application.
612 * @prot contains the protection that will be applied by the kernel.
613 * @flags contains the operational flags.
614 * Return 0 if permission is granted.
615 * @file_mprotect:
616 * Check permissions before changing memory access permissions.
617 * @vma contains the memory region to modify.
618 * @reqprot contains the protection requested by the application.
619 * @prot contains the protection that will be applied by the kernel.
620 * Return 0 if permission is granted.
621 * @file_lock:
622 * Check permission before performing file locking operations.
623 * Note: this hook mediates both flock and fcntl style locks.
624 * @file contains the file structure.
625 * @cmd contains the posix-translated lock operation to perform
626 * (e.g. F_RDLCK, F_WRLCK).
627 * Return 0 if permission is granted.
628 * @file_fcntl:
629 * Check permission before allowing the file operation specified by @cmd
630 * from being performed on the file @file. Note that @arg sometimes
631 * represents a user space pointer; in other cases, it may be a simple
632 * integer value. When @arg represents a user space pointer, it should
633 * never be used by the security module.
634 * @file contains the file structure.
635 * @cmd contains the operation to be performed.
636 * @arg contains the operational arguments.
637 * Return 0 if permission is granted.
638 * @file_set_fowner:
639 * Save owner security information (typically from current->security) in
640 * file->f_security for later use by the send_sigiotask hook.
641 * @file contains the file structure to update.
642 * Return 0 on success.
643 * @file_send_sigiotask:
644 * Check permission for the file owner @fown to send SIGIO or SIGURG to the
645 * process @tsk. Note that this hook is sometimes called from interrupt.
646 * Note that the fown_struct, @fown, is never outside the context of a
647 * struct file, so the file structure (and associated security information)
648 * can always be obtained:
649 * container_of(fown, struct file, f_owner)
650 * @tsk contains the structure of task receiving signal.
651 * @fown contains the file owner information.
652 * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
653 * Return 0 if permission is granted.
654 * @file_receive:
655 * This hook allows security modules to control the ability of a process
656 * to receive an open file descriptor via socket IPC.
657 * @file contains the file structure being received.
658 * Return 0 if permission is granted.
659 * @file_open:
660 * Save open-time permission checking state for later use upon
661 * file_permission, and recheck access if anything has changed
662 * since inode_permission.
663 *
664 * Security hooks for task operations.
665 *
666 * @task_create:
667 * Check permission before creating a child process. See the clone(2)
668 * manual page for definitions of the @clone_flags.
669 * @clone_flags contains the flags indicating what should be shared.
670 * Return 0 if permission is granted.
671 * @task_free:
672 * @task task being freed
673 * Handle release of task-related resources. (Note that this can be called
674 * from interrupt context.)
675 * @cred_alloc_blank:
676 * @cred points to the credentials.
677 * @gfp indicates the atomicity of any memory allocations.
678 * Only allocate sufficient memory and attach to @cred such that
679 * cred_transfer() will not get ENOMEM.
680 * @cred_free:
681 * @cred points to the credentials.
682 * Deallocate and clear the cred->security field in a set of credentials.
683 * @cred_prepare:
684 * @new points to the new credentials.
685 * @old points to the original credentials.
686 * @gfp indicates the atomicity of any memory allocations.
687 * Prepare a new set of credentials by copying the data from the old set.
688 * @cred_transfer:
689 * @new points to the new credentials.
690 * @old points to the original credentials.
691 * Transfer data from original creds to new creds
692 * @kernel_act_as:
693 * Set the credentials for a kernel service to act as (subjective context).
694 * @new points to the credentials to be modified.
695 * @secid specifies the security ID to be set
696 * The current task must be the one that nominated @secid.
697 * Return 0 if successful.
698 * @kernel_create_files_as:
699 * Set the file creation context in a set of credentials to be the same as
700 * the objective context of the specified inode.
701 * @new points to the credentials to be modified.
702 * @inode points to the inode to use as a reference.
703 * The current task must be the one that nominated @inode.
704 * Return 0 if successful.
705 * @kernel_fw_from_file:
706 * Load firmware from userspace (not called for built-in firmware).
707 * @file contains the file structure pointing to the file containing
708 * the firmware to load. This argument will be NULL if the firmware
709 * was loaded via the uevent-triggered blob-based interface exposed
710 * by CONFIG_FW_LOADER_USER_HELPER.
711 * @buf pointer to buffer containing firmware contents.
712 * @size length of the firmware contents.
713 * Return 0 if permission is granted.
714 * @kernel_module_request:
715 * Ability to trigger the kernel to automatically upcall to userspace for
716 * userspace to load a kernel module with the given name.
717 * @kmod_name name of the module requested by the kernel
718 * Return 0 if successful.
719 * @kernel_module_from_file:
720 * Load a kernel module from userspace.
721 * @file contains the file structure pointing to the file containing
722 * the kernel module to load. If the module is being loaded from a blob,
723 * this argument will be NULL.
724 * Return 0 if permission is granted.
725 * @task_fix_setuid:
726 * Update the module's state after setting one or more of the user
727 * identity attributes of the current process. The @flags parameter
728 * indicates which of the set*uid system calls invoked this hook.
729 * @new is the set of credentials that will be installed. Modifications
730 * should be made to this rather than to @current->cred.
731 * @old is the set of credentials that are being replaced.
732 * @flags contains one of the LSM_SETID_* values.
733 * Return 0 on success.
734 * @task_setpgid:
735 * Check permission before setting the process group identifier of the
736 * process @p to @pgid.
737 * @p contains the task_struct for process being modified.
738 * @pgid contains the new pgid.
739 * Return 0 if permission is granted.
740 * @task_getpgid:
741 * Check permission before getting the process group identifier of the
742 * process @p.
743 * @p contains the task_struct for the process.
744 * Return 0 if permission is granted.
745 * @task_getsid:
746 * Check permission before getting the session identifier of the process
747 * @p.
748 * @p contains the task_struct for the process.
749 * Return 0 if permission is granted.
750 * @task_getsecid:
751 * Retrieve the security identifier of the process @p.
752 * @p contains the task_struct for the process; its secid is placed in @secid.
753 * In case of failure, @secid will be set to zero.
754 *
755 * @task_setnice:
756 * Check permission before setting the nice value of @p to @nice.
757 * @p contains the task_struct of process.
758 * @nice contains the new nice value.
759 * Return 0 if permission is granted.
760 * @task_setioprio:
761 * Check permission before setting the ioprio value of @p to @ioprio.
762 * @p contains the task_struct of process.
763 * @ioprio contains the new ioprio value
764 * Return 0 if permission is granted.
765 * @task_getioprio:
766 * Check permission before getting the ioprio value of @p.
767 * @p contains the task_struct of process.
768 * Return 0 if permission is granted.
769 * @task_setrlimit:
770 * Check permission before setting the resource limits of the current
771 * process for @resource to @new_rlim. The old resource limit values can
772 * be examined by dereferencing (current->signal->rlim + resource).
773 * @resource contains the resource whose limit is being set.
774 * @new_rlim contains the new limits for @resource.
775 * Return 0 if permission is granted.
776 * @task_setscheduler:
777 * Check permission before setting scheduling policy and/or parameters of
778 * process @p based on @policy and @lp.
779 * @p contains the task_struct for process.
780 * @policy contains the scheduling policy.
781 * @lp contains the scheduling parameters.
782 * Return 0 if permission is granted.
783 * @task_getscheduler:
784 * Check permission before obtaining scheduling information for process
785 * @p.
786 * @p contains the task_struct for process.
787 * Return 0 if permission is granted.
788 * @task_movememory:
789 * Check permission before moving memory owned by process @p.
790 * @p contains the task_struct for process.
791 * Return 0 if permission is granted.
792 * @task_kill:
793 * Check permission before sending signal @sig to @p. @info can be NULL,
794 * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
795 * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
796 * from the kernel and should typically be permitted.
797 * SIGIO signals are handled separately by the send_sigiotask hook in
798 * file_security_ops.
799 * @p contains the task_struct for process.
800 * @info contains the signal information.
801 * @sig contains the signal value.
802 * @secid contains the sid of the process where the signal originated
803 * Return 0 if permission is granted.
804 * @task_wait:
805 * Check permission before allowing a process to reap a child process @p
806 * and collect its status information.
807 * @p contains the task_struct for process.
808 * Return 0 if permission is granted.
809 * @task_prctl:
810 * Check permission before performing a process control operation on the
811 * current process.
812 * @option contains the operation.
813 * @arg2 contains an argument.
814 * @arg3 contains an argument.
815 * @arg4 contains an argument.
816 * @arg5 contains an argument.
817 * Return -ENOSYS if no-one wanted to handle this op, any other value to
818 * cause prctl() to return immediately with that value.
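
For illustration only (none of these names exist in this header), a hedged sketch of a task_prctl hook that vetoes one option and defers everything else to the default handling:

#include <linux/errno.h>
#include <linux/prctl.h>

/* Hypothetical policy: refuse PR_SET_DUMPABLE values other than 0 or 1,
 * and let every other option fall through to the default handling. */
static int example_task_prctl(int option, unsigned long arg2,
			      unsigned long arg3, unsigned long arg4,
			      unsigned long arg5)
{
	if (option == PR_SET_DUMPABLE && arg2 > 1)
		return -EPERM;	/* prctl() returns -EPERM immediately */
	return -ENOSYS;		/* nobody here wanted to handle this op */
}
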
819 * @task_to_inode:
820 * Set the security attributes for an inode based on an associated task's
821 * security attributes, e.g. for /proc/pid inodes.
822 * @p contains the task_struct for the task.
823 * @inode contains the inode structure for the inode.
824 *
825 * Security hooks for Netlink messaging.
826 *
827 * @netlink_send:
828 * Save security information for a netlink message so that permission
829 * checking can be performed when the message is processed. The security
830 * information can be saved using the eff_cap field of the
831 * netlink_skb_parms structure. It may also be used to provide
832 * fine-grained control over message transmission.
833 * @sk associated sock of task sending the message.
834 * @skb contains the sk_buff structure for the netlink message.
835 * Return 0 if the information was successfully saved and message
836 * is allowed to be transmitted.
837 *
838 * Security hooks for Unix domain networking.
839 *
840 * @unix_stream_connect:
841 * Check permissions before establishing a Unix domain stream connection
842 * between @sock and @other.
843 * @sock contains the sock structure.
844 * @other contains the peer sock structure.
845 * @newsk contains the new sock structure.
846 * Return 0 if permission is granted.
847 * @unix_may_send:
848 * Check permissions before connecting or sending datagrams from @sock to
849 * @other.
850 * @sock contains the socket structure.
851 * @other contains the peer socket structure.
852 * Return 0 if permission is granted.
853 *
854 * The @unix_stream_connect and @unix_may_send hooks were necessary because
855 * Linux provides an alternative to the conventional file name space for Unix
856 * domain sockets. Whereas binding and connecting to sockets in the file name
857 * space is mediated by the typical file permissions (and caught by the mknod
858 * and permission hooks in inode_security_ops), binding and connecting to
859 * sockets in the abstract name space is completely unmediated. Sufficient
860 * control of Unix domain sockets in the abstract name space isn't possible
861 * using only the socket layer hooks, since we need to know the actual target
862 * socket, which is not looked up until we are inside the af_unix code.
863 *
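
To make the abstract name space concrete, here is a minimal userspace sketch (illustrative, not part of this header): the leading NUL byte in sun_path means no filesystem object is created, so only the hooks above can mediate such sockets.

#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

/* Build an abstract-namespace address such as "\0example"; binding it
 * creates no inode, so the usual file permission checks never apply. */
static socklen_t abstract_addr(struct sockaddr_un *sun, const char *name)
{
	size_t len = strnlen(name, sizeof(sun->sun_path) - 1);

	memset(sun, 0, sizeof(*sun));
	sun->sun_family = AF_UNIX;
	memcpy(sun->sun_path + 1, name, len);	/* sun_path[0] stays '\0' */
	return offsetof(struct sockaddr_un, sun_path) + 1 + len;
}
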
864 * Security hooks for socket operations.
865 *
866 * @socket_create:
867 * Check permissions prior to creating a new socket.
868 * @family contains the requested protocol family.
869 * @type contains the requested communications type.
870 * @protocol contains the requested protocol.
871 * @kern set to 1 if a kernel socket.
872 * Return 0 if permission is granted.
873 * @socket_post_create:
874 * This hook allows a module to update or allocate a per-socket security
875 * structure. Note that the security field was not added directly to the
876 * socket structure, but rather, the socket security information is stored
877 * in the associated inode. Typically, the inode alloc_security hook will
878 * allocate and attach security information to
879 * sock->inode->i_security. This hook may be used to update the
880 * sock->inode->i_security field with additional information that wasn't
881 * available when the inode was allocated.
882 * @sock contains the newly created socket structure.
883 * @family contains the requested protocol family.
884 * @type contains the requested communications type.
885 * @protocol contains the requested protocol.
886 * @kern set to 1 if a kernel socket.
887 * @socket_bind:
888 * Check permission before socket protocol layer bind operation is
889 * performed and the socket @sock is bound to the address specified in the
890 * @address parameter.
891 * @sock contains the socket structure.
892 * @address contains the address to bind to.
893 * @addrlen contains the length of address.
894 * Return 0 if permission is granted.
895 * @socket_connect:
896 * Check permission before socket protocol layer connect operation
897 * attempts to connect socket @sock to a remote address, @address.
898 * @sock contains the socket structure.
899 * @address contains the address of remote endpoint.
900 * @addrlen contains the length of address.
901 * Return 0 if permission is granted.
902 * @socket_listen:
903 * Check permission before socket protocol layer listen operation.
904 * @sock contains the socket structure.
905 * @backlog contains the maximum length for the pending connection queue.
906 * Return 0 if permission is granted.
907 * @socket_accept:
908 * Check permission before accepting a new connection. Note that the new
909 * socket, @newsock, has been created and some information copied to it,
910 * but the accept operation has not actually been performed.
911 * @sock contains the listening socket structure.
912 * @newsock contains the newly created server socket for connection.
913 * Return 0 if permission is granted.
914 * @socket_sendmsg:
915 * Check permission before transmitting a message to another socket.
916 * @sock contains the socket structure.
917 * @msg contains the message to be transmitted.
918 * @size contains the size of message.
919 * Return 0 if permission is granted.
920 * @socket_recvmsg:
921 * Check permission before receiving a message from a socket.
922 * @sock contains the socket structure.
923 * @msg contains the message structure.
924 * @size contains the size of message structure.
925 * @flags contains the operational flags.
926 * Return 0 if permission is granted.
927 * @socket_getsockname:
928 * Check permission before the local address (name) of the socket object
929 * @sock is retrieved.
930 * @sock contains the socket structure.
931 * Return 0 if permission is granted.
932 * @socket_getpeername:
933 * Check permission before the remote address (name) of a socket object
934 * @sock is retrieved.
935 * @sock contains the socket structure.
936 * Return 0 if permission is granted.
937 * @socket_getsockopt:
938 * Check permissions before retrieving the options associated with socket
939 * @sock.
940 * @sock contains the socket structure.
941 * @level contains the protocol level to retrieve option from.
942 * @optname contains the name of option to retrieve.
943 * Return 0 if permission is granted.
944 * @socket_setsockopt:
945 * Check permissions before setting the options associated with socket
946 * @sock.
947 * @sock contains the socket structure.
948 * @level contains the protocol level to set options for.
949 * @optname contains the name of the option to set.
950 * Return 0 if permission is granted.
951 * @socket_shutdown:
952 * Check permission before all or part of a connection on the socket
953 * @sock is shut down.
954 * @sock contains the socket structure.
955 * @how contains the flag indicating how future sends and receives are handled.
956 * Return 0 if permission is granted.
957 * @socket_sock_rcv_skb:
958 * Check permissions on incoming network packets. This hook is distinct
959 * from Netfilter's IP input hooks since it is the first time that the
960 * incoming sk_buff @skb has been associated with a particular socket, @sk.
961 * Must not sleep inside this hook because some callers hold spinlocks.
962 * @sk contains the sock (not socket) associated with the incoming sk_buff.
963 * @skb contains the incoming network data.
964 * @socket_getpeersec_stream:
965 * This hook allows the security module to provide peer socket security
966 * state for unix or connected tcp sockets to userspace via getsockopt
967 * SO_PEERSEC. For tcp sockets this can be meaningful if the
968 * socket is associated with an ipsec SA.
969 * @sock is the local socket.
970 * @optval userspace memory where the security state is to be copied.
971 * @optlen userspace int where the module should copy the actual length
972 * of the security state.
973 * @len as input is the maximum length to copy to userspace provided
974 * by the caller.
975 * Return 0 if all is well, otherwise, typical getsockopt return
976 * values.
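
A hedged userspace sketch of how the stream variant is typically consumed (assuming the system headers expose SO_PEERSEC and the loaded LSM exports text labels):

#include <stdio.h>
#include <sys/socket.h>

/* Print the connected peer's security context, if the LSM provides one. */
static int print_peer_label(int fd)
{
	char label[256];
	socklen_t len = sizeof(label) - 1;

	if (getsockopt(fd, SOL_SOCKET, SO_PEERSEC, label, &len) < 0)
		return -1;	/* e.g. ERANGE if the buffer is too small */
	label[len] = '\0';	/* defensive; labels are not always NUL-terminated */
	printf("peer context: %s\n", label);
	return 0;
}
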
977 * @socket_getpeersec_dgram:
978 * This hook allows the security module to provide peer socket security
979 * state for udp sockets on a per-packet basis to userspace. The
980 * application must first have enabled the IP_PASSSEC option via
981 * setsockopt. It can then retrieve the security state returned by
982 * this hook for each packet via the SCM_SECURITY ancillary message
983 * type.
984 * @skb is the skbuff for the packet being queried
985 * @secdata is a pointer to a buffer in which to copy the security data
986 * @seclen is the maximum length for @secdata
987 * Return 0 on success, error on failure.
988 * @sk_alloc_security:
989 * Allocate and attach a security structure to the sk->sk_security field,
990 * which is used to copy security attributes between local stream sockets.
991 * @sk_free_security:
992 * Deallocate security structure.
993 * @sk_clone_security:
994 * Clone/copy security structure.
995 * @sk_getsecid:
996 * Retrieve the LSM-specific secid for the sock to enable caching of network
997 * authorizations.
998 * @sock_graft:
999 * Sets the socket's isec sid to the sock's sid.
1000 * @inet_conn_request:
1001 * Sets the openreq's sid to socket's sid with MLS portion taken from peer sid.
1002 * @inet_csk_clone:
1003 * Sets the new child socket's sid to the openreq sid.
1004 * @inet_conn_established:
1005 * Sets the connection's peersid to the secmark on skb.
1006 * @secmark_relabel_packet:
1007 * Check if the process should be allowed to relabel packets to the given secid.
1008 * @secmark_refcount_inc:
1009 * Tells the LSM to increment the number of secmark labeling rules loaded.
1010 * @secmark_refcount_dec:
1011 * Tells the LSM to decrement the number of secmark labeling rules loaded.
1012 * @req_classify_flow:
1013 * Sets the flow's sid to the openreq sid.
1014 * @tun_dev_alloc_security:
1015 * This hook allows a module to allocate a security structure for a TUN
1016 * device.
1017 * @security pointer to a security structure pointer.
1018 * Return 0 on success, negative values on failure.
1019 * @tun_dev_free_security:
1020 * This hook allows a module to free the security structure for a TUN
1021 * device.
1022 * @security pointer to the TUN device's security structure
1023 * @tun_dev_create:
1024 * Check permissions prior to creating a new TUN device.
1025 * @tun_dev_attach_queue:
1026 * Check permissions prior to attaching to a TUN device queue.
1027 * @security pointer to the TUN device's security structure.
1028 * @tun_dev_attach:
1029 * This hook can be used by the module to update any security state
1030 * associated with the TUN device's sock structure.
1031 * @sk contains the existing sock structure.
1032 * @security pointer to the TUN device's security structure.
1033 * @tun_dev_open:
1034 * This hook can be used by the module to update any security state
1035 * associated with the TUN device's security structure.
1036 * @security pointer to the TUN device's security structure.
1037 * @skb_owned_by:
1038 * This hook sets the packet's owning sock.
1039 * @skb is the packet.
1040 * @sk the sock which owns the packet.
1041 *
1042 * Security hooks for XFRM operations.
1043 *
1044 * @xfrm_policy_alloc_security:
1045 * @ctxp is a pointer to the xfrm_sec_ctx being added to the Security Policy
1046 * Database used by the XFRM system.
1047 * @sec_ctx contains the security context information being provided by
1048 * the user-level policy update program (e.g., setkey).
1049 * Allocate a security structure to the xp->security field; the security
1050 * field is initialized to NULL when the xfrm_policy is allocated.
1051 * @gfp specifies the context for the allocation.
1052 * Return 0 if operation was successful (memory to allocate, legal context).
1053 * @xfrm_policy_clone_security:
1054 * @old_ctx contains an existing xfrm_sec_ctx.
1055 * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
1056 * Allocate a security structure in new_ctxp that contains the
1057 * information from the old_ctx structure.
1058 * Return 0 if operation was successful (memory to allocate).
1059 * @xfrm_policy_free_security:
1060 * @ctx contains the xfrm_sec_ctx
1061 * Deallocate xp->security.
1062 * @xfrm_policy_delete_security:
1063 * @ctx contains the xfrm_sec_ctx.
1064 * Authorize deletion of xp->security.
1065 * @xfrm_state_alloc:
1066 * @x contains the xfrm_state being added to the Security Association
1067 * Database by the XFRM system.
1068 * @sec_ctx contains the security context information being provided by
1069 * the user-level SA generation program (e.g., setkey or racoon).
1070 * Allocate a security structure to the x->security field; the security
1071 * field is initialized to NULL when the xfrm_state is allocated. Set the
1072 * context to correspond to sec_ctx. Return 0 if operation was successful
1073 * (memory to allocate, legal context).
1074 * @xfrm_state_alloc_acquire:
1075 * @x contains the xfrm_state being added to the Security Association
1076 * Database by the XFRM system.
1077 * @polsec contains the policy's security context.
1078 * @secid contains the secid from which to take the mls portion of the
1079 * context.
1080 * Allocate a security structure to the x->security field; the security
1081 * field is initialized to NULL when the xfrm_state is allocated. Set the
1082 * context to correspond to secid. Return 0 if operation was successful
1083 * (memory to allocate, legal context).
1084 * @xfrm_state_free_security:
1085 * @x contains the xfrm_state.
1086 * Deallocate x->security.
1087 * @xfrm_state_delete_security:
1088 * @x contains the xfrm_state.
1089 * Authorize deletion of x->security.
1090 * @xfrm_policy_lookup:
1091 * @ctx contains the xfrm_sec_ctx for which the access control is being
1092 * checked.
1093 * @fl_secid contains the flow security label that is used to authorize
1094 * access to the policy xp.
1095 * @dir contains the direction of the flow (input or output).
1096 * Check permission when a flow selects a xfrm_policy for processing
1097 * XFRMs on a packet. The hook is called when selecting either a
1098 * per-socket policy or a generic xfrm policy.
1099 * Return 0 if permission is granted, -ESRCH otherwise, or -errno
1100 * on other errors.
1101 * @xfrm_state_pol_flow_match:
1102 * @x contains the state to match.
1103 * @xp contains the policy to check for a match.
1104 * @fl contains the flow to check for a match.
1105 * Return 1 if there is a match.
1106 * @xfrm_decode_session:
1107 * @skb points to skb to decode.
1108 * @secid points to the flow key secid to set.
1109 * @ckall says if all xfrms used should be checked for same secid.
1110 * Return 0 if ckall is zero or all xfrms used have the same secid.
1111 *
1112 * Security hooks affecting all Key Management operations
1113 *
1114 * @key_alloc:
1115 * Permit allocation of a key and assign security data. Note that the key
1116 * does not have a serial number assigned at this point.
1117 * @key points to the key.
1118 * @flags is the allocation flags
1119 * Return 0 if permission is granted, -ve error otherwise.
1120 * @key_free:
1121 * Notification of destruction; free security data.
1122 * @key points to the key.
1123 * No return value.
1124 * @key_permission:
1125 * See whether a specific operational right is granted to a process on a
1126 * key.
1127 * @key_ref refers to the key (key pointer + possession attribute bit).
1128 * @cred points to the credentials to provide the context against which to
1129 * evaluate the security data on the key.
1130 * @perm describes the combination of permissions required of this key.
1131 * Return 0 if permission is granted, -ve error otherwise.
1132 * @key_getsecurity:
1133 * Get a textual representation of the security context attached to a key
1134 * for the purposes of honouring KEYCTL_GETSECURITY. This function
1135 * allocates the storage for the NUL-terminated string and the caller
1136 * should free it.
1137 * @key points to the key to be queried.
1138 * @_buffer points to a pointer that should be set to point to the
1139 * resulting string (unless there is no label or an error occurs).
1140 * Return the length of the string (including terminating NUL) or -ve if
1141 * an error.
1142 * May also return 0 (and a NULL buffer pointer) if there is no label.
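
From userspace this surfaces through KEYCTL_GET_SECURITY; a brief sketch using the raw syscall (illustrative, error handling trimmed):

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/keyctl.h>

/* Fetch the security label of a key; returns the label length
 * (including the trailing NUL) or -1 with errno set. */
static long get_key_label(long key_serial, char *buf, size_t buflen)
{
	return syscall(SYS_keyctl, KEYCTL_GET_SECURITY, key_serial, buf, buflen);
}

libkeyutils wraps the same operation as keyctl_get_security().
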
1143 *
1144 * Security hooks affecting all System V IPC operations.
1145 *
1146 * @ipc_permission:
1147 * Check permissions for access to IPC
1148 * @ipcp contains the kernel IPC permission structure
1149 * @flag contains the desired (requested) permission set
1150 * Return 0 if permission is granted.
1151 * @ipc_getsecid:
1152 * Get the secid associated with the ipc object.
1153 * @ipcp contains the kernel IPC permission structure.
1154 * @secid contains a pointer to the location where result will be saved.
1155 * In case of failure, @secid will be set to zero.
1156 *
1157 * Security hooks for individual messages held in System V IPC message queues
1158 * @msg_msg_alloc_security:
1159 * Allocate and attach a security structure to the msg->security field.
1160 * The security field is initialized to NULL when the structure is first
1161 * created.
1162 * @msg contains the message structure to be modified.
1163 * Return 0 if operation was successful and permission is granted.
1164 * @msg_msg_free_security:
1165 * Deallocate the security structure for this message.
1166 * @msg contains the message structure to be modified.
1167 *
1168 * Security hooks for System V IPC Message Queues
1169 *
1170 * @msg_queue_alloc_security:
1171 * Allocate and attach a security structure to the
1172 * msq->q_perm.security field. The security field is initialized to
1173 * NULL when the structure is first created.
1174 * @msq contains the message queue structure to be modified.
1175 * Return 0 if operation was successful and permission is granted.
1176 * @msg_queue_free_security:
1177 * Deallocate security structure for this message queue.
1178 * @msq contains the message queue structure to be modified.
1179 * @msg_queue_associate:
1180 * Check permission when a message queue is requested through the
1181 * msgget system call. This hook is only called when returning the
1182 * message queue identifier for an existing message queue, not when a
1183 * new message queue is created.
1184 * @msq contains the message queue to act upon.
1185 * @msqflg contains the operation control flags.
1186 * Return 0 if permission is granted.
1187 * @msg_queue_msgctl:
1188 * Check permission when a message control operation specified by @cmd
1189 * is to be performed on the message queue @msq.
1190 * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
1191 * @msq contains the message queue to act upon. May be NULL.
1192 * @cmd contains the operation to be performed.
1193 * Return 0 if permission is granted.
1194 * @msg_queue_msgsnd:
1195 * Check permission before a message, @msg, is enqueued on the message
1196 * queue, @msq.
1197 * @msq contains the message queue to send message to.
1198 * @msg contains the message to be enqueued.
1199 * @msqflg contains operational flags.
1200 * Return 0 if permission is granted.
1201 * @msg_queue_msgrcv:
1202 * Check permission before a message, @msg, is removed from the message
1203 * queue, @msq. The @target task structure contains a pointer to the
1204 * process that will be receiving the message (not equal to the current
1205 * process when inline receives are being performed).
1206 * @msq contains the message queue to retrieve message from.
1207 * @msg contains the message destination.
1208 * @target contains the task structure for recipient process.
1209 * @type contains the type of message requested.
1210 * @mode contains the operational flags.
1211 * Return 0 if permission is granted.
1212 *
1213 * Security hooks for System V Shared Memory Segments
1214 *
1215 * @shm_alloc_security:
1216 * Allocate and attach a security structure to the shp->shm_perm.security
1217 * field. The security field is initialized to NULL when the structure is
1218 * first created.
1219 * @shp contains the shared memory structure to be modified.
1220 * Return 0 if operation was successful and permission is granted.
1221 * @shm_free_security:
1222 * Deallocate the security struct for this memory segment.
1223 * @shp contains the shared memory structure to be modified.
1224 * @shm_associate:
1225 * Check permission when a shared memory region is requested through the
1226 * shmget system call. This hook is only called when returning the shared
1227 * memory region identifier for an existing region, not when a new shared
1228 * memory region is created.
1229 * @shp contains the shared memory structure to be modified.
1230 * @shmflg contains the operation control flags.
1231 * Return 0 if permission is granted.
1232 * @shm_shmctl:
1233 * Check permission when a shared memory control operation specified by
1234 * @cmd is to be performed on the shared memory region @shp.
1235 * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
1236 * @shp contains shared memory structure to be modified.
1237 * @cmd contains the operation to be performed.
1238 * Return 0 if permission is granted.
1239 * @shm_shmat:
1240 * Check permissions prior to allowing the shmat system call to attach the
1241 * shared memory segment @shp to the data segment of the calling process.
1242 * The attaching address is specified by @shmaddr.
1243 * @shp contains the shared memory structure to be modified.
1244 * @shmaddr contains the address to attach memory region to.
1245 * @shmflg contains the operational flags.
1246 * Return 0 if permission is granted.
1247 *
1248 * Security hooks for System V Semaphores
1249 *
1250 * @sem_alloc_security:
1251 * Allocate and attach a security structure to the sma->sem_perm.security
1252 * field. The security field is initialized to NULL when the structure is
1253 * first created.
1254 * @sma contains the semaphore structure.
1255 * Return 0 if operation was successful and permission is granted.
1256 * @sem_free_security:
1257 * Deallocate the security struct for this semaphore.
1258 * @sma contains the semaphore structure.
1259 * @sem_associate:
1260 * Check permission when a semaphore is requested through the semget
1261 * system call. This hook is only called when returning the semaphore
1262 * identifier for an existing semaphore, not when a new one must be
1263 * created.
1264 * @sma contains the semaphore structure.
1265 * @semflg contains the operation control flags.
1266 * Return 0 if permission is granted.
1267 * @sem_semctl:
1268 * Check permission when a semaphore operation specified by @cmd is to be
1269 * performed on the semaphore @sma. The @sma may be NULL, e.g. for
1270 * IPC_INFO or SEM_INFO.
1271 * @sma contains the semaphore structure. May be NULL.
1272 * @cmd contains the operation to be performed.
1273 * Return 0 if permission is granted.
1274 * @sem_semop:
1275 * Check permissions before performing operations on members of the
1276 * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
1277 * may be modified.
1278 * @sma contains the semaphore structure.
1279 * @sops contains the operations to perform.
1280 * @nsops contains the number of operations to perform.
1281 * @alter contains the flag indicating whether changes are to be made.
1282 * Return 0 if permission is granted.
1283 *
1284 * @binder_set_context_mgr:
1285 * Check whether @mgr is allowed to be the binder context manager.
1286 * @mgr contains the task_struct for the task being registered.
1287 * Return 0 if permission is granted.
1288 * @binder_transaction:
1289 * Check whether @from is allowed to invoke a binder transaction call
1290 * to @to.
1291 * @from contains the task_struct for the sending task.
1292 * @to contains the task_struct for the receiving task.
1293 * @binder_transfer_binder:
1294 * Check whether @from is allowed to transfer a binder reference to @to.
1295 * @from contains the task_struct for the sending task.
1296 * @to contains the task_struct for the receiving task.
1297 * @binder_transfer_file:
1298 * Check whether @from is allowed to transfer @file to @to.
1299 * @from contains the task_struct for the sending task.
1300 * @file contains the struct file being transferred.
1301 * @to contains the task_struct for the receiving task.
1302 *
1303 * @ptrace_access_check:
1304 * Check permission before allowing the current process to trace the
1305 * @child process.
1306 * Security modules may also want to perform a process tracing check
1307 * during an execve in the bprm_set_creds hook of
1308 * binprm_security_ops if the process is being traced and its security
1309 * attributes would be changed by the execve.
1311 * @child contains the task_struct structure for the target process.
1312 * @mode contains the PTRACE_MODE flags indicating the form of access.
1313 * Return 0 if permission is granted.
1314 * @ptrace_traceme:
1315 * Check that the @parent process has sufficient permission to trace the
1316 * current process before allowing the current process to present itself
1317 * to the @parent process for tracing.
1318 * @parent contains the task_struct structure for debugger process.
1319 * Return 0 if permission is granted.
1320 * @capget:
1321 * Get the @effective, @inheritable, and @permitted capability sets for
1322 * the @target process. The hook may also perform permission checking to
1323 * determine if the current process is allowed to see the capability sets
1324 * of the @target process.
1325 * @target contains the task_struct structure for target process.
1326 * @effective contains the effective capability set.
1327 * @inheritable contains the inheritable capability set.
1328 * @permitted contains the permitted capability set.
1329 * Return 0 if the capability sets were successfully obtained.
1330 * @capset:
1331 * Set the @effective, @inheritable, and @permitted capability sets for
1332 * the current process.
1333 * @new contains the new credentials structure for target process.
1334 * @old contains the current credentials structure for target process.
1335 * @effective contains the effective capability set.
1336 * @inheritable contains the inheritable capability set.
1337 * @permitted contains the permitted capability set.
1338 * Return 0 and update @new if permission is granted.
1339 * @capable:
1340 * Check whether the given credentials have the @cap capability in the
1341 * indicated user namespace.
1342 * @cred contains the credentials to use.
1343 * @ns contains the user namespace we want the capability in.
1344 * @cap contains the capability <include/linux/capability.h>.
1345 * @audit indicates whether to write an audit message or not.
1346 * Return 0 if the capability is granted for @cred.
1347 * @syslog:
1348 * Check permission before accessing the kernel message ring or changing
1349 * logging to the console.
1350 * See the syslog(2) manual page for an explanation of the @type values.
1351 * @type contains the type of action.
1352 * @from_file indicates the context of action (if it came from /proc).
1353 * Return 0 if permission is granted.
1354 * @settime:
1355 * Check permission to change the system time.
1356 * struct timespec and timezone are defined in include/linux/time.h
1357 * @ts contains new time
1358 * @tz contains new timezone
1359 * Return 0 if permission is granted.
1360 * @vm_enough_memory:
1361 * Check permissions for allocating a new virtual mapping.
1362 * @mm contains the mm struct it is being added to.
1363 * @pages contains the number of pages.
1364 * Return 0 if permission is granted.
1365 *
1366 * @ismaclabel:
1367 * Check if the extended attribute specified by @name
1368 * represents a MAC label. Returns 1 if @name is a MAC
1369 * attribute, otherwise returns 0.
1370 * @name full extended attribute name to check against
1371 * LSM as a MAC label.
1372 *
1373 * @secid_to_secctx:
1374 * Convert secid to security context. If secdata is NULL the length of
1375 * the result will be returned in seclen, but no secdata will be returned.
1376 * This does mean that the length could change between calls to check the
1377 * length and the next call which actually allocates and returns the secdata.
1378 * @secid contains the security ID.
1379 * @secdata contains the pointer that stores the converted security context.
1380 * @seclen pointer which contains the length of the data
1381 * @secctx_to_secid:
1382 * Convert security context to secid.
1383 * @secid contains the pointer to the generated security ID.
1384 * @secdata contains the security context.
1385 *
1386 * @release_secctx:
1387 * Release the security context.
1388 * @secdata contains the security context.
1389 * @seclen contains the length of the security context.
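
A kernel-side sketch (hypothetical caller, not part of this header) of the usual pairing of these interfaces when printing the current task's label:

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/types.h>

/* Illustrative only: map current's secid to a text context and print it. */
static void example_print_current_label(void)
{
	u32 secid, len;
	char *ctx;

	security_task_getsecid(current, &secid);
	if (security_secid_to_secctx(secid, &ctx, &len))
		return;		/* no context available or conversion failed */
	pr_info("current context: %.*s\n", (int)len, ctx);
	security_release_secctx(ctx, len);
}
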
1390 *
1391 * Security hooks for Audit
1392 *
1393 * @audit_rule_init:
1394 * Allocate and initialize an LSM audit rule structure.
1395 * @field contains the required Audit action. Field flags are defined in include/linux/audit.h.
1396 * @op contains the operator the rule uses.
1397 * @rulestr contains the context string that the rule will be applied to.
1398 * @lsmrule contains a pointer to receive the result.
1399 * Return 0 if @lsmrule has been successfully set,
1400 * -EINVAL in case of an invalid rule.
1401 *
1402 * @audit_rule_known:
1403 * Specifies whether the given @rule contains any fields related to the current LSM.
1404 * @rule contains the audit rule of interest.
1405 * Return 1 in case of relation found, 0 otherwise.
1406 *
1407 * @audit_rule_match:
1408 * Determine if given @secid matches a rule previously approved
1409 * by @audit_rule_known.
1410 * @secid contains the security id in question.
1411 * @field contains the field which relates to current LSM.
1412 * @op contains the operator that will be used for matching.
1413 * @rule points to the audit rule that will be checked against.
1414 * @actx points to the audit context associated with the check.
1415 * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
1416 *
1417 * @audit_rule_free:
1418 * Deallocate the LSM audit rule structure previously allocated by
1419 * audit_rule_init.
1420 * @rule contains the allocated rule
1421 *
1422 * @inode_notifysecctx:
1423 * Notify the security module of what the security context of an inode
1424 * should be. Initializes the incore security context managed by the
1425 * security module for this inode. Example usage: NFS client invokes
1426 * this hook to initialize the security context in its incore inode to the
1427 * value provided by the server for the file when the server returned the
1428 * file's attributes to the client.
1429 *
1430 * Must be called with inode->i_mutex locked.
1431 *
1432 * @inode we wish to set the security context of.
1433 * @ctx contains the string which we wish to set in the inode.
1434 * @ctxlen contains the length of @ctx.
1435 *
1436 * @inode_setsecctx:
1437 * Change the security context of an inode. Updates the
1438 * incore security context managed by the security module and invokes the
1439 * fs code as needed (via __vfs_setxattr_noperm) to update any backing
1440 * xattrs that represent the context. Example usage: NFS server invokes
1441 * this hook to change the security context in its incore inode and on the
1442 * backing filesystem to a value provided by the client on a SETATTR
1443 * operation.
1444 *
1445 * Must be called with inode->i_mutex locked.
1446 *
1447 * @dentry contains the inode we wish to set the security context of.
1448 * @ctx contains the string which we wish to set in the inode.
1449 * @ctxlen contains the length of @ctx.
1450 *
1451 * @inode_getsecctx:
1452 * On success, returns 0 and fills out @ctx and @ctxlen with the security
1453 * context for the given @inode.
1454 *
1455 * @inode we wish to get the security context of.
1456 * @ctx is a pointer in which to place the allocated security context.
1457 * @ctxlen points to the place to put the length of @ctx.
1458 * This is the main security structure.
1459 */
1460struct security_operations {
1461 char name[SECURITY_NAME_MAX + 1];
1462
1463 int (*binder_set_context_mgr) (struct task_struct *mgr);
1464 int (*binder_transaction) (struct task_struct *from,
1465 struct task_struct *to);
1466 int (*binder_transfer_binder) (struct task_struct *from,
1467 struct task_struct *to);
1468 int (*binder_transfer_file) (struct task_struct *from,
1469 struct task_struct *to, struct file *file);
1470
1471 int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
1472 int (*ptrace_traceme) (struct task_struct *parent);
1473 int (*capget) (struct task_struct *target,
1474 kernel_cap_t *effective,
1475 kernel_cap_t *inheritable, kernel_cap_t *permitted);
1476 int (*capset) (struct cred *new,
1477 const struct cred *old,
1478 const kernel_cap_t *effective,
1479 const kernel_cap_t *inheritable,
1480 const kernel_cap_t *permitted);
1481 int (*capable) (const struct cred *cred, struct user_namespace *ns,
1482 int cap, int audit);
1483 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
1484 int (*quota_on) (struct dentry *dentry);
1485 int (*syslog) (int type);
1486 int (*settime) (const struct timespec *ts, const struct timezone *tz);
1487 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
1488
1489 int (*bprm_set_creds) (struct linux_binprm *bprm);
1490 int (*bprm_check_security) (struct linux_binprm *bprm);
1491 int (*bprm_secureexec) (struct linux_binprm *bprm);
1492 void (*bprm_committing_creds) (struct linux_binprm *bprm);
1493 void (*bprm_committed_creds) (struct linux_binprm *bprm);
1494
1495 int (*sb_alloc_security) (struct super_block *sb);
1496 void (*sb_free_security) (struct super_block *sb);
1497 int (*sb_copy_data) (char *orig, char *copy);
1498 int (*sb_remount) (struct super_block *sb, void *data);
1499 int (*sb_kern_mount) (struct super_block *sb, int flags, void *data);
1500 int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
1501 int (*sb_statfs) (struct dentry *dentry);
1502 int (*sb_mount) (const char *dev_name, struct path *path,
1503 const char *type, unsigned long flags, void *data);
1504 int (*sb_umount) (struct vfsmount *mnt, int flags);
1505 int (*sb_pivotroot) (struct path *old_path,
1506 struct path *new_path);
1507 int (*sb_set_mnt_opts) (struct super_block *sb,
1508 struct security_mnt_opts *opts,
1509 unsigned long kern_flags,
1510 unsigned long *set_kern_flags);
1511 int (*sb_clone_mnt_opts) (const struct super_block *oldsb,
1512 struct super_block *newsb);
1513 int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts);
1514 int (*dentry_init_security) (struct dentry *dentry, int mode,
1515 struct qstr *name, void **ctx,
1516 u32 *ctxlen);
1517
1518
1519#ifdef CONFIG_SECURITY_PATH
1520 int (*path_unlink) (struct path *dir, struct dentry *dentry);
1521 int (*path_mkdir) (struct path *dir, struct dentry *dentry, umode_t mode);
1522 int (*path_rmdir) (struct path *dir, struct dentry *dentry);
1523 int (*path_mknod) (struct path *dir, struct dentry *dentry, umode_t mode,
1524 unsigned int dev);
1525 int (*path_truncate) (struct path *path);
1526 int (*path_symlink) (struct path *dir, struct dentry *dentry,
1527 const char *old_name);
1528 int (*path_link) (struct dentry *old_dentry, struct path *new_dir,
1529 struct dentry *new_dentry);
1530 int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
1531 struct path *new_dir, struct dentry *new_dentry);
1532 int (*path_chmod) (struct path *path, umode_t mode);
1533 int (*path_chown) (struct path *path, kuid_t uid, kgid_t gid);
1534 int (*path_chroot) (struct path *path);
1535#endif
1536
1537 int (*inode_alloc_security) (struct inode *inode);
1538 void (*inode_free_security) (struct inode *inode);
1539 int (*inode_init_security) (struct inode *inode, struct inode *dir,
1540 const struct qstr *qstr, const char **name,
1541 void **value, size_t *len);
1542 int (*inode_create) (struct inode *dir,
1543 struct dentry *dentry, umode_t mode);
1544 int (*inode_link) (struct dentry *old_dentry,
1545 struct inode *dir, struct dentry *new_dentry);
1546 int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
1547 int (*inode_symlink) (struct inode *dir,
1548 struct dentry *dentry, const char *old_name);
1549 int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode);
1550 int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
1551 int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
1552 umode_t mode, dev_t dev);
1553 int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
1554 struct inode *new_dir, struct dentry *new_dentry);
1555 int (*inode_readlink) (struct dentry *dentry);
1556 int (*inode_follow_link) (struct dentry *dentry, struct inode *inode,
1557 bool rcu);
1558 int (*inode_permission) (struct inode *inode, int mask);
1559 int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
1560 int (*inode_getattr) (const struct path *path);
1561 int (*inode_setxattr) (struct dentry *dentry, const char *name,
1562 const void *value, size_t size, int flags);
1563 void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
1564 const void *value, size_t size, int flags);
1565 int (*inode_getxattr) (struct dentry *dentry, const char *name);
1566 int (*inode_listxattr) (struct dentry *dentry);
1567 int (*inode_removexattr) (struct dentry *dentry, const char *name);
1568 int (*inode_need_killpriv) (struct dentry *dentry);
1569 int (*inode_killpriv) (struct dentry *dentry);
1570 int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc);
1571 int (*inode_setsecurity) (struct inode *inode, const char *name, const void *value, size_t size, int flags);
1572 int (*inode_listsecurity) (struct inode *inode, char *buffer, size_t buffer_size);
1573 void (*inode_getsecid) (const struct inode *inode, u32 *secid);
1574
1575 int (*file_permission) (struct file *file, int mask);
1576 int (*file_alloc_security) (struct file *file);
1577 void (*file_free_security) (struct file *file);
1578 int (*file_ioctl) (struct file *file, unsigned int cmd,
1579 unsigned long arg);
1580 int (*mmap_addr) (unsigned long addr);
1581 int (*mmap_file) (struct file *file,
1582 unsigned long reqprot, unsigned long prot,
1583 unsigned long flags);
1584 int (*file_mprotect) (struct vm_area_struct *vma,
1585 unsigned long reqprot,
1586 unsigned long prot);
1587 int (*file_lock) (struct file *file, unsigned int cmd);
1588 int (*file_fcntl) (struct file *file, unsigned int cmd,
1589 unsigned long arg);
1590 void (*file_set_fowner) (struct file *file);
1591 int (*file_send_sigiotask) (struct task_struct *tsk,
1592 struct fown_struct *fown, int sig);
1593 int (*file_receive) (struct file *file);
1594 int (*file_open) (struct file *file, const struct cred *cred);
1595
1596 int (*task_create) (unsigned long clone_flags);
1597 void (*task_free) (struct task_struct *task);
1598 int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp);
1599 void (*cred_free) (struct cred *cred);
1600 int (*cred_prepare)(struct cred *new, const struct cred *old,
1601 gfp_t gfp);
1602 void (*cred_transfer)(struct cred *new, const struct cred *old);
1603 int (*kernel_act_as)(struct cred *new, u32 secid);
1604 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1605 int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size);
1606 int (*kernel_module_request)(char *kmod_name);
1607 int (*kernel_module_from_file)(struct file *file);
1608 int (*task_fix_setuid) (struct cred *new, const struct cred *old,
1609 int flags);
1610 int (*task_setpgid) (struct task_struct *p, pid_t pgid);
1611 int (*task_getpgid) (struct task_struct *p);
1612 int (*task_getsid) (struct task_struct *p);
1613 void (*task_getsecid) (struct task_struct *p, u32 *secid);
1614 int (*task_setnice) (struct task_struct *p, int nice);
1615 int (*task_setioprio) (struct task_struct *p, int ioprio);
1616 int (*task_getioprio) (struct task_struct *p);
1617 int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
1618 struct rlimit *new_rlim);
1619 int (*task_setscheduler) (struct task_struct *p);
1620 int (*task_getscheduler) (struct task_struct *p);
1621 int (*task_movememory) (struct task_struct *p);
1622 int (*task_kill) (struct task_struct *p,
1623 struct siginfo *info, int sig, u32 secid);
1624 int (*task_wait) (struct task_struct *p);
1625 int (*task_prctl) (int option, unsigned long arg2,
1626 unsigned long arg3, unsigned long arg4,
1627 unsigned long arg5);
1628 void (*task_to_inode) (struct task_struct *p, struct inode *inode);
1629
1630 int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag);
1631 void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid);
1632
1633 int (*msg_msg_alloc_security) (struct msg_msg *msg);
1634 void (*msg_msg_free_security) (struct msg_msg *msg);
1635
1636 int (*msg_queue_alloc_security) (struct msg_queue *msq);
1637 void (*msg_queue_free_security) (struct msg_queue *msq);
1638 int (*msg_queue_associate) (struct msg_queue *msq, int msqflg);
1639 int (*msg_queue_msgctl) (struct msg_queue *msq, int cmd);
1640 int (*msg_queue_msgsnd) (struct msg_queue *msq,
1641 struct msg_msg *msg, int msqflg);
1642 int (*msg_queue_msgrcv) (struct msg_queue *msq,
1643 struct msg_msg *msg,
1644 struct task_struct *target,
1645 long type, int mode);
1646
1647 int (*shm_alloc_security) (struct shmid_kernel *shp);
1648 void (*shm_free_security) (struct shmid_kernel *shp);
1649 int (*shm_associate) (struct shmid_kernel *shp, int shmflg);
1650 int (*shm_shmctl) (struct shmid_kernel *shp, int cmd);
1651 int (*shm_shmat) (struct shmid_kernel *shp,
1652 char __user *shmaddr, int shmflg);
1653
1654 int (*sem_alloc_security) (struct sem_array *sma);
1655 void (*sem_free_security) (struct sem_array *sma);
1656 int (*sem_associate) (struct sem_array *sma, int semflg);
1657 int (*sem_semctl) (struct sem_array *sma, int cmd);
1658 int (*sem_semop) (struct sem_array *sma,
1659 struct sembuf *sops, unsigned nsops, int alter);
1660
1661 int (*netlink_send) (struct sock *sk, struct sk_buff *skb);
1662
1663 void (*d_instantiate) (struct dentry *dentry, struct inode *inode);
1664
1665 int (*getprocattr) (struct task_struct *p, char *name, char **value);
1666 int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size);
1667 int (*ismaclabel) (const char *name);
1668 int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen);
1669 int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
1670 void (*release_secctx) (char *secdata, u32 seclen);
1671
1672 int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
1673 int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
1674 int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
1675
1676#ifdef CONFIG_SECURITY_NETWORK
1677 int (*unix_stream_connect) (struct sock *sock, struct sock *other, struct sock *newsk);
1678 int (*unix_may_send) (struct socket *sock, struct socket *other);
1679
1680 int (*socket_create) (int family, int type, int protocol, int kern);
1681 int (*socket_post_create) (struct socket *sock, int family,
1682 int type, int protocol, int kern);
1683 int (*socket_bind) (struct socket *sock,
1684 struct sockaddr *address, int addrlen);
1685 int (*socket_connect) (struct socket *sock,
1686 struct sockaddr *address, int addrlen);
1687 int (*socket_listen) (struct socket *sock, int backlog);
1688 int (*socket_accept) (struct socket *sock, struct socket *newsock);
1689 int (*socket_sendmsg) (struct socket *sock,
1690 struct msghdr *msg, int size);
1691 int (*socket_recvmsg) (struct socket *sock,
1692 struct msghdr *msg, int size, int flags);
1693 int (*socket_getsockname) (struct socket *sock);
1694 int (*socket_getpeername) (struct socket *sock);
1695 int (*socket_getsockopt) (struct socket *sock, int level, int optname);
1696 int (*socket_setsockopt) (struct socket *sock, int level, int optname);
1697 int (*socket_shutdown) (struct socket *sock, int how);
1698 int (*socket_sock_rcv_skb) (struct sock *sk, struct sk_buff *skb);
1699 int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
1700 int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid);
1701 int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
1702 void (*sk_free_security) (struct sock *sk);
1703 void (*sk_clone_security) (const struct sock *sk, struct sock *newsk);
1704 void (*sk_getsecid) (struct sock *sk, u32 *secid);
1705 void (*sock_graft) (struct sock *sk, struct socket *parent);
1706 int (*inet_conn_request) (struct sock *sk, struct sk_buff *skb,
1707 struct request_sock *req);
1708 void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
1709 void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
1710 int (*secmark_relabel_packet) (u32 secid);
1711 void (*secmark_refcount_inc) (void);
1712 void (*secmark_refcount_dec) (void);
1713 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
1714 int (*tun_dev_alloc_security) (void **security);
1715 void (*tun_dev_free_security) (void *security);
1716 int (*tun_dev_create) (void);
1717 int (*tun_dev_attach_queue) (void *security);
1718 int (*tun_dev_attach) (struct sock *sk, void *security);
1719 int (*tun_dev_open) (void *security);
1720#endif /* CONFIG_SECURITY_NETWORK */
1721
1722#ifdef CONFIG_SECURITY_NETWORK_XFRM
1723 int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
1724 struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
1725 int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
1726 void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
1727 int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
1728 int (*xfrm_state_alloc) (struct xfrm_state *x,
1729 struct xfrm_user_sec_ctx *sec_ctx);
1730 int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
1731 struct xfrm_sec_ctx *polsec,
1732 u32 secid);
1733 void (*xfrm_state_free_security) (struct xfrm_state *x);
1734 int (*xfrm_state_delete_security) (struct xfrm_state *x);
1735 int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
1736 int (*xfrm_state_pol_flow_match) (struct xfrm_state *x,
1737 struct xfrm_policy *xp,
1738 const struct flowi *fl);
1739 int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall);
1740#endif /* CONFIG_SECURITY_NETWORK_XFRM */
1741
1742 /* key management security hooks */
1743#ifdef CONFIG_KEYS
1744 int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags);
1745 void (*key_free) (struct key *key);
1746 int (*key_permission) (key_ref_t key_ref,
1747 const struct cred *cred,
1748 unsigned perm);
1749 int (*key_getsecurity)(struct key *key, char **_buffer);
1750#endif /* CONFIG_KEYS */
1751
1752#ifdef CONFIG_AUDIT
1753 int (*audit_rule_init) (u32 field, u32 op, char *rulestr, void **lsmrule);
1754 int (*audit_rule_known) (struct audit_krule *krule);
1755 int (*audit_rule_match) (u32 secid, u32 field, u32 op, void *lsmrule,
1756 struct audit_context *actx);
1757 void (*audit_rule_free) (void *lsmrule);
1758#endif /* CONFIG_AUDIT */
1759};
1760
1761 181 /* prototypes */
1762 182 extern int security_init(void);
1763extern int security_module_enable(struct security_operations *ops);
1764extern int register_security(struct security_operations *ops);
1765extern void __init security_fixup_ops(struct security_operations *ops);
1766
1767 183
1768 184 /* Security operations */
1769 185 int security_binder_set_context_mgr(struct task_struct *mgr);
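
For orientation, a hedged sketch of how a module built against this interface wires itself up (the "example" module and its hook are hypothetical):

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/security.h>

static int example_ptrace_access_check(struct task_struct *child,
				       unsigned int mode)
{
	return 0;	/* a real module would apply its policy here */
}

static struct security_operations example_ops = {
	.name			= "example",
	.ptrace_access_check	= example_ptrace_access_check,
};

static int __init example_lsm_init(void)
{
	/* Only one major module may register; honor the boot-time choice. */
	if (!security_module_enable(&example_ops))
		return 0;
	return register_security(&example_ops);
}
security_initcall(example_lsm_init);

Hooks left NULL are filled in with the default implementations at registration time, so a module only needs to provide the operations it actually mediates.
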
@@ -2049,7 +465,7 @@ static inline int security_settime(const struct timespec *ts,
2049 465
2050 466 static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
2051 467 {
2052 return cap_vm_enough_memory(mm, pages);
468 return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
2053 469 }
2054 470
2055 471 static inline int security_bprm_set_creds(struct linux_binprm *bprm)
@@ -2653,7 +1069,7 @@ static inline int security_setprocattr(struct task_struct *p, char *name, void *
2653 1069
2654 1070 static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
2655 1071 {
2656 return cap_netlink_send(sk, skb);
1072 return 0;
2657 1073 }
2658 1074
2659 1075 static inline int security_ismaclabel(const char *name)
@@ -3221,36 +1637,5 @@ static inline void free_secdata(void *secdata)
3221 1637 { }
3222 1638 #endif /* CONFIG_SECURITY */
3223 1639
3224#ifdef CONFIG_SECURITY_YAMA
3225extern int yama_ptrace_access_check(struct task_struct *child,
3226 unsigned int mode);
3227extern int yama_ptrace_traceme(struct task_struct *parent);
3228extern void yama_task_free(struct task_struct *task);
3229extern int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
3230 unsigned long arg4, unsigned long arg5);
3231#else
3232static inline int yama_ptrace_access_check(struct task_struct *child,
3233 unsigned int mode)
3234{
3235 return 0;
3236}
3237
3238static inline int yama_ptrace_traceme(struct task_struct *parent)
3239{
3240 return 0;
3241}
3242
3243static inline void yama_task_free(struct task_struct *task)
3244{
3245}
3246
3247static inline int yama_task_prctl(int option, unsigned long arg2,
3248 unsigned long arg3, unsigned long arg4,
3249 unsigned long arg5)
3250{
3251 return -ENOSYS;
3252}
3253#endif /* CONFIG_SECURITY_YAMA */
3254
3255 1640 #endif /* ! __LINUX_SECURITY_H */
3256 1641
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index afbb1fd77c77..912a7c482649 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -123,6 +123,7 @@ __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
123 123 __printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
124 124
125 125 int seq_path(struct seq_file *, const struct path *, const char *);
126int seq_file_path(struct seq_file *, struct file *, const char *);
126 127 int seq_dentry(struct seq_file *, struct dentry *, const char *);
127 128 int seq_path_root(struct seq_file *m, const struct path *path,
128 129 	const struct path *root, const char *esc);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 486e685a226a..e0582106ef4f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -35,6 +35,7 @@
35 35 #include <linux/spinlock.h>
36 36 #include <linux/preempt.h>
37 37 #include <linux/lockdep.h>
38#include <linux/compiler.h>
38 39 #include <asm/processor.h>
39 40
40 41 /*
@@ -274,9 +275,87 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
274 275 	s->sequence++;
275 276 }
276 277
277/*
278static inline int raw_read_seqcount_latch(seqcount_t *s)
279{
280 return lockless_dereference(s->sequence);
281}
282
283/**
278 284  * raw_write_seqcount_latch - redirect readers to even/odd copy
279 285  * @s: pointer to seqcount_t
286 *
287 * The latch technique is a multiversion concurrency control method that allows
288 * queries during non-atomic modifications. If you can guarantee queries never
289 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
290 * -- you most likely do not need this.
291 *
292 * Where the traditional RCU/lockless data structures rely on atomic
293 * modifications to ensure queries observe either the old or the new state the
294 * latch allows the same for non-atomic updates. The trade-off is doubling the
295 * cost of storage; we have to maintain two copies of the entire data
296 * structure.
297 *
298 * Very simply put: we first modify one copy and then the other. This ensures
299 * there is always one copy in a stable state, ready to give us an answer.
300 *
301 * The basic form is a data structure like:
302 *
303 * struct latch_struct {
304 * seqcount_t seq;
305 * struct data_struct data[2];
306 * };
307 *
308 * Where a modification, which is assumed to be externally serialized, does the
309 * following:
310 *
311 * void latch_modify(struct latch_struct *latch, ...)
312 * {
313 * smp_wmb(); <- Ensure that the last data[1] update is visible
314 * latch->seq++;
315 * smp_wmb(); <- Ensure that the seqcount update is visible
316 *
317 * modify(latch->data[0], ...);
318 *
319 * smp_wmb(); <- Ensure that the data[0] update is visible
320 * latch->seq++;
321 * smp_wmb(); <- Ensure that the seqcount update is visible
322 *
323 * modify(latch->data[1], ...);
324 * }
325 *
326 * The query will have a form like:
327 *
328 * struct entry *latch_query(struct latch_struct *latch, ...)
329 * {
330 * struct entry *entry;
331 * unsigned seq, idx;
332 *
333 * do {
334 * seq = lockless_dereference(latch->seq);
335 *
336 * idx = seq & 0x01;
337 * entry = data_query(latch->data[idx], ...);
338 *
339 * smp_rmb();
340 * } while (seq != latch->seq);
341 *
342 * return entry;
343 * }
344 *
345 * So during the modification, queries are first redirected to data[1]. Then we
346 * modify data[0]. When that is complete, we redirect queries back to data[0]
347 * and we can modify data[1].
348 *
349 * NOTE: The non-requirement for atomic modifications does _NOT_ include
350 * the publishing of new entries in the case where data is a dynamic
351 * data structure.
352 *
353 * An iteration might start in data[0] and get suspended long enough
354 * to miss an entire modification sequence, once it resumes it might
355 * observe the new entry.
356 *
357 * NOTE: When data is a dynamic data structure; one should use regular RCU
358 * patterns to manage the lifetimes of the objects within.
280 359  */
281 360 static inline void raw_write_seqcount_latch(seqcount_t *s)
282 361 {
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 78097e7a330a..ba82c07feb95 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -12,6 +12,7 @@
12 12 #define _LINUX_SERIAL_8250_H
13 13
14 14 #include <linux/serial_core.h>
15#include <linux/serial_reg.h>
15 16 #include <linux/platform_device.h>
16 17
17 18 /*
@@ -137,6 +138,8 @@ extern int early_serial_setup(struct uart_port *port);
137 138
138 139 extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
139 140 extern void serial8250_early_out(struct uart_port *port, int offset, int value);
141extern int early_serial8250_setup(struct earlycon_device *device,
142 const char *options);
140 143 extern void serial8250_do_set_termios(struct uart_port *port,
141 144 	struct ktermios *termios, struct ktermios *old);
142 145 extern int serial8250_do_startup(struct uart_port *port);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 025dad9dcde4..297d4fa1cfe5 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -35,7 +35,7 @@
35#define uart_console(port) \ 35#define uart_console(port) \
36 ((port)->cons && (port)->cons->index == (port)->line) 36 ((port)->cons && (port)->cons->index == (port)->line)
37#else 37#else
38#define uart_console(port) (0) 38#define uart_console(port) ({ (void)port; 0; })
39#endif 39#endif
40 40
41struct uart_port; 41struct uart_port;
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 6c5e3bb282b0..7c536ac5be05 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -1,6 +1,7 @@
1#ifndef __LINUX_SERIAL_SCI_H 1#ifndef __LINUX_SERIAL_SCI_H
2#define __LINUX_SERIAL_SCI_H 2#define __LINUX_SERIAL_SCI_H
3 3
4#include <linux/bitops.h>
4#include <linux/serial_core.h> 5#include <linux/serial_core.h>
5#include <linux/sh_dma.h> 6#include <linux/sh_dma.h>
6 7
@@ -10,59 +11,16 @@
10 11
11#define SCIx_NOT_SUPPORTED (-1) 12#define SCIx_NOT_SUPPORTED (-1)
12 13
13/* SCSMR (Serial Mode Register) */
14#define SCSMR_CHR (1 << 6) /* 7-bit Character Length */
15#define SCSMR_PE (1 << 5) /* Parity Enable */
16#define SCSMR_ODD (1 << 4) /* Odd Parity */
17#define SCSMR_STOP (1 << 3) /* Stop Bit Length */
18#define SCSMR_CKS 0x0003 /* Clock Select */
19
20/* Serial Control Register (@ = not supported by all parts) */ 14/* Serial Control Register (@ = not supported by all parts) */
21#define SCSCR_TIE (1 << 7) /* Transmit Interrupt Enable */ 15#define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */
22#define SCSCR_RIE (1 << 6) /* Receive Interrupt Enable */ 16#define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */
23#define SCSCR_TE (1 << 5) /* Transmit Enable */ 17#define SCSCR_TE BIT(5) /* Transmit Enable */
24#define SCSCR_RE (1 << 4) /* Receive Enable */ 18#define SCSCR_RE BIT(4) /* Receive Enable */
25#define SCSCR_REIE (1 << 3) /* Receive Error Interrupt Enable @ */ 19#define SCSCR_REIE BIT(3) /* Receive Error Interrupt Enable @ */
26#define SCSCR_TOIE (1 << 2) /* Timeout Interrupt Enable @ */ 20#define SCSCR_TOIE BIT(2) /* Timeout Interrupt Enable @ */
27#define SCSCR_CKE1 (1 << 1) /* Clock Enable 1 */ 21#define SCSCR_CKE1 BIT(1) /* Clock Enable 1 */
28#define SCSCR_CKE0 (1 << 0) /* Clock Enable 0 */ 22#define SCSCR_CKE0 BIT(0) /* Clock Enable 0 */
29/* SCIFA/SCIFB only */ 23
30#define SCSCR_TDRQE (1 << 15) /* Tx Data Transfer Request Enable */
31#define SCSCR_RDRQE (1 << 14) /* Rx Data Transfer Request Enable */
32
33/* SCxSR (Serial Status Register) on SCI */
34#define SCI_TDRE 0x80 /* Transmit Data Register Empty */
35#define SCI_RDRF 0x40 /* Receive Data Register Full */
36#define SCI_ORER 0x20 /* Overrun Error */
37#define SCI_FER 0x10 /* Framing Error */
38#define SCI_PER 0x08 /* Parity Error */
39#define SCI_TEND 0x04 /* Transmit End */
40
41#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER)
42
43/* SCxSR (Serial Status Register) on SCIF, HSCIF */
44#define SCIF_ER 0x0080 /* Receive Error */
45#define SCIF_TEND 0x0040 /* Transmission End */
46#define SCIF_TDFE 0x0020 /* Transmit FIFO Data Empty */
47#define SCIF_BRK 0x0010 /* Break Detect */
48#define SCIF_FER 0x0008 /* Framing Error */
49#define SCIF_PER 0x0004 /* Parity Error */
50#define SCIF_RDF 0x0002 /* Receive FIFO Data Full */
51#define SCIF_DR 0x0001 /* Receive Data Ready */
52
53#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
54
55/* SCFCR (FIFO Control Register) */
56#define SCFCR_LOOP (1 << 0) /* Loopback Test */
57
58/* SCSPTR (Serial Port Register), optional */
59#define SCSPTR_RTSIO (1 << 7) /* Serial Port RTS Pin Input/Output */
60#define SCSPTR_CTSIO (1 << 5) /* Serial Port CTS Pin Input/Output */
61#define SCSPTR_SPB2IO (1 << 1) /* Serial Port Break Input/Output */
62#define SCSPTR_SPB2DT (1 << 0) /* Serial Port Break Data */
63
64/* HSSRR HSCIF */
65#define HSCIF_SRE 0x8000 /* Sampling Rate Register Enable */
66 24
67enum { 25enum {
68 SCIx_PROBE_REGTYPE, 26 SCIx_PROBE_REGTYPE,
@@ -82,28 +40,6 @@ enum {
82 SCIx_NR_REGTYPES, 40 SCIx_NR_REGTYPES,
83}; 41};
84 42
85/*
86 * SCI register subset common for all port types.
87 * Not all registers will exist on all parts.
88 */
89enum {
90 SCSMR, /* Serial Mode Register */
91 SCBRR, /* Bit Rate Register */
92 SCSCR, /* Serial Control Register */
93 SCxSR, /* Serial Status Register */
94 SCFCR, /* FIFO Control Register */
95 SCFDR, /* FIFO Data Count Register */
96 SCxTDR, /* Transmit (FIFO) Data Register */
97 SCxRDR, /* Receive (FIFO) Data Register */
98 SCLSR, /* Line Status Register */
99 SCTFDR, /* Transmit FIFO Data Count Register */
100 SCRFDR, /* Receive FIFO Data Count Register */
101 SCSPTR, /* Serial Port Register */
102 HSSRR, /* Sampling Rate Register */
103
104 SCIx_NR_REGS,
105};
106
107struct device; 43struct device;
108 44
109struct plat_sci_port_ops { 45struct plat_sci_port_ops {
@@ -113,7 +49,7 @@ struct plat_sci_port_ops {
113/* 49/*
114 * Port-specific capabilities 50 * Port-specific capabilities
115 */ 51 */
116#define SCIx_HAVE_RTSCTS (1 << 0) 52#define SCIx_HAVE_RTSCTS BIT(0)
117 53
118/* 54/*
119 * Platform device specific platform_data struct 55 * Platform device specific platform_data struct
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f15154a879c7..d6cdd6e87d53 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,7 +34,9 @@
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/netdev_features.h> 35#include <linux/netdev_features.h>
36#include <linux/sched.h> 36#include <linux/sched.h>
37#include <net/flow_keys.h> 37#include <net/flow_dissector.h>
38#include <linux/splice.h>
39#include <linux/in6.h>
38 40
39/* A. Checksumming of received packets by device. 41/* A. Checksumming of received packets by device.
40 * 42 *
@@ -170,13 +172,19 @@ struct nf_bridge_info {
170 BRNF_PROTO_UNCHANGED, 172 BRNF_PROTO_UNCHANGED,
171 BRNF_PROTO_8021Q, 173 BRNF_PROTO_8021Q,
172 BRNF_PROTO_PPPOE 174 BRNF_PROTO_PPPOE
173 } orig_proto; 175 } orig_proto:8;
174 bool pkt_otherhost; 176 bool pkt_otherhost;
177 __u16 frag_max_size;
175 unsigned int mask; 178 unsigned int mask;
176 struct net_device *physindev; 179 struct net_device *physindev;
177 struct net_device *physoutdev; 180 union {
178 char neigh_header[8]; 181 struct net_device *physoutdev;
179 __be32 ipv4_daddr; 182 char neigh_header[8];
183 };
184 union {
185 __be32 ipv4_daddr;
186 struct in6_addr ipv6_daddr;
187 };
180}; 188};
181#endif 189#endif
182 190
@@ -859,6 +867,9 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
859 int len, int odd, struct sk_buff *skb), 867 int len, int odd, struct sk_buff *skb),
860 void *from, int length); 868 void *from, int length);
861 869
870int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
871 int offset, size_t size);
872
862struct skb_seq_state { 873struct skb_seq_state {
863 __u32 lower_offset; 874 __u32 lower_offset;
864 __u32 upper_offset; 875 __u32 upper_offset;
@@ -919,7 +930,6 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
919 skb->hash = hash; 930 skb->hash = hash;
920} 931}
921 932
922void __skb_get_hash(struct sk_buff *skb);
923static inline __u32 skb_get_hash(struct sk_buff *skb) 933static inline __u32 skb_get_hash(struct sk_buff *skb)
924{ 934{
925 if (!skb->l4_hash && !skb->sw_hash) 935 if (!skb->l4_hash && !skb->sw_hash)
@@ -928,6 +938,8 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
928 return skb->hash; 938 return skb->hash;
929} 939}
930 940
941__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
942
931static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) 943static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
932{ 944{
933 return skb->hash; 945 return skb->hash;
@@ -1935,8 +1947,8 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
1935 1947
1936 if (skb_transport_header_was_set(skb)) 1948 if (skb_transport_header_was_set(skb))
1937 return; 1949 return;
1938 else if (skb_flow_dissect(skb, &keys)) 1950 else if (skb_flow_dissect_flow_keys(skb, &keys))
1939 skb_set_transport_header(skb, keys.thoff); 1951 skb_set_transport_header(skb, keys.control.thoff);
1940 else 1952 else
1941 skb_set_transport_header(skb, offset_hint); 1953 skb_set_transport_header(skb, offset_hint);
1942} 1954}
@@ -2127,10 +2139,6 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
2127 kfree_skb(skb); 2139 kfree_skb(skb);
2128} 2140}
2129 2141
2130#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
2131#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
2132#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
2133
2134void *netdev_alloc_frag(unsigned int fragsz); 2142void *netdev_alloc_frag(unsigned int fragsz);
2135 2143
2136struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, 2144struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2185,6 +2193,11 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2185 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); 2193 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2186} 2194}
2187 2195
2196static inline void skb_free_frag(void *addr)
2197{
2198 __free_page_frag(addr);
2199}
2200
2188void *napi_alloc_frag(unsigned int fragsz); 2201void *napi_alloc_frag(unsigned int fragsz);
2189struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, 2202struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2190 unsigned int length, gfp_t gfp_mask); 2203 unsigned int length, gfp_t gfp_mask);
@@ -2692,9 +2705,15 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
2692int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); 2705int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
2693__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, 2706__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
2694 int len, __wsum csum); 2707 int len, __wsum csum);
2695int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 2708ssize_t skb_socket_splice(struct sock *sk,
2709 struct pipe_inode_info *pipe,
2710 struct splice_pipe_desc *spd);
2711int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2696 struct pipe_inode_info *pipe, unsigned int len, 2712 struct pipe_inode_info *pipe, unsigned int len,
2697 unsigned int flags); 2713 unsigned int flags,
2714 ssize_t (*splice_cb)(struct sock *,
2715 struct pipe_inode_info *,
2716 struct splice_pipe_desc *));
2698void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 2717void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2699unsigned int skb_zerocopy_headlen(const struct sk_buff *from); 2718unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
2700int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, 2719int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -2729,8 +2748,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2729__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, 2748__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
2730 __wsum csum); 2749 __wsum csum);
2731 2750
2732static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset, 2751static inline void * __must_check
2733 int len, void *data, int hlen, void *buffer) 2752__skb_header_pointer(const struct sk_buff *skb, int offset,
2753 int len, void *data, int hlen, void *buffer)
2734{ 2754{
2735 if (hlen - offset >= len) 2755 if (hlen - offset >= len)
2736 return data + offset; 2756 return data + offset;
@@ -2742,8 +2762,8 @@ static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
2742 return buffer; 2762 return buffer;
2743} 2763}
2744 2764
2745static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2765static inline void * __must_check
2746 int len, void *buffer) 2766skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
2747{ 2767{
2748 return __skb_header_pointer(skb, offset, len, skb->data, 2768 return __skb_header_pointer(skb, offset, len, skb->data,
2749 skb_headlen(skb), buffer); 2769 skb_headlen(skb), buffer);
@@ -3050,7 +3070,7 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3050 } 3070 }
3051 } else if (skb->csum_bad) { 3071 } else if (skb->csum_bad) {
3052 /* ip_summed == CHECKSUM_NONE in this case */ 3072 /* ip_summed == CHECKSUM_NONE in this case */
3053 return 1; 3073 return (__force __sum16)1;
3054 } 3074 }
3055 3075
3056 skb->csum = psum; 3076 skb->csum = psum;
@@ -3298,9 +3318,6 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
3298 return skb->queue_mapping != 0; 3318 return skb->queue_mapping != 0;
3299} 3319}
3300 3320
3301u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
3302 unsigned int num_tx_queues);
3303
3304static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 3321static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3305{ 3322{
3306#ifdef CONFIG_XFRM 3323#ifdef CONFIG_XFRM
@@ -3355,15 +3372,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3355static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) 3372static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
3356{ 3373{
3357 int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - 3374 int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
3358 skb_transport_offset(skb); 3375 skb_transport_offset(skb);
3359 __u16 csum; 3376 __wsum partial;
3360 3377
3361 csum = csum_fold(csum_partial(skb_transport_header(skb), 3378 partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
3362 plen, skb->csum));
3363 skb->csum = res; 3379 skb->csum = res;
3364 SKB_GSO_CB(skb)->csum_start -= plen; 3380 SKB_GSO_CB(skb)->csum_start -= plen;
3365 3381
3366 return csum; 3382 return csum_fold(partial);
3367} 3383}
3368 3384
3369static inline bool skb_is_gso(const struct sk_buff *skb) 3385static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3418,10 +3434,9 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
3418bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 3434bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
3419 3435
3420int skb_checksum_setup(struct sk_buff *skb, bool recalculate); 3436int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
3421 3437struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
3422u32 skb_get_poff(const struct sk_buff *skb); 3438 unsigned int transport_len,
3423u32 __skb_get_poff(const struct sk_buff *skb, void *data, 3439 __sum16(*skb_chkf)(struct sk_buff *skb));
3424 const struct flow_keys *keys, int hlen);
3425 3440
3426/** 3441/**
3427 * skb_head_is_locked - Determine if the skb->head is locked down 3442 * skb_head_is_locked - Determine if the skb->head is locked down
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ffd24c830151..a99f0e5243e1 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -240,8 +240,8 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
240 * belongs to. 240 * belongs to.
241 * 0 = zero alloc 241 * 0 = zero alloc
242 * 1 = 65 .. 96 bytes 242 * 1 = 65 .. 96 bytes
243 * 2 = 120 .. 192 bytes 243 * 2 = 129 .. 192 bytes
244 * n = 2^(n-1) .. 2^n -1 244 * n = 2^(n-1)+1 .. 2^n
245 */ 245 */
246static __always_inline int kmalloc_index(size_t size) 246static __always_inline int kmalloc_index(size_t size)
247{ 247{
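A worked example of the corrected ranges (illustrative, assuming the common kmalloc cache layout): a 100-byte request falls in 2^6+1 .. 2^7 = 65..128 and maps to index 7, the 128-byte cache, while a 150-byte request is caught by the dedicated 129..192-byte cache at index 2, which is what the corrected "2 = 129 .. 192 bytes" line documents.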
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index d600afb21926..da3c593f9845 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -27,6 +27,8 @@ struct smpboot_thread_data;
27 * @pre_unpark: Optional unpark function, called before the thread is 27 * @pre_unpark: Optional unpark function, called before the thread is
28 * unparked (cpu online). This is not guaranteed to be 28 * unparked (cpu online). This is not guaranteed to be
29 * called on the target cpu of the thread. Careful! 29 * called on the target cpu of the thread. Careful!
30 * @cpumask: Internal state. To update which threads are unparked,
31 * call smpboot_update_cpumask_percpu_thread().
30 * @selfparking: Thread is not parked by the park function. 32 * @selfparking: Thread is not parked by the park function.
31 * @thread_comm: The base name of the thread 33 * @thread_comm: The base name of the thread
32 */ 34 */
@@ -41,11 +43,14 @@ struct smp_hotplug_thread {
41 void (*park)(unsigned int cpu); 43 void (*park)(unsigned int cpu);
42 void (*unpark)(unsigned int cpu); 44 void (*unpark)(unsigned int cpu);
43 void (*pre_unpark)(unsigned int cpu); 45 void (*pre_unpark)(unsigned int cpu);
46 cpumask_var_t cpumask;
44 bool selfparking; 47 bool selfparking;
45 const char *thread_comm; 48 const char *thread_comm;
46}; 49};
47 50
48int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); 51int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
49void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); 52void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
53int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
54 const struct cpumask *);
50 55
51#endif 56#endif
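A hedged sketch of how the new cpumask handling might be used (none of this comes from the diff; the demo_* names are invented, and the thread_fn/thread_should_run hooks are assumed from the rest of struct smp_hotplug_thread): register a per-CPU thread, then restrict it to CPU 0 so the framework parks the threads of all other CPUs and records the mask in the new cpumask field.

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);

static int demo_should_run(unsigned int cpu)
{
	return 0;			/* nothing to do in this sketch */
}

static void demo_fn(unsigned int cpu)
{
	/* per-CPU work would go here */
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_init(void)
{
	int ret;

	ret = smpboot_register_percpu_thread(&demo_threads);
	if (ret)
		return ret;

	/*
	 * Only CPUs in the passed mask keep an unparked thread; the
	 * framework parks the rest and tracks the mask internally in
	 * demo_threads.cpumask.
	 */
	return smpboot_update_cpumask_percpu_thread(&demo_threads,
						    cpumask_of(0));
}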
diff --git a/include/linux/soc/sunxi/sunxi_sram.h b/include/linux/soc/sunxi/sunxi_sram.h
new file mode 100644
index 000000000000..c5f663bba9c2
--- /dev/null
+++ b/include/linux/soc/sunxi/sunxi_sram.h
@@ -0,0 +1,19 @@
1/*
2 * Allwinner SoCs SRAM Controller Driver
3 *
4 * Copyright (C) 2015 Maxime Ripard
5 *
6 * Author: Maxime Ripard <maxime.ripard@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#ifndef _SUNXI_SRAM_H_
14#define _SUNXI_SRAM_H_
15
16int sunxi_sram_claim(struct device *dev);
17int sunxi_sram_release(struct device *dev);
18
19#endif /* _SUNXI_SRAM_H_ */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 083ac388098e..fddebc617469 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -1,7 +1,10 @@
1#ifndef __SOCK_DIAG_H__ 1#ifndef __SOCK_DIAG_H__
2#define __SOCK_DIAG_H__ 2#define __SOCK_DIAG_H__
3 3
4#include <linux/netlink.h>
4#include <linux/user_namespace.h> 5#include <linux/user_namespace.h>
6#include <net/net_namespace.h>
7#include <net/sock.h>
5#include <uapi/linux/sock_diag.h> 8#include <uapi/linux/sock_diag.h>
6 9
7struct sk_buff; 10struct sk_buff;
@@ -11,6 +14,7 @@ struct sock;
11struct sock_diag_handler { 14struct sock_diag_handler {
12 __u8 family; 15 __u8 family;
13 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); 16 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
17 int (*get_info)(struct sk_buff *skb, struct sock *sk);
14}; 18};
15 19
16int sock_diag_register(const struct sock_diag_handler *h); 20int sock_diag_register(const struct sock_diag_handler *h);
@@ -26,4 +30,42 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
26int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, 30int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
27 struct sk_buff *skb, int attrtype); 31 struct sk_buff *skb, int attrtype);
28 32
33static inline
34enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
35{
36 switch (sk->sk_family) {
37 case AF_INET:
38 switch (sk->sk_protocol) {
39 case IPPROTO_TCP:
40 return SKNLGRP_INET_TCP_DESTROY;
41 case IPPROTO_UDP:
42 return SKNLGRP_INET_UDP_DESTROY;
43 default:
44 return SKNLGRP_NONE;
45 }
46 case AF_INET6:
47 switch (sk->sk_protocol) {
48 case IPPROTO_TCP:
49 return SKNLGRP_INET6_TCP_DESTROY;
50 case IPPROTO_UDP:
51 return SKNLGRP_INET6_UDP_DESTROY;
52 default:
53 return SKNLGRP_NONE;
54 }
55 default:
56 return SKNLGRP_NONE;
57 }
58}
59
60static inline
61bool sock_diag_has_destroy_listeners(const struct sock *sk)
62{
63 const struct net *n = sock_net(sk);
64 const enum sknetlink_groups group = sock_diag_destroy_group(sk);
65
66 return group != SKNLGRP_NONE && n->diag_nlsk &&
67 netlink_has_listeners(n->diag_nlsk, group);
68}
69void sock_diag_broadcast_destroy(struct sock *sk);
70
29#endif 71#endif
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
index e741e8baad92..85b8ee67e937 100644
--- a/include/linux/spi/cc2520.h
+++ b/include/linux/spi/cc2520.h
@@ -21,7 +21,6 @@ struct cc2520_platform_data {
21 int sfd; 21 int sfd;
22 int reset; 22 int reset;
23 int vreg; 23 int vreg;
24 bool amplified;
25}; 24};
26 25
27#endif 26#endif
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 4568a5cc9ab8..c3d1a525bacc 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -29,10 +29,13 @@ struct ssb_sprom {
29 u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */ 29 u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */
30 u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */ 30 u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */
31 u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */ 31 u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */
32 u8 et2mac[6] __aligned(sizeof(u16)); /* MAC address for extra Ethernet */
32 u8 et0phyaddr; /* MII address for enet0 */ 33 u8 et0phyaddr; /* MII address for enet0 */
33 u8 et1phyaddr; /* MII address for enet1 */ 34 u8 et1phyaddr; /* MII address for enet1 */
35 u8 et2phyaddr; /* MII address for enet2 */
34 u8 et0mdcport; /* MDIO for enet0 */ 36 u8 et0mdcport; /* MDIO for enet0 */
35 u8 et1mdcport; /* MDIO for enet1 */ 37 u8 et1mdcport; /* MDIO for enet1 */
38 u8 et2mdcport; /* MDIO for enet2 */
36 u16 dev_id; /* Device ID overriding e.g. PCI ID */ 39 u16 dev_id; /* Device ID overriding e.g. PCI ID */
37 u16 board_rev; /* Board revision number from SPROM. */ 40 u16 board_rev; /* Board revision number from SPROM. */
38 u16 board_num; /* Board number from SPROM. */ 41 u16 board_num; /* Board number from SPROM. */
@@ -88,11 +91,14 @@ struct ssb_sprom {
88 u32 ofdm5glpo; /* 5.2GHz OFDM power offset */ 91 u32 ofdm5glpo; /* 5.2GHz OFDM power offset */
89 u32 ofdm5gpo; /* 5.3GHz OFDM power offset */ 92 u32 ofdm5gpo; /* 5.3GHz OFDM power offset */
90 u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */ 93 u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */
94 u32 boardflags;
95 u32 boardflags2;
96 u32 boardflags3;
97 /* TODO: Switch all drivers to new u32 fields and drop below ones */
91 u16 boardflags_lo; /* Board flags (bits 0-15) */ 98 u16 boardflags_lo; /* Board flags (bits 0-15) */
92 u16 boardflags_hi; /* Board flags (bits 16-31) */ 99 u16 boardflags_hi; /* Board flags (bits 16-31) */
93 u16 boardflags2_lo; /* Board flags (bits 32-47) */ 100 u16 boardflags2_lo; /* Board flags (bits 32-47) */
94 u16 boardflags2_hi; /* Board flags (bits 48-63) */ 101 u16 boardflags2_hi; /* Board flags (bits 48-63) */
95 /* TODO store board flags in a single u64 */
96 102
97 struct ssb_sprom_core_pwr_info core_pwr_info[4]; 103 struct ssb_sprom_core_pwr_info core_pwr_info[4];
98 104
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 076af437284d..9c61c7cda936 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -3,7 +3,6 @@
3 3
4#include <uapi/linux/stddef.h> 4#include <uapi/linux/stddef.h>
5 5
6
7#undef NULL 6#undef NULL
8#define NULL ((void *)0) 7#define NULL ((void *)0)
9 8
@@ -14,10 +13,9 @@ enum {
14 13
15#undef offsetof 14#undef offsetof
16#ifdef __compiler_offsetof 15#ifdef __compiler_offsetof
17#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER) 16#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
18#else 17#else
19#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) 18#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
20#endif
21#endif 19#endif
22 20
23/** 21/**
@@ -28,3 +26,5 @@ enum {
28 */ 26 */
29#define offsetofend(TYPE, MEMBER) \ 27#define offsetofend(TYPE, MEMBER) \
30 (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) 28 (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
29
30#endif
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7f484a239f53..c735f5c91eea 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -99,6 +99,7 @@ struct plat_stmmacenet_data {
99 int phy_addr; 99 int phy_addr;
100 int interface; 100 int interface;
101 struct stmmac_mdio_bus_data *mdio_bus_data; 101 struct stmmac_mdio_bus_data *mdio_bus_data;
102 struct device_node *phy_node;
102 struct stmmac_dma_cfg *dma_cfg; 103 struct stmmac_dma_cfg *dma_cfg;
103 int clk_csr; 104 int clk_csr;
104 int has_gmac; 105 int has_gmac;
diff --git a/include/linux/string.h b/include/linux/string.h
index e40099e585c9..a8d90db9c4b0 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -111,6 +111,7 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
111extern void * memchr(const void *,int,__kernel_size_t); 111extern void * memchr(const void *,int,__kernel_size_t);
112#endif 112#endif
113void *memchr_inv(const void *s, int c, size_t n); 113void *memchr_inv(const void *s, int c, size_t n);
114char *strreplace(char *s, char old, char new);
114 115
115extern void kfree_const(const void *x); 116extern void kfree_const(const void *x);
116 117
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 2ca67b55e0fe..8df43c9f11dc 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -37,7 +37,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
37void xprt_free_bc_request(struct rpc_rqst *req); 37void xprt_free_bc_request(struct rpc_rqst *req);
38int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); 38int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
39void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); 39void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
40int bc_send(struct rpc_rqst *req);
41 40
42/* 41/*
43 * Determine if a shared backchannel is in use 42 * Determine if a shared backchannel is in use
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 598ba80ec30c..131032f15cc1 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -56,6 +56,7 @@ struct rpc_clnt {
56 struct rpc_rtt * cl_rtt; /* RTO estimator data */ 56 struct rpc_rtt * cl_rtt; /* RTO estimator data */
57 const struct rpc_timeout *cl_timeout; /* Timeout strategy */ 57 const struct rpc_timeout *cl_timeout; /* Timeout strategy */
58 58
59 atomic_t cl_swapper; /* swapfile count */
59 int cl_nodelen; /* nodename length */ 60 int cl_nodelen; /* nodename length */
60 char cl_nodename[UNX_MAXNODENAME+1]; 61 char cl_nodename[UNX_MAXNODENAME+1];
61 struct rpc_pipe_dir_head cl_pipedir_objects; 62 struct rpc_pipe_dir_head cl_pipedir_objects;
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 5f1e6bd4c316..d703f0ef37d8 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -205,8 +205,7 @@ struct rpc_wait_queue {
205 */ 205 */
206struct rpc_task *rpc_new_task(const struct rpc_task_setup *); 206struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
207struct rpc_task *rpc_run_task(const struct rpc_task_setup *); 207struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
208struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, 208struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
209 const struct rpc_call_ops *ops);
210void rpc_put_task(struct rpc_task *); 209void rpc_put_task(struct rpc_task *);
211void rpc_put_task_async(struct rpc_task *); 210void rpc_put_task_async(struct rpc_task *);
212void rpc_exit_task(struct rpc_task *); 211void rpc_exit_task(struct rpc_task *);
@@ -269,4 +268,20 @@ static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
269} 268}
270#endif 269#endif
271 270
271#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
272int rpc_clnt_swap_activate(struct rpc_clnt *clnt);
273void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt);
274#else
275static inline int
276rpc_clnt_swap_activate(struct rpc_clnt *clnt)
277{
278 return -EINVAL;
279}
280
281static inline void
282rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
283{
284}
285#endif /* CONFIG_SUNRPC_SWAP */
286
272#endif /* _LINUX_SUNRPC_SCHED_H_ */ 287#endif /* _LINUX_SUNRPC_SCHED_H_ */
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index df8edf8ec914..cb94ee4181d4 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -172,6 +172,13 @@ struct svcxprt_rdma {
172#define RDMAXPRT_SQ_PENDING 2 172#define RDMAXPRT_SQ_PENDING 2
173#define RDMAXPRT_CONN_PENDING 3 173#define RDMAXPRT_CONN_PENDING 3
174 174
175#define RPCRDMA_MAX_SVC_SEGS (64) /* server max scatter/gather */
176#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
177#define RPCRDMA_MAXPAYLOAD RPCSVC_MAXPAYLOAD
178#else
179#define RPCRDMA_MAXPAYLOAD (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
180#endif
181
175#define RPCRDMA_LISTEN_BACKLOG 10 182#define RPCRDMA_LISTEN_BACKLOG 10
176/* The default ORD value is based on two outstanding full-size writes with a 183/* The default ORD value is based on two outstanding full-size writes with a
177 * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ 184 * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
@@ -182,10 +189,9 @@ struct svcxprt_rdma {
182 189
183/* svc_rdma_marshal.c */ 190/* svc_rdma_marshal.c */
184extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); 191extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
185extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *);
186extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, 192extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
187 struct rpcrdma_msg *, 193 struct rpcrdma_msg *,
188 enum rpcrdma_errcode, u32 *); 194 enum rpcrdma_errcode, __be32 *);
189extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); 195extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
190extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); 196extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
191extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, 197extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
@@ -212,7 +218,6 @@ extern int svc_rdma_sendto(struct svc_rqst *);
212extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); 218extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
213extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, 219extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
214 enum rpcrdma_errcode); 220 enum rpcrdma_errcode);
215struct page *svc_rdma_get_page(void);
216extern int svc_rdma_post_recv(struct svcxprt_rdma *); 221extern int svc_rdma_post_recv(struct svcxprt_rdma *);
217extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); 222extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
218extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); 223extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 8b93ef53df3c..0fb9acbb4780 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -133,6 +133,9 @@ struct rpc_xprt_ops {
133 void (*close)(struct rpc_xprt *xprt); 133 void (*close)(struct rpc_xprt *xprt);
134 void (*destroy)(struct rpc_xprt *xprt); 134 void (*destroy)(struct rpc_xprt *xprt);
135 void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); 135 void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
136 int (*enable_swap)(struct rpc_xprt *xprt);
137 void (*disable_swap)(struct rpc_xprt *xprt);
138 void (*inject_disconnect)(struct rpc_xprt *xprt);
136}; 139};
137 140
138/* 141/*
@@ -180,7 +183,7 @@ struct rpc_xprt {
180 atomic_t num_reqs; /* total slots */ 183 atomic_t num_reqs; /* total slots */
181 unsigned long state; /* transport state */ 184 unsigned long state; /* transport state */
182 unsigned char resvport : 1; /* use a reserved port */ 185 unsigned char resvport : 1; /* use a reserved port */
183 unsigned int swapper; /* we're swapping over this 186 atomic_t swapper; /* we're swapping over this
184 transport */ 187 transport */
185 unsigned int bind_index; /* bind function index */ 188 unsigned int bind_index; /* bind function index */
186 189
@@ -212,7 +215,8 @@ struct rpc_xprt {
212#if defined(CONFIG_SUNRPC_BACKCHANNEL) 215#if defined(CONFIG_SUNRPC_BACKCHANNEL)
213 struct svc_serv *bc_serv; /* The RPC service which will */ 216 struct svc_serv *bc_serv; /* The RPC service which will */
214 /* process the callback */ 217 /* process the callback */
215 unsigned int bc_alloc_count; /* Total number of preallocs */ 218 int bc_alloc_count; /* Total number of preallocs */
219 atomic_t bc_free_slots;
216 spinlock_t bc_pa_lock; /* Protects the preallocated 220 spinlock_t bc_pa_lock; /* Protects the preallocated
217 * items */ 221 * items */
218 struct list_head bc_pa_list; /* List of preallocated 222 struct list_head bc_pa_list; /* List of preallocated
@@ -241,6 +245,7 @@ struct rpc_xprt {
241 const char *address_strings[RPC_DISPLAY_MAX]; 245 const char *address_strings[RPC_DISPLAY_MAX];
242#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 246#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
243 struct dentry *debugfs; /* debugfs directory */ 247 struct dentry *debugfs; /* debugfs directory */
248 atomic_t inject_disconnect;
244#endif 249#endif
245}; 250};
246 251
@@ -327,6 +332,18 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *
327 return p + xprt->tsh_size; 332 return p + xprt->tsh_size;
328} 333}
329 334
335static inline int
336xprt_enable_swap(struct rpc_xprt *xprt)
337{
338 return xprt->ops->enable_swap(xprt);
339}
340
341static inline void
342xprt_disable_swap(struct rpc_xprt *xprt)
343{
344 xprt->ops->disable_swap(xprt);
345}
346
330/* 347/*
331 * Transport switch helper functions 348 * Transport switch helper functions
332 */ 349 */
@@ -345,7 +362,6 @@ void xprt_release_rqst_cong(struct rpc_task *task);
345void xprt_disconnect_done(struct rpc_xprt *xprt); 362void xprt_disconnect_done(struct rpc_xprt *xprt);
346void xprt_force_disconnect(struct rpc_xprt *xprt); 363void xprt_force_disconnect(struct rpc_xprt *xprt);
347void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); 364void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
348int xs_swapper(struct rpc_xprt *xprt, int enable);
349 365
350bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); 366bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
351void xprt_unlock_connect(struct rpc_xprt *, void *); 367void xprt_unlock_connect(struct rpc_xprt *, void *);
@@ -431,6 +447,23 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
431 return test_and_set_bit(XPRT_BINDING, &xprt->state); 447 return test_and_set_bit(XPRT_BINDING, &xprt->state);
432} 448}
433 449
450#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
451extern unsigned int rpc_inject_disconnect;
452static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
453{
454 if (!rpc_inject_disconnect)
455 return;
456 if (atomic_dec_return(&xprt->inject_disconnect))
457 return;
458 atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
459 xprt->ops->inject_disconnect(xprt);
460}
461#else
462static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
463{
464}
465#endif
466
434#endif /* __KERNEL__*/ 467#endif /* __KERNEL__*/
435 468
436#endif /* _LINUX_SUNRPC_XPRT_H */ 469#endif /* _LINUX_SUNRPC_XPRT_H */
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index c984c85981ea..b17613052cc3 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -56,7 +56,8 @@
56 56
57#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */ 57#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
58 58
59/* memory registration strategies */ 59/* Memory registration strategies, by number.
60 * This is part of a kernel / user space API. Do not remove. */
60enum rpcrdma_memreg { 61enum rpcrdma_memreg {
61 RPCRDMA_BOUNCEBUFFERS = 0, 62 RPCRDMA_BOUNCEBUFFERS = 0,
62 RPCRDMA_REGISTER, 63 RPCRDMA_REGISTER,
diff --git a/include/linux/swap.h b/include/linux/swap.h
index cee108cbe2d5..38874729dc5f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -377,7 +377,6 @@ extern void end_swap_bio_write(struct bio *bio, int err);
377extern int __swap_writepage(struct page *page, struct writeback_control *wbc, 377extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
378 void (*end_write_func)(struct bio *, int)); 378 void (*end_write_func)(struct bio *, int));
379extern int swap_set_page_dirty(struct page *page); 379extern int swap_set_page_dirty(struct page *page);
380extern void end_swap_bio_read(struct bio *bio, int err);
381 380
382int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 381int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
383 unsigned long nr_pages, sector_t start_block); 382 unsigned long nr_pages, sector_t start_block);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 76d1e38aabe1..b45c45b8c829 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -111,14 +111,14 @@ union bpf_attr;
111#define __SC_STR_ADECL(t, a) #a 111#define __SC_STR_ADECL(t, a) #a
112#define __SC_STR_TDECL(t, a) #t 112#define __SC_STR_TDECL(t, a) #t
113 113
114extern struct ftrace_event_class event_class_syscall_enter; 114extern struct trace_event_class event_class_syscall_enter;
115extern struct ftrace_event_class event_class_syscall_exit; 115extern struct trace_event_class event_class_syscall_exit;
116extern struct trace_event_functions enter_syscall_print_funcs; 116extern struct trace_event_functions enter_syscall_print_funcs;
117extern struct trace_event_functions exit_syscall_print_funcs; 117extern struct trace_event_functions exit_syscall_print_funcs;
118 118
119#define SYSCALL_TRACE_ENTER_EVENT(sname) \ 119#define SYSCALL_TRACE_ENTER_EVENT(sname) \
120 static struct syscall_metadata __syscall_meta_##sname; \ 120 static struct syscall_metadata __syscall_meta_##sname; \
121 static struct ftrace_event_call __used \ 121 static struct trace_event_call __used \
122 event_enter_##sname = { \ 122 event_enter_##sname = { \
123 .class = &event_class_syscall_enter, \ 123 .class = &event_class_syscall_enter, \
124 { \ 124 { \
@@ -128,13 +128,13 @@ extern struct trace_event_functions exit_syscall_print_funcs;
128 .data = (void *)&__syscall_meta_##sname,\ 128 .data = (void *)&__syscall_meta_##sname,\
129 .flags = TRACE_EVENT_FL_CAP_ANY, \ 129 .flags = TRACE_EVENT_FL_CAP_ANY, \
130 }; \ 130 }; \
131 static struct ftrace_event_call __used \ 131 static struct trace_event_call __used \
132 __attribute__((section("_ftrace_events"))) \ 132 __attribute__((section("_ftrace_events"))) \
133 *__event_enter_##sname = &event_enter_##sname; 133 *__event_enter_##sname = &event_enter_##sname;
134 134
135#define SYSCALL_TRACE_EXIT_EVENT(sname) \ 135#define SYSCALL_TRACE_EXIT_EVENT(sname) \
136 static struct syscall_metadata __syscall_meta_##sname; \ 136 static struct syscall_metadata __syscall_meta_##sname; \
137 static struct ftrace_event_call __used \ 137 static struct trace_event_call __used \
138 event_exit_##sname = { \ 138 event_exit_##sname = { \
139 .class = &event_class_syscall_exit, \ 139 .class = &event_class_syscall_exit, \
140 { \ 140 { \
@@ -144,7 +144,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
144 .data = (void *)&__syscall_meta_##sname,\ 144 .data = (void *)&__syscall_meta_##sname,\
145 .flags = TRACE_EVENT_FL_CAP_ANY, \ 145 .flags = TRACE_EVENT_FL_CAP_ANY, \
146 }; \ 146 }; \
147 static struct ftrace_event_call __used \ 147 static struct trace_event_call __used \
148 __attribute__((section("_ftrace_events"))) \ 148 __attribute__((section("_ftrace_events"))) \
149 *__event_exit_##sname = &event_exit_##sname; 149 *__event_exit_##sname = &event_exit_##sname;
150 150
@@ -827,15 +827,15 @@ asmlinkage long sys_syncfs(int fd);
827asmlinkage long sys_fork(void); 827asmlinkage long sys_fork(void);
828asmlinkage long sys_vfork(void); 828asmlinkage long sys_vfork(void);
829#ifdef CONFIG_CLONE_BACKWARDS 829#ifdef CONFIG_CLONE_BACKWARDS
830asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, 830asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long,
831 int __user *); 831 int __user *);
832#else 832#else
833#ifdef CONFIG_CLONE_BACKWARDS3 833#ifdef CONFIG_CLONE_BACKWARDS3
834asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, 834asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
835 int __user *, int); 835 int __user *, unsigned long);
836#else 836#else
837asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, 837asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
838 int __user *, int); 838 int __user *, unsigned long);
839#endif 839#endif
840#endif 840#endif
841 841
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 795d5fea5697..fa7bc29925c9 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -188,6 +188,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
188void unregister_sysctl_table(struct ctl_table_header * table); 188void unregister_sysctl_table(struct ctl_table_header * table);
189 189
190extern int sysctl_init(void); 190extern int sysctl_init(void);
191
192extern struct ctl_table sysctl_mount_point[];
193
191#else /* CONFIG_SYSCTL */ 194#else /* CONFIG_SYSCTL */
192static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table) 195static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
193{ 196{
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 99382c0df17e..9f65758311a4 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -210,6 +210,10 @@ int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
210int __must_check sysfs_move_dir_ns(struct kobject *kobj, 210int __must_check sysfs_move_dir_ns(struct kobject *kobj,
211 struct kobject *new_parent_kobj, 211 struct kobject *new_parent_kobj,
212 const void *new_ns); 212 const void *new_ns);
213int __must_check sysfs_create_mount_point(struct kobject *parent_kobj,
214 const char *name);
215void sysfs_remove_mount_point(struct kobject *parent_kobj,
216 const char *name);
213 217
214int __must_check sysfs_create_file_ns(struct kobject *kobj, 218int __must_check sysfs_create_file_ns(struct kobject *kobj,
215 const struct attribute *attr, 219 const struct attribute *attr,
@@ -298,6 +302,17 @@ static inline int sysfs_move_dir_ns(struct kobject *kobj,
298 return 0; 302 return 0;
299} 303}
300 304
305static inline int sysfs_create_mount_point(struct kobject *parent_kobj,
306 const char *name)
307{
308 return 0;
309}
310
311static inline void sysfs_remove_mount_point(struct kobject *parent_kobj,
312 const char *name)
313{
314}
315
301static inline int sysfs_create_file_ns(struct kobject *kobj, 316static inline int sysfs_create_file_ns(struct kobject *kobj,
302 const struct attribute *attr, 317 const struct attribute *attr,
303 const void *ns) 318 const void *ns)
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 4b7b875a7ce1..c3a7f0cc3a27 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -47,12 +47,12 @@
47#define SYSLOG_FROM_READER 0 47#define SYSLOG_FROM_READER 0
48#define SYSLOG_FROM_PROC 1 48#define SYSLOG_FROM_PROC 1
49 49
50int do_syslog(int type, char __user *buf, int count, bool from_file); 50int do_syslog(int type, char __user *buf, int count, int source);
51 51
52#ifdef CONFIG_PRINTK 52#ifdef CONFIG_PRINTK
53int check_syslog_permissions(int type, bool from_file); 53int check_syslog_permissions(int type, int source);
54#else 54#else
55static inline int check_syslog_permissions(int type, bool from_file) 55static inline int check_syslog_permissions(int type, int source)
56{ 56{
57 return 0; 57 return 0;
58} 58}
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e8bbf403618f..48c3696e8645 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -149,11 +149,16 @@ struct tcp_sock {
149 * sum(delta(rcv_nxt)), or how many bytes 149 * sum(delta(rcv_nxt)), or how many bytes
150 * were acked. 150 * were acked.
151 */ 151 */
152 u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
153 * total number of segments in.
154 */
152 u32 rcv_nxt; /* What we want to receive next */ 155 u32 rcv_nxt; /* What we want to receive next */
153 u32 copied_seq; /* Head of yet unread data */ 156 u32 copied_seq; /* Head of yet unread data */
154 u32 rcv_wup; /* rcv_nxt on last window update sent */ 157 u32 rcv_wup; /* rcv_nxt on last window update sent */
155 u32 snd_nxt; /* Next sequence we send */ 158 u32 snd_nxt; /* Next sequence we send */
156 159 u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
160 * The total number of segments sent.
161 */
157 u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked 162 u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
158 * sum(delta(snd_una)), or how many bytes 163 * sum(delta(snd_una)), or how many bytes
159 * were acked. 164 * were acked.
@@ -201,6 +206,7 @@ struct tcp_sock {
201 syn_fastopen:1, /* SYN includes Fast Open option */ 206 syn_fastopen:1, /* SYN includes Fast Open option */
202 syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ 207 syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
203 syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ 208 syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
209 save_syn:1, /* Save headers of SYN packet */
204 is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ 210 is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
205 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ 211 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
206 212
@@ -328,6 +334,7 @@ struct tcp_sock {
328 * socket. Used to retransmit SYNACKs etc. 334 * socket. Used to retransmit SYNACKs etc.
329 */ 335 */
330 struct request_sock *fastopen_rsk; 336 struct request_sock *fastopen_rsk;
337 u32 *saved_syn;
331}; 338};
332 339
333enum tsq_flags { 340enum tsq_flags {
@@ -395,4 +402,10 @@ static inline int fastopen_init_queue(struct sock *sk, int backlog)
395 return 0; 402 return 0;
396} 403}
397 404
405static inline void tcp_saved_syn_free(struct tcp_sock *tp)
406{
407 kfree(tp->saved_syn);
408 tp->saved_syn = NULL;
409}
410
398#endif /* _LINUX_TCP_H */ 411#endif /* _LINUX_TCP_H */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 5eac316490ea..037e9df2f610 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -40,6 +40,9 @@
40/* No upper/lower limit requirement */ 40/* No upper/lower limit requirement */
41#define THERMAL_NO_LIMIT ((u32)~0) 41#define THERMAL_NO_LIMIT ((u32)~0)
42 42
43/* Default weight of a bound cooling device */
44#define THERMAL_WEIGHT_DEFAULT 0
45
43/* Unit conversion macros */ 46/* Unit conversion macros */
44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ 47#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
45 ((long)t-2732+5)/10 : ((long)t-2732-5)/10) 48 ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
@@ -56,10 +59,13 @@
56#define DEFAULT_THERMAL_GOVERNOR "fair_share" 59#define DEFAULT_THERMAL_GOVERNOR "fair_share"
57#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) 60#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE)
58#define DEFAULT_THERMAL_GOVERNOR "user_space" 61#define DEFAULT_THERMAL_GOVERNOR "user_space"
62#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR)
63#define DEFAULT_THERMAL_GOVERNOR "power_allocator"
59#endif 64#endif
60 65
61struct thermal_zone_device; 66struct thermal_zone_device;
62struct thermal_cooling_device; 67struct thermal_cooling_device;
68struct thermal_instance;
63 69
64enum thermal_device_mode { 70enum thermal_device_mode {
65 THERMAL_DEVICE_DISABLED = 0, 71 THERMAL_DEVICE_DISABLED = 0,
@@ -113,6 +119,12 @@ struct thermal_cooling_device_ops {
113 int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); 119 int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
114 int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); 120 int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
115 int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); 121 int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
122 int (*get_requested_power)(struct thermal_cooling_device *,
123 struct thermal_zone_device *, u32 *);
124 int (*state2power)(struct thermal_cooling_device *,
125 struct thermal_zone_device *, unsigned long, u32 *);
126 int (*power2state)(struct thermal_cooling_device *,
127 struct thermal_zone_device *, u32, unsigned long *);
116}; 128};
117 129
118struct thermal_cooling_device { 130struct thermal_cooling_device {
@@ -144,8 +156,7 @@ struct thermal_attr {
144 * @devdata: private pointer for device private data 156 * @devdata: private pointer for device private data
145 * @trips: number of trip points the thermal zone supports 157 * @trips: number of trip points the thermal zone supports
146 * @passive_delay: number of milliseconds to wait between polls when 158 * @passive_delay: number of milliseconds to wait between polls when
147 * performing passive cooling. Currenty only used by the 159 * performing passive cooling.
148 * step-wise governor
149 * @polling_delay: number of milliseconds to wait between polls when 160 * @polling_delay: number of milliseconds to wait between polls when
150 * checking whether trip points have been crossed (0 for 161 * checking whether trip points have been crossed (0 for
151 * interrupt driven systems) 162 * interrupt driven systems)
@@ -155,13 +166,13 @@ struct thermal_attr {
155 * @last_temperature: previous temperature read 166 * @last_temperature: previous temperature read
156 * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION 167 * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION
157 * @passive: 1 if you've crossed a passive trip point, 0 otherwise. 168 * @passive: 1 if you've crossed a passive trip point, 0 otherwise.
158 * Currenty only used by the step-wise governor.
159 * @forced_passive: If > 0, temperature at which to switch on all ACPI 169 * @forced_passive: If > 0, temperature at which to switch on all ACPI
160 * processor cooling devices. Currently only used by the 170 * processor cooling devices. Currently only used by the
161 * step-wise governor. 171 * step-wise governor.
162 * @ops: operations this &thermal_zone_device supports 172 * @ops: operations this &thermal_zone_device supports
163 * @tzp: thermal zone parameters 173 * @tzp: thermal zone parameters
164 * @governor: pointer to the governor for this thermal zone 174 * @governor: pointer to the governor for this thermal zone
175 * @governor_data: private pointer for governor data
165 * @thermal_instances: list of &struct thermal_instance of this thermal zone 176 * @thermal_instances: list of &struct thermal_instance of this thermal zone
166 * @idr: &struct idr to generate unique id for this zone's cooling 177 * @idr: &struct idr to generate unique id for this zone's cooling
167 * devices 178 * devices
@@ -186,8 +197,9 @@ struct thermal_zone_device {
186 int passive; 197 int passive;
187 unsigned int forced_passive; 198 unsigned int forced_passive;
188 struct thermal_zone_device_ops *ops; 199 struct thermal_zone_device_ops *ops;
189 const struct thermal_zone_params *tzp; 200 struct thermal_zone_params *tzp;
190 struct thermal_governor *governor; 201 struct thermal_governor *governor;
202 void *governor_data;
191 struct list_head thermal_instances; 203 struct list_head thermal_instances;
192 struct idr idr; 204 struct idr idr;
193 struct mutex lock; 205 struct mutex lock;
@@ -198,12 +210,19 @@ struct thermal_zone_device {
198/** 210/**
199 * struct thermal_governor - structure that holds thermal governor information 211 * struct thermal_governor - structure that holds thermal governor information
200 * @name: name of the governor 212 * @name: name of the governor
213 * @bind_to_tz: callback called when binding to a thermal zone. If it
214 * returns 0, the governor is bound to the thermal zone,
215 * otherwise it fails.
216 * @unbind_from_tz: callback called when a governor is unbound from a
217 * thermal zone.
201 * @throttle: callback called for every trip point even if temperature is 218 * @throttle: callback called for every trip point even if temperature is
202 * below the trip point temperature 219 * below the trip point temperature
203 * @governor_list: node in thermal_governor_list (in thermal_core.c) 220 * @governor_list: node in thermal_governor_list (in thermal_core.c)
204 */ 221 */
205struct thermal_governor { 222struct thermal_governor {
206 char name[THERMAL_NAME_LENGTH]; 223 char name[THERMAL_NAME_LENGTH];
224 int (*bind_to_tz)(struct thermal_zone_device *tz);
225 void (*unbind_from_tz)(struct thermal_zone_device *tz);
207 int (*throttle)(struct thermal_zone_device *tz, int trip); 226 int (*throttle)(struct thermal_zone_device *tz, int trip);
208 struct list_head governor_list; 227 struct list_head governor_list;
209}; 228};
@@ -214,9 +233,12 @@ struct thermal_bind_params {
214 233
215 /* 234 /*
216 * This is a measure of 'how effectively these devices can 235 * This is a measure of 'how effectively these devices can
217 * cool 'this' thermal zone. The shall be determined by platform 236 * cool 'this' thermal zone. It shall be determined by
218 * characterization. This is on a 'percentage' scale. 237 * platform characterization. This value is relative to the
219 * See Documentation/thermal/sysfs-api.txt for more information. 238 * rest of the weights so a cooling device whose weight is
239 * double that of another cooling device is twice as
240 * effective. See Documentation/thermal/sysfs-api.txt for more
241 * information.
220 */ 242 */
221 int weight; 243 int weight;
222 244
@@ -253,6 +275,44 @@ struct thermal_zone_params {
253 275
254 int num_tbps; /* Number of tbp entries */ 276 int num_tbps; /* Number of tbp entries */
255 struct thermal_bind_params *tbp; 277 struct thermal_bind_params *tbp;
278
279 /*
280 * Sustainable power (heat) that this thermal zone can dissipate in
281 * mW
282 */
283 u32 sustainable_power;
284
285 /*
286 * Proportional parameter of the PID controller when
287 * overshooting (i.e., when temperature is below the target)
288 */
289 s32 k_po;
290
291 /*
292 * Proportional parameter of the PID controller when
293 * undershooting
294 */
295 s32 k_pu;
296
297 /* Integral parameter of the PID controller */
298 s32 k_i;
299
300 /* Derivative parameter of the PID controller */
301 s32 k_d;
302
303 /* threshold below which the error is no longer accumulated */
304 s32 integral_cutoff;
305
306 /*
307 * @slope: slope of a linear temperature adjustment curve.
308 * Used by thermal zone drivers.
309 */
310 int slope;
311 /*
312 * @offset: offset of a linear temperature adjustment curve.
313 * Used by thermal zone drivers (default 0).
314 */
315 int offset;
256}; 316};
257 317
258struct thermal_genl_event { 318struct thermal_genl_event {
@@ -316,14 +376,25 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
316#endif 376#endif
317 377
318#if IS_ENABLED(CONFIG_THERMAL) 378#if IS_ENABLED(CONFIG_THERMAL)
379static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
380{
381 return cdev->ops->get_requested_power && cdev->ops->state2power &&
382 cdev->ops->power2state;
383}
384
385int power_actor_get_max_power(struct thermal_cooling_device *,
386 struct thermal_zone_device *tz, u32 *max_power);
387int power_actor_set_power(struct thermal_cooling_device *,
388 struct thermal_instance *, u32);
319struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, 389struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
320 void *, struct thermal_zone_device_ops *, 390 void *, struct thermal_zone_device_ops *,
321 const struct thermal_zone_params *, int, int); 391 struct thermal_zone_params *, int, int);
322void thermal_zone_device_unregister(struct thermal_zone_device *); 392void thermal_zone_device_unregister(struct thermal_zone_device *);
323 393
324int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, 394int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
325 struct thermal_cooling_device *, 395 struct thermal_cooling_device *,
326 unsigned long, unsigned long); 396 unsigned long, unsigned long,
397 unsigned int);
327int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, 398int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
328 struct thermal_cooling_device *); 399 struct thermal_cooling_device *);
329void thermal_zone_device_update(struct thermal_zone_device *); 400void thermal_zone_device_update(struct thermal_zone_device *);
@@ -343,6 +414,14 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
343void thermal_cdev_update(struct thermal_cooling_device *); 414void thermal_cdev_update(struct thermal_cooling_device *);
344void thermal_notify_framework(struct thermal_zone_device *, int); 415void thermal_notify_framework(struct thermal_zone_device *, int);
345#else 416#else
417static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
418{ return false; }
419static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
420 struct thermal_zone_device *tz, u32 *max_power)
421{ return 0; }
422static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
423 struct thermal_instance *tz, u32 power)
424{ return 0; }
346static inline struct thermal_zone_device *thermal_zone_device_register( 425static inline struct thermal_zone_device *thermal_zone_device_register(
347 const char *type, int trips, int mask, void *devdata, 426 const char *type, int trips, int mask, void *devdata,
348 struct thermal_zone_device_ops *ops, 427 struct thermal_zone_device_ops *ops,
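The power-actor helpers above are only meaningful for cooling devices that implement all three power callbacks (get_requested_power, state2power, power2state). A hedged sketch, using hypothetical governor code, of gating on cdev_is_power_actor() before asking for a budget:

#include <linux/thermal.h>

/* Hypothetical governor helper: query a power-aware cooling device. */
static int example_query_budget(struct thermal_zone_device *tz,
				struct thermal_cooling_device *cdev)
{
	u32 max_power;
	int ret;

	if (!cdev_is_power_actor(cdev))
		return -EINVAL;	/* not power-aware, fall back to cooling states */

	ret = power_actor_get_max_power(cdev, tz, &max_power);
	if (ret)
		return ret;

	/* a real governor would now split its power budget across the actors */
	return 0;
}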
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 4191b5623a28..edbfc9a5293e 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -13,8 +13,6 @@
13 13
14#ifdef CONFIG_GENERIC_CLOCKEVENTS 14#ifdef CONFIG_GENERIC_CLOCKEVENTS
15extern void __init tick_init(void); 15extern void __init tick_init(void);
16extern void tick_freeze(void);
17extern void tick_unfreeze(void);
18/* Should be core only, but ARM BL switcher requires it */ 16/* Should be core only, but ARM BL switcher requires it */
19extern void tick_suspend_local(void); 17extern void tick_suspend_local(void);
20/* Should be core only, but XEN resume magic and ARM BL switcher require it */ 18/* Should be core only, but XEN resume magic and ARM BL switcher require it */
@@ -23,14 +21,20 @@ extern void tick_handover_do_timer(void);
23extern void tick_cleanup_dead_cpu(int cpu); 21extern void tick_cleanup_dead_cpu(int cpu);
24#else /* CONFIG_GENERIC_CLOCKEVENTS */ 22#else /* CONFIG_GENERIC_CLOCKEVENTS */
25static inline void tick_init(void) { } 23static inline void tick_init(void) { }
26static inline void tick_freeze(void) { }
27static inline void tick_unfreeze(void) { }
28static inline void tick_suspend_local(void) { } 24static inline void tick_suspend_local(void) { }
29static inline void tick_resume_local(void) { } 25static inline void tick_resume_local(void) { }
30static inline void tick_handover_do_timer(void) { } 26static inline void tick_handover_do_timer(void) { }
31static inline void tick_cleanup_dead_cpu(int cpu) { } 27static inline void tick_cleanup_dead_cpu(int cpu) { }
32#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ 28#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
33 29
30#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
31extern void tick_freeze(void);
32extern void tick_unfreeze(void);
33#else
34static inline void tick_freeze(void) { }
35static inline void tick_unfreeze(void) { }
36#endif
37
34#ifdef CONFIG_TICK_ONESHOT 38#ifdef CONFIG_TICK_ONESHOT
35extern void tick_irq_enter(void); 39extern void tick_irq_enter(void);
36# ifndef arch_needs_cpu 40# ifndef arch_needs_cpu
@@ -63,10 +67,13 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode);
63static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } 67static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
64#endif /* BROADCAST */ 68#endif /* BROADCAST */
65 69
66#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) 70#ifdef CONFIG_GENERIC_CLOCKEVENTS
67extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); 71extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
68#else 72#else
69static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; } 73static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
74{
75 return 0;
76}
70#endif 77#endif
71 78
72static inline void tick_broadcast_enable(void) 79static inline void tick_broadcast_enable(void)
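tick_freeze()/tick_unfreeze() are now declared only when both CONFIG_GENERIC_CLOCKEVENTS and CONFIG_SUSPEND are set; other configurations get empty stubs. A minimal sketch, assuming a hypothetical suspend-to-idle path, of how the pair brackets the quiescent period:

#include <linux/tick.h>

static void example_enter_s2idle(void)
{
	tick_freeze();		/* stop this CPU's tick; the last CPU in
				 * also suspends timekeeping */
	/* ... enter the platform's low-power state here ... */
	tick_unfreeze();	/* first CPU out resumes timekeeping and the tick */
}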
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 3aa72e648650..6e191e4e6ab6 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -145,7 +145,6 @@ static inline void getboottime(struct timespec *ts)
145} 145}
146#endif 146#endif
147 147
148#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
149#define ktime_get_real_ts64(ts) getnstimeofday64(ts) 148#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
150 149
151/* 150/*
diff --git a/include/linux/ftrace_event.h b/include/linux/trace_events.h
index f9ecf63d47f1..1063c850dbab 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/trace_events.h
@@ -1,6 +1,6 @@
1 1
2#ifndef _LINUX_FTRACE_EVENT_H 2#ifndef _LINUX_TRACE_EVENT_H
3#define _LINUX_FTRACE_EVENT_H 3#define _LINUX_TRACE_EVENT_H
4 4
5#include <linux/ring_buffer.h> 5#include <linux/ring_buffer.h>
6#include <linux/trace_seq.h> 6#include <linux/trace_seq.h>
@@ -25,35 +25,35 @@ struct trace_print_flags_u64 {
25 const char *name; 25 const char *name;
26}; 26};
27 27
28const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim, 28const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
29 unsigned long flags, 29 unsigned long flags,
30 const struct trace_print_flags *flag_array); 30 const struct trace_print_flags *flag_array);
31 31
32const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, 32const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
33 const struct trace_print_flags *symbol_array); 33 const struct trace_print_flags *symbol_array);
34 34
35#if BITS_PER_LONG == 32 35#if BITS_PER_LONG == 32
36const char *ftrace_print_symbols_seq_u64(struct trace_seq *p, 36const char *trace_print_symbols_seq_u64(struct trace_seq *p,
37 unsigned long long val, 37 unsigned long long val,
38 const struct trace_print_flags_u64 38 const struct trace_print_flags_u64
39 *symbol_array); 39 *symbol_array);
40#endif 40#endif
41 41
42const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, 42const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
43 unsigned int bitmask_size); 43 unsigned int bitmask_size);
44 44
45const char *ftrace_print_hex_seq(struct trace_seq *p, 45const char *trace_print_hex_seq(struct trace_seq *p,
46 const unsigned char *buf, int len); 46 const unsigned char *buf, int len);
47 47
48const char *ftrace_print_array_seq(struct trace_seq *p, 48const char *trace_print_array_seq(struct trace_seq *p,
49 const void *buf, int count, 49 const void *buf, int count,
50 size_t el_size); 50 size_t el_size);
51 51
52struct trace_iterator; 52struct trace_iterator;
53struct trace_event; 53struct trace_event;
54 54
55int ftrace_raw_output_prep(struct trace_iterator *iter, 55int trace_raw_output_prep(struct trace_iterator *iter,
56 struct trace_event *event); 56 struct trace_event *event);
57 57
58/* 58/*
59 * The trace entry - the most basic unit of tracing. This is what 59 * The trace entry - the most basic unit of tracing. This is what
@@ -68,7 +68,7 @@ struct trace_entry {
68 int pid; 68 int pid;
69}; 69};
70 70
71#define FTRACE_MAX_EVENT \ 71#define TRACE_EVENT_TYPE_MAX \
72 ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) 72 ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
73 73
74/* 74/*
@@ -132,8 +132,8 @@ struct trace_event {
132 struct trace_event_functions *funcs; 132 struct trace_event_functions *funcs;
133}; 133};
134 134
135extern int register_ftrace_event(struct trace_event *event); 135extern int register_trace_event(struct trace_event *event);
136extern int unregister_ftrace_event(struct trace_event *event); 136extern int unregister_trace_event(struct trace_event *event);
137 137
138/* Return values for print_line callback */ 138/* Return values for print_line callback */
139enum print_line_t { 139enum print_line_t {
@@ -157,11 +157,11 @@ static inline enum print_line_t trace_handle_return(struct trace_seq *s)
157void tracing_generic_entry_update(struct trace_entry *entry, 157void tracing_generic_entry_update(struct trace_entry *entry,
158 unsigned long flags, 158 unsigned long flags,
159 int pc); 159 int pc);
160struct ftrace_event_file; 160struct trace_event_file;
161 161
162struct ring_buffer_event * 162struct ring_buffer_event *
163trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, 163trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
164 struct ftrace_event_file *ftrace_file, 164 struct trace_event_file *trace_file,
165 int type, unsigned long len, 165 int type, unsigned long len,
166 unsigned long flags, int pc); 166 unsigned long flags, int pc);
167struct ring_buffer_event * 167struct ring_buffer_event *
@@ -183,7 +183,7 @@ void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
183 183
184void tracing_record_cmdline(struct task_struct *tsk); 184void tracing_record_cmdline(struct task_struct *tsk);
185 185
186int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...); 186int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
187 187
188struct event_filter; 188struct event_filter;
189 189
@@ -200,50 +200,39 @@ enum trace_reg {
200#endif 200#endif
201}; 201};
202 202
203struct ftrace_event_call; 203struct trace_event_call;
204 204
205struct ftrace_event_class { 205struct trace_event_class {
206 const char *system; 206 const char *system;
207 void *probe; 207 void *probe;
208#ifdef CONFIG_PERF_EVENTS 208#ifdef CONFIG_PERF_EVENTS
209 void *perf_probe; 209 void *perf_probe;
210#endif 210#endif
211 int (*reg)(struct ftrace_event_call *event, 211 int (*reg)(struct trace_event_call *event,
212 enum trace_reg type, void *data); 212 enum trace_reg type, void *data);
213 int (*define_fields)(struct ftrace_event_call *); 213 int (*define_fields)(struct trace_event_call *);
214 struct list_head *(*get_fields)(struct ftrace_event_call *); 214 struct list_head *(*get_fields)(struct trace_event_call *);
215 struct list_head fields; 215 struct list_head fields;
216 int (*raw_init)(struct ftrace_event_call *); 216 int (*raw_init)(struct trace_event_call *);
217}; 217};
218 218
219extern int ftrace_event_reg(struct ftrace_event_call *event, 219extern int trace_event_reg(struct trace_event_call *event,
220 enum trace_reg type, void *data); 220 enum trace_reg type, void *data);
221 221
222int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event, 222struct trace_event_buffer {
223 char *fmt, ...);
224
225int ftrace_event_define_field(struct ftrace_event_call *call,
226 char *type, int len, char *item, int offset,
227 int field_size, int sign, int filter);
228
229struct ftrace_event_buffer {
230 struct ring_buffer *buffer; 223 struct ring_buffer *buffer;
231 struct ring_buffer_event *event; 224 struct ring_buffer_event *event;
232 struct ftrace_event_file *ftrace_file; 225 struct trace_event_file *trace_file;
233 void *entry; 226 void *entry;
234 unsigned long flags; 227 unsigned long flags;
235 int pc; 228 int pc;
236}; 229};
237 230
238void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, 231void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
239 struct ftrace_event_file *ftrace_file, 232 struct trace_event_file *trace_file,
240 unsigned long len); 233 unsigned long len);
241 234
242void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer); 235void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
243
244int ftrace_event_define_field(struct ftrace_event_call *call,
245 char *type, int len, char *item, int offset,
246 int field_size, int sign, int filter);
247 236
248enum { 237enum {
249 TRACE_EVENT_FL_FILTERED_BIT, 238 TRACE_EVENT_FL_FILTERED_BIT,
@@ -261,11 +250,11 @@ enum {
261 * FILTERED - The event has a filter attached 250 * FILTERED - The event has a filter attached
262 * CAP_ANY - Any user can enable for perf 251 * CAP_ANY - Any user can enable for perf
263 * NO_SET_FILTER - Set when filter has error and is to be ignored 252 * NO_SET_FILTER - Set when filter has error and is to be ignored
264 * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file 253 * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
265 * WAS_ENABLED - Set and stays set when an event was ever enabled 254 * WAS_ENABLED - Set and stays set when an event was ever enabled
266 * (used for module unloading, if a module event is enabled, 255 * (used for module unloading, if a module event is enabled,
267 * it is best to clear the buffers that used it). 256 * it is best to clear the buffers that used it).
268 * USE_CALL_FILTER - For ftrace internal events, don't use file filter 257 * USE_CALL_FILTER - For trace internal events, don't use file filter
269 * TRACEPOINT - Event is a tracepoint 258 * TRACEPOINT - Event is a tracepoint
270 * KPROBE - Event is a kprobe 259 * KPROBE - Event is a kprobe
271 */ 260 */
@@ -280,9 +269,9 @@ enum {
280 TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), 269 TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
281}; 270};
282 271
283struct ftrace_event_call { 272struct trace_event_call {
284 struct list_head list; 273 struct list_head list;
285 struct ftrace_event_class *class; 274 struct trace_event_class *class;
286 union { 275 union {
287 char *name; 276 char *name;
288 /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ 277 /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
@@ -297,7 +286,7 @@ struct ftrace_event_call {
297 * bit 0: filter_active 286 * bit 0: filter_active
298 * bit 1: allow trace by non root (cap any) 287 * bit 1: allow trace by non root (cap any)
299 * bit 2: failed to apply filter 288 * bit 2: failed to apply filter
300 * bit 3: ftrace internal event (do not enable) 289 * bit 3: trace internal event (do not enable)
301 * bit 4: Event was enabled by module 290 * bit 4: Event was enabled by module
302 * bit 5: use call filter rather than file filter 291 * bit 5: use call filter rather than file filter
303 * bit 6: Event is a tracepoint 292 * bit 6: Event is a tracepoint
@@ -309,13 +298,13 @@ struct ftrace_event_call {
309 struct hlist_head __percpu *perf_events; 298 struct hlist_head __percpu *perf_events;
310 struct bpf_prog *prog; 299 struct bpf_prog *prog;
311 300
312 int (*perf_perm)(struct ftrace_event_call *, 301 int (*perf_perm)(struct trace_event_call *,
313 struct perf_event *); 302 struct perf_event *);
314#endif 303#endif
315}; 304};
316 305
317static inline const char * 306static inline const char *
318ftrace_event_name(struct ftrace_event_call *call) 307trace_event_name(struct trace_event_call *call)
319{ 308{
320 if (call->flags & TRACE_EVENT_FL_TRACEPOINT) 309 if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
321 return call->tp ? call->tp->name : NULL; 310 return call->tp ? call->tp->name : NULL;
@@ -324,21 +313,21 @@ ftrace_event_name(struct ftrace_event_call *call)
324} 313}
325 314
326struct trace_array; 315struct trace_array;
327struct ftrace_subsystem_dir; 316struct trace_subsystem_dir;
328 317
329enum { 318enum {
330 FTRACE_EVENT_FL_ENABLED_BIT, 319 EVENT_FILE_FL_ENABLED_BIT,
331 FTRACE_EVENT_FL_RECORDED_CMD_BIT, 320 EVENT_FILE_FL_RECORDED_CMD_BIT,
332 FTRACE_EVENT_FL_FILTERED_BIT, 321 EVENT_FILE_FL_FILTERED_BIT,
333 FTRACE_EVENT_FL_NO_SET_FILTER_BIT, 322 EVENT_FILE_FL_NO_SET_FILTER_BIT,
334 FTRACE_EVENT_FL_SOFT_MODE_BIT, 323 EVENT_FILE_FL_SOFT_MODE_BIT,
335 FTRACE_EVENT_FL_SOFT_DISABLED_BIT, 324 EVENT_FILE_FL_SOFT_DISABLED_BIT,
336 FTRACE_EVENT_FL_TRIGGER_MODE_BIT, 325 EVENT_FILE_FL_TRIGGER_MODE_BIT,
337 FTRACE_EVENT_FL_TRIGGER_COND_BIT, 326 EVENT_FILE_FL_TRIGGER_COND_BIT,
338}; 327};
339 328
340/* 329/*
341 * Ftrace event file flags: 330 * Event file flags:
342 * ENABLED - The event is enabled 331 * ENABLED - The event is enabled
343 * RECORDED_CMD - The comms should be recorded at sched_switch 332 * RECORDED_CMD - The comms should be recorded at sched_switch
344 * FILTERED - The event has a filter attached 333 * FILTERED - The event has a filter attached
@@ -350,23 +339,23 @@ enum {
350 * TRIGGER_COND - When set, one or more triggers has an associated filter 339 * TRIGGER_COND - When set, one or more triggers has an associated filter
351 */ 340 */
352enum { 341enum {
353 FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT), 342 EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
354 FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT), 343 EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
355 FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT), 344 EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
356 FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT), 345 EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
357 FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT), 346 EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
358 FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT), 347 EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
359 FTRACE_EVENT_FL_TRIGGER_MODE = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT), 348 EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
360 FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT), 349 EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
361}; 350};
362 351
363struct ftrace_event_file { 352struct trace_event_file {
364 struct list_head list; 353 struct list_head list;
365 struct ftrace_event_call *event_call; 354 struct trace_event_call *event_call;
366 struct event_filter *filter; 355 struct event_filter *filter;
367 struct dentry *dir; 356 struct dentry *dir;
368 struct trace_array *tr; 357 struct trace_array *tr;
369 struct ftrace_subsystem_dir *system; 358 struct trace_subsystem_dir *system;
370 struct list_head triggers; 359 struct list_head triggers;
371 360
372 /* 361 /*
@@ -399,7 +388,7 @@ struct ftrace_event_file {
399 early_initcall(trace_init_flags_##name); 388 early_initcall(trace_init_flags_##name);
400 389
401#define __TRACE_EVENT_PERF_PERM(name, expr...) \ 390#define __TRACE_EVENT_PERF_PERM(name, expr...) \
402 static int perf_perm_##name(struct ftrace_event_call *tp_event, \ 391 static int perf_perm_##name(struct trace_event_call *tp_event, \
403 struct perf_event *p_event) \ 392 struct perf_event *p_event) \
404 { \ 393 { \
405 return ({ expr; }); \ 394 return ({ expr; }); \
@@ -425,19 +414,19 @@ enum event_trigger_type {
425 414
426extern int filter_match_preds(struct event_filter *filter, void *rec); 415extern int filter_match_preds(struct event_filter *filter, void *rec);
427 416
428extern int filter_check_discard(struct ftrace_event_file *file, void *rec, 417extern int filter_check_discard(struct trace_event_file *file, void *rec,
429 struct ring_buffer *buffer, 418 struct ring_buffer *buffer,
430 struct ring_buffer_event *event); 419 struct ring_buffer_event *event);
431extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec, 420extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
432 struct ring_buffer *buffer, 421 struct ring_buffer *buffer,
433 struct ring_buffer_event *event); 422 struct ring_buffer_event *event);
434extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file, 423extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
435 void *rec); 424 void *rec);
436extern void event_triggers_post_call(struct ftrace_event_file *file, 425extern void event_triggers_post_call(struct trace_event_file *file,
437 enum event_trigger_type tt); 426 enum event_trigger_type tt);
438 427
439/** 428/**
440 * ftrace_trigger_soft_disabled - do triggers and test if soft disabled 429 * trace_trigger_soft_disabled - do triggers and test if soft disabled
441 * @file: The file pointer of the event to test 430 * @file: The file pointer of the event to test
442 * 431 *
443 * If any triggers without filters are attached to this event, they 432 * If any triggers without filters are attached to this event, they
@@ -446,14 +435,14 @@ extern void event_triggers_post_call(struct ftrace_event_file *file,
446 * otherwise false. 435 * otherwise false.
447 */ 436 */
448static inline bool 437static inline bool
449ftrace_trigger_soft_disabled(struct ftrace_event_file *file) 438trace_trigger_soft_disabled(struct trace_event_file *file)
450{ 439{
451 unsigned long eflags = file->flags; 440 unsigned long eflags = file->flags;
452 441
453 if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) { 442 if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
454 if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE) 443 if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
455 event_triggers_call(file, NULL); 444 event_triggers_call(file, NULL);
456 if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED) 445 if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
457 return true; 446 return true;
458 } 447 }
459 return false; 448 return false;
@@ -473,7 +462,7 @@ ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
473 * Returns true if the event is discarded, false otherwise. 462 * Returns true if the event is discarded, false otherwise.
474 */ 463 */
475static inline bool 464static inline bool
476__event_trigger_test_discard(struct ftrace_event_file *file, 465__event_trigger_test_discard(struct trace_event_file *file,
477 struct ring_buffer *buffer, 466 struct ring_buffer *buffer,
478 struct ring_buffer_event *event, 467 struct ring_buffer_event *event,
479 void *entry, 468 void *entry,
@@ -481,10 +470,10 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
481{ 470{
482 unsigned long eflags = file->flags; 471 unsigned long eflags = file->flags;
483 472
484 if (eflags & FTRACE_EVENT_FL_TRIGGER_COND) 473 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
485 *tt = event_triggers_call(file, entry); 474 *tt = event_triggers_call(file, entry);
486 475
487 if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags)) 476 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
488 ring_buffer_discard_commit(buffer, event); 477 ring_buffer_discard_commit(buffer, event);
489 else if (!filter_check_discard(file, entry, buffer, event)) 478 else if (!filter_check_discard(file, entry, buffer, event))
490 return false; 479 return false;
@@ -506,7 +495,7 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
506 * if the event is soft disabled and should be discarded. 495 * if the event is soft disabled and should be discarded.
507 */ 496 */
508static inline void 497static inline void
509event_trigger_unlock_commit(struct ftrace_event_file *file, 498event_trigger_unlock_commit(struct trace_event_file *file,
510 struct ring_buffer *buffer, 499 struct ring_buffer *buffer,
511 struct ring_buffer_event *event, 500 struct ring_buffer_event *event,
512 void *entry, unsigned long irq_flags, int pc) 501 void *entry, unsigned long irq_flags, int pc)
@@ -537,7 +526,7 @@ event_trigger_unlock_commit(struct ftrace_event_file *file,
537 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit(). 526 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
538 */ 527 */
539static inline void 528static inline void
540event_trigger_unlock_commit_regs(struct ftrace_event_file *file, 529event_trigger_unlock_commit_regs(struct trace_event_file *file,
541 struct ring_buffer *buffer, 530 struct ring_buffer *buffer,
542 struct ring_buffer_event *event, 531 struct ring_buffer_event *event,
543 void *entry, unsigned long irq_flags, int pc, 532 void *entry, unsigned long irq_flags, int pc,
@@ -570,12 +559,12 @@ enum {
570 FILTER_TRACE_FN, 559 FILTER_TRACE_FN,
571}; 560};
572 561
573extern int trace_event_raw_init(struct ftrace_event_call *call); 562extern int trace_event_raw_init(struct trace_event_call *call);
574extern int trace_define_field(struct ftrace_event_call *call, const char *type, 563extern int trace_define_field(struct trace_event_call *call, const char *type,
575 const char *name, int offset, int size, 564 const char *name, int offset, int size,
576 int is_signed, int filter_type); 565 int is_signed, int filter_type);
577extern int trace_add_event_call(struct ftrace_event_call *call); 566extern int trace_add_event_call(struct trace_event_call *call);
578extern int trace_remove_event_call(struct ftrace_event_call *call); 567extern int trace_remove_event_call(struct trace_event_call *call);
579 568
580#define is_signed_type(type) (((type)(-1)) < (type)1) 569#define is_signed_type(type) (((type)(-1)) < (type)1)
581 570
@@ -624,4 +613,4 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
624} 613}
625#endif 614#endif
626 615
627#endif /* _LINUX_FTRACE_EVENT_H */ 616#endif /* _LINUX_TRACE_EVENT_H */
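The hunk above is a mechanical rename of the ftrace_event_* API to trace_event_*, so out-of-tree users mostly need a one-for-one substitution. A hedged before/after sketch (the printing helper is illustrative only):

#include <linux/printk.h>
#include <linux/trace_events.h>	/* was <linux/ftrace_event.h> */

static void example_report(struct trace_event_call *call)
{
	/* old spelling: ftrace_event_name() on a struct ftrace_event_call */
	const char *name = trace_event_name(call);

	pr_info("tracing event %s\n", name ? name : "(unnamed)");
}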
diff --git a/include/linux/tty.h b/include/linux/tty.h
index d76631f615c2..ad6c8913aa3e 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -422,7 +422,7 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
422 422
423extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, 423extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
424 const char *routine); 424 const char *routine);
425extern char *tty_name(struct tty_struct *tty, char *buf); 425extern const char *tty_name(const struct tty_struct *tty);
426extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); 426extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
427extern int tty_check_change(struct tty_struct *tty); 427extern int tty_check_change(struct tty_struct *tty);
428extern void __stop_tty(struct tty_struct *tty); 428extern void __stop_tty(struct tty_struct *tty);
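tty_name() now returns a const string and no longer needs a caller-supplied scratch buffer, so call sites simply drop their local arrays. A minimal caller-side sketch (the logging wrapper is hypothetical):

#include <linux/printk.h>
#include <linux/tty.h>

static void example_log_open(const struct tty_struct *tty)
{
	/* old: char buf[64]; pr_debug("opened %s\n", tty_name(tty, buf)); */
	pr_debug("opened %s\n", tty_name(tty));
}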
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 4b4439e75f45..df89c9bcba7d 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -68,11 +68,12 @@ struct u64_stats_sync {
68}; 68};
69 69
70 70
71static inline void u64_stats_init(struct u64_stats_sync *syncp)
72{
71#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) 73#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
72# define u64_stats_init(syncp) seqcount_init(syncp.seq) 74 seqcount_init(&syncp->seq);
73#else
74# define u64_stats_init(syncp) do { } while (0)
75#endif 75#endif
76}
76 77
77static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) 78static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
78{ 79{
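Turning u64_stats_init() into a real inline function gives it the same pointer-taking shape as u64_stats_update_begin()/end(); on 64-bit or !SMP builds the body compiles away. A hedged sketch of a writer-side counter, with a made-up stats structure:

#include <linux/u64_stats_sync.h>

struct example_stats {
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static void example_stats_setup(struct example_stats *s)
{
	u64_stats_init(&s->syncp);	/* seqcount init only on 32-bit SMP */
}

static void example_stats_add(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}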
diff --git a/include/linux/ulpi/driver.h b/include/linux/ulpi/driver.h
new file mode 100644
index 000000000000..388f6e08b9d4
--- /dev/null
+++ b/include/linux/ulpi/driver.h
@@ -0,0 +1,60 @@
1#ifndef __LINUX_ULPI_DRIVER_H
2#define __LINUX_ULPI_DRIVER_H
3
4#include <linux/mod_devicetable.h>
5
6#include <linux/device.h>
7
8struct ulpi_ops;
9
10/**
11 * struct ulpi - describes ULPI PHY device
12 * @id: vendor and product ids for ULPI device
13 * @ops: I/O access
14 * @dev: device interface
15 */
16struct ulpi {
17 struct ulpi_device_id id;
18 struct ulpi_ops *ops;
19 struct device dev;
20};
21
22#define to_ulpi_dev(d) container_of(d, struct ulpi, dev)
23
24static inline void ulpi_set_drvdata(struct ulpi *ulpi, void *data)
25{
26 dev_set_drvdata(&ulpi->dev, data);
27}
28
29static inline void *ulpi_get_drvdata(struct ulpi *ulpi)
30{
31 return dev_get_drvdata(&ulpi->dev);
32}
33
34/**
35 * struct ulpi_driver - describes a ULPI PHY driver
36 * @id_table: array of device identifiers supported by this driver
37 * @probe: binds this driver to ULPI device
38 * @remove: unbinds this driver from ULPI device
39 * @driver: the name and owner members must be initialized by the drivers
40 */
41struct ulpi_driver {
42 const struct ulpi_device_id *id_table;
43 int (*probe)(struct ulpi *ulpi);
44 void (*remove)(struct ulpi *ulpi);
45 struct device_driver driver;
46};
47
48#define to_ulpi_driver(d) container_of(d, struct ulpi_driver, driver)
49
50int ulpi_register_driver(struct ulpi_driver *drv);
51void ulpi_unregister_driver(struct ulpi_driver *drv);
52
53#define module_ulpi_driver(__ulpi_driver) \
54 module_driver(__ulpi_driver, ulpi_register_driver, \
55 ulpi_unregister_driver)
56
57int ulpi_read(struct ulpi *ulpi, u8 addr);
58int ulpi_write(struct ulpi *ulpi, u8 addr, u8 val);
59
60#endif /* __LINUX_ULPI_DRIVER_H */
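A PHY driver binds to the new ULPI bus by vendor/product ID and then talks to the device through ulpi_read()/ulpi_write(). A hedged sketch of a minimal driver; the IDs and driver name are made up for illustration:

#include <linux/module.h>
#include <linux/ulpi/driver.h>
#include <linux/ulpi/regs.h>

static int example_ulpi_probe(struct ulpi *ulpi)
{
	int vid = ulpi_read(ulpi, ULPI_VENDOR_ID_LOW);

	return vid < 0 ? vid : 0;
}

static void example_ulpi_remove(struct ulpi *ulpi)
{
}

static const struct ulpi_device_id example_ulpi_ids[] = {
	{ 0x1234, 0x5678 },	/* hypothetical vendor/product */
	{ },
};

static struct ulpi_driver example_ulpi_driver = {
	.id_table	= example_ulpi_ids,
	.probe		= example_ulpi_probe,
	.remove		= example_ulpi_remove,
	.driver		= {
		.name	= "example-ulpi-phy",
		.owner	= THIS_MODULE,
	},
};
module_ulpi_driver(example_ulpi_driver);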
diff --git a/include/linux/ulpi/interface.h b/include/linux/ulpi/interface.h
new file mode 100644
index 000000000000..4de8ab491038
--- /dev/null
+++ b/include/linux/ulpi/interface.h
@@ -0,0 +1,23 @@
1#ifndef __LINUX_ULPI_INTERFACE_H
2#define __LINUX_ULPI_INTERFACE_H
3
4#include <linux/types.h>
5
6struct ulpi;
7
8/**
9 * struct ulpi_ops - ULPI register access
10 * @dev: the interface provider
11 * @read: read operation for ULPI register access
12 * @write: write operation for ULPI register access
13 */
14struct ulpi_ops {
15 struct device *dev;
16 int (*read)(struct ulpi_ops *ops, u8 addr);
17 int (*write)(struct ulpi_ops *ops, u8 addr, u8 val);
18};
19
20struct ulpi *ulpi_register_interface(struct device *, struct ulpi_ops *);
21void ulpi_unregister_interface(struct ulpi *);
22
23#endif /* __LINUX_ULPI_INTERFACE_H */
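The interface side is for controller drivers: they supply register accessors in a struct ulpi_ops and hand it to ulpi_register_interface(), which creates the struct ulpi device a PHY driver binds to. A hedged sketch, assuming the function returns an ERR_PTR on failure and leaving the actual register access as a placeholder:

#include <linux/err.h>
#include <linux/ulpi/interface.h>

static int example_ulpi_read(struct ulpi_ops *ops, u8 addr)
{
	return 0;	/* a real controller would do a ULPI viewport read */
}

static int example_ulpi_write(struct ulpi_ops *ops, u8 addr, u8 val)
{
	return 0;	/* ... and a viewport write */
}

static struct ulpi_ops example_ops = {
	.read	= example_ulpi_read,
	.write	= example_ulpi_write,
};

static int example_register(struct device *dev)
{
	struct ulpi *ulpi;

	example_ops.dev = dev;	/* the interface provider */
	ulpi = ulpi_register_interface(dev, &example_ops);
	return PTR_ERR_OR_ZERO(ulpi);
}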
diff --git a/include/linux/ulpi/regs.h b/include/linux/ulpi/regs.h
new file mode 100644
index 000000000000..b5b8b8804560
--- /dev/null
+++ b/include/linux/ulpi/regs.h
@@ -0,0 +1,130 @@
1#ifndef __LINUX_ULPI_REGS_H
2#define __LINUX_ULPI_REGS_H
3
4/*
5 * Macros for Set and Clear
6 * See ULPI 1.1 specification to find the registers with Set and Clear offsets
7 */
8#define ULPI_SET(a) (a + 1)
9#define ULPI_CLR(a) (a + 2)
10
11/*
12 * Register Map
13 */
14#define ULPI_VENDOR_ID_LOW 0x00
15#define ULPI_VENDOR_ID_HIGH 0x01
16#define ULPI_PRODUCT_ID_LOW 0x02
17#define ULPI_PRODUCT_ID_HIGH 0x03
18#define ULPI_FUNC_CTRL 0x04
19#define ULPI_IFC_CTRL 0x07
20#define ULPI_OTG_CTRL 0x0a
21#define ULPI_USB_INT_EN_RISE 0x0d
22#define ULPI_USB_INT_EN_FALL 0x10
23#define ULPI_USB_INT_STS 0x13
24#define ULPI_USB_INT_LATCH 0x14
25#define ULPI_DEBUG 0x15
26#define ULPI_SCRATCH 0x16
27/* Optional Carkit Registers */
28#define ULPI_CARKIT_CTRL 0x19
29#define ULPI_CARKIT_INT_DELAY 0x1c
30#define ULPI_CARKIT_INT_EN 0x1d
31#define ULPI_CARKIT_INT_STS 0x20
32#define ULPI_CARKIT_INT_LATCH 0x21
33#define ULPI_CARKIT_PLS_CTRL 0x22
34/* Other Optional Registers */
35#define ULPI_TX_POS_WIDTH 0x25
36#define ULPI_TX_NEG_WIDTH 0x26
37#define ULPI_POLARITY_RECOVERY 0x27
38/* Access Extended Register Set */
39#define ULPI_ACCESS_EXTENDED 0x2f
40/* Vendor Specific */
41#define ULPI_VENDOR_SPECIFIC 0x30
42/* Extended Registers */
43#define ULPI_EXT_VENDOR_SPECIFIC 0x80
44
45/*
46 * Register Bits
47 */
48
49/* Function Control */
50#define ULPI_FUNC_CTRL_XCVRSEL BIT(0)
51#define ULPI_FUNC_CTRL_XCVRSEL_MASK 0x3
52#define ULPI_FUNC_CTRL_HIGH_SPEED 0x0
53#define ULPI_FUNC_CTRL_FULL_SPEED 0x1
54#define ULPI_FUNC_CTRL_LOW_SPEED 0x2
55#define ULPI_FUNC_CTRL_FS4LS 0x3
56#define ULPI_FUNC_CTRL_TERMSELECT BIT(2)
57#define ULPI_FUNC_CTRL_OPMODE BIT(3)
58#define ULPI_FUNC_CTRL_OPMODE_MASK (0x3 << 3)
59#define ULPI_FUNC_CTRL_OPMODE_NORMAL (0x0 << 3)
60#define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (0x1 << 3)
61#define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (0x2 << 3)
62#define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (0x3 << 3)
63#define ULPI_FUNC_CTRL_RESET BIT(5)
64#define ULPI_FUNC_CTRL_SUSPENDM BIT(6)
65
66/* Interface Control */
67#define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE BIT(0)
68#define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE BIT(1)
69#define ULPI_IFC_CTRL_CARKITMODE BIT(2)
70#define ULPI_IFC_CTRL_CLOCKSUSPENDM BIT(3)
71#define ULPI_IFC_CTRL_AUTORESUME BIT(4)
72#define ULPI_IFC_CTRL_EXTERNAL_VBUS BIT(5)
73#define ULPI_IFC_CTRL_PASSTHRU BIT(6)
74#define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE BIT(7)
75
76/* OTG Control */
77#define ULPI_OTG_CTRL_ID_PULLUP BIT(0)
78#define ULPI_OTG_CTRL_DP_PULLDOWN BIT(1)
79#define ULPI_OTG_CTRL_DM_PULLDOWN BIT(2)
80#define ULPI_OTG_CTRL_DISCHRGVBUS BIT(3)
81#define ULPI_OTG_CTRL_CHRGVBUS BIT(4)
82#define ULPI_OTG_CTRL_DRVVBUS BIT(5)
83#define ULPI_OTG_CTRL_DRVVBUS_EXT BIT(6)
84#define ULPI_OTG_CTRL_EXTVBUSIND BIT(7)
85
86/* USB Interrupt Enable Rising,
87 * USB Interrupt Enable Falling,
88 * USB Interrupt Status and
89 * USB Interrupt Latch
90 */
91#define ULPI_INT_HOST_DISCONNECT BIT(0)
92#define ULPI_INT_VBUS_VALID BIT(1)
93#define ULPI_INT_SESS_VALID BIT(2)
94#define ULPI_INT_SESS_END BIT(3)
95#define ULPI_INT_IDGRD BIT(4)
96
97/* Debug */
98#define ULPI_DEBUG_LINESTATE0 BIT(0)
99#define ULPI_DEBUG_LINESTATE1 BIT(1)
100
101/* Carkit Control */
102#define ULPI_CARKIT_CTRL_CARKITPWR BIT(0)
103#define ULPI_CARKIT_CTRL_IDGNDDRV BIT(1)
104#define ULPI_CARKIT_CTRL_TXDEN BIT(2)
105#define ULPI_CARKIT_CTRL_RXDEN BIT(3)
106#define ULPI_CARKIT_CTRL_SPKLEFTEN BIT(4)
107#define ULPI_CARKIT_CTRL_SPKRIGHTEN BIT(5)
108#define ULPI_CARKIT_CTRL_MICEN BIT(6)
109
110/* Carkit Interrupt Enable */
111#define ULPI_CARKIT_INT_EN_IDFLOAT_RISE BIT(0)
112#define ULPI_CARKIT_INT_EN_IDFLOAT_FALL BIT(1)
113#define ULPI_CARKIT_INT_EN_CARINTDET BIT(2)
114#define ULPI_CARKIT_INT_EN_DP_RISE BIT(3)
115#define ULPI_CARKIT_INT_EN_DP_FALL BIT(4)
116
117/* Carkit Interrupt Status and
118 * Carkit Interrupt Latch
119 */
120#define ULPI_CARKIT_INT_IDFLOAT BIT(0)
121#define ULPI_CARKIT_INT_CARINTDET BIT(1)
122#define ULPI_CARKIT_INT_DP BIT(2)
123
124/* Carkit Pulse Control*/
125#define ULPI_CARKIT_PLS_CTRL_TXPLSEN BIT(0)
126#define ULPI_CARKIT_PLS_CTRL_RXPLSEN BIT(1)
127#define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN BIT(2)
128#define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN BIT(3)
129
130#endif /* __LINUX_ULPI_REGS_H */
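Most of the writable registers in this map have companion set/clear addresses, so individual bits can be flipped without a read-modify-write cycle; ULPI_SET()/ULPI_CLR() compute those offsets. A hedged illustration using the generic ulpi_write() accessor from the new bus code:

#include <linux/ulpi/driver.h>
#include <linux/ulpi/regs.h>

static int example_force_pulldowns(struct ulpi *ulpi, bool enable)
{
	u8 bits = ULPI_OTG_CTRL_DP_PULLDOWN | ULPI_OTG_CTRL_DM_PULLDOWN;

	/* write-1-to-set / write-1-to-clear companion registers */
	return ulpi_write(ulpi, enable ? ULPI_SET(ULPI_OTG_CTRL)
				       : ULPI_CLR(ULPI_OTG_CTRL), bits);
}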
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 7c9b484735c5..1f6526c76ee8 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -80,6 +80,9 @@
80#define CDC_NCM_TIMER_INTERVAL_MIN 5UL 80#define CDC_NCM_TIMER_INTERVAL_MIN 5UL
81#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) 81#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
82 82
83/* Driver flags */
84#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
85
83#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ 86#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
84 (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) 87 (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
85#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) 88#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
@@ -103,9 +106,11 @@ struct cdc_ncm_ctx {
103 106
104 spinlock_t mtx; 107 spinlock_t mtx;
105 atomic_t stop; 108 atomic_t stop;
109 int drvflags;
106 110
107 u32 timer_interval; 111 u32 timer_interval;
108 u32 max_ndp_size; 112 u32 max_ndp_size;
113 struct usb_cdc_ncm_ndp16 *delayed_ndp16;
109 114
110 u32 tx_timer_pending; 115 u32 tx_timer_pending;
111 u32 tx_curr_frame_num; 116 u32 tx_curr_frame_num;
@@ -133,7 +138,7 @@ struct cdc_ncm_ctx {
133}; 138};
134 139
135u8 cdc_ncm_select_altsetting(struct usb_interface *intf); 140u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
136int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); 141int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags);
137void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); 142void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
138struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); 143struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
139int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); 144int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
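cdc_ncm_bind_common() gains a drvflags argument so sub-drivers can request quirks such as CDC_NCM_FLAG_NDP_TO_END, which makes the TX path hold the NDP16 in delayed_ndp16 and append it at the end of the NTB. A hedged sketch of a bind callback passing the flag; CDC_NCM_DATA_ALTSETTING_NCM is assumed to be the usual NCM data altsetting:

#include <linux/usb/cdc_ncm.h>

static int example_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
	/* keep the NDP at the end of each frame for firmware that needs it */
	return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM,
				   CDC_NCM_FLAG_NDP_TO_END);
}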
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 68b1e836dff1..c9aa7792de10 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -622,8 +622,6 @@ extern struct list_head usb_bus_list;
622extern struct mutex usb_bus_list_lock; 622extern struct mutex usb_bus_list_lock;
623extern wait_queue_head_t usb_kill_urb_queue; 623extern wait_queue_head_t usb_kill_urb_queue;
624 624
625extern int usb_find_interface_driver(struct usb_device *dev,
626 struct usb_interface *interface);
627 625
628#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) 626#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
629 627
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 7dbecf9a4656..e55a1504266e 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -18,6 +18,7 @@
18#ifndef __ASM_ARCH_MSM_HSUSB_H 18#ifndef __ASM_ARCH_MSM_HSUSB_H
19#define __ASM_ARCH_MSM_HSUSB_H 19#define __ASM_ARCH_MSM_HSUSB_H
20 20
21#include <linux/extcon.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/usb/otg.h> 23#include <linux/usb/otg.h>
23#include <linux/clk.h> 24#include <linux/clk.h>
@@ -120,6 +121,17 @@ struct msm_otg_platform_data {
120}; 121};
121 122
122/** 123/**
 124 * struct msm_usb_cable - structure for external connector cable
125 * state tracking
126 * @nb: hold event notification callback
127 * @conn: used for notification registration
128 */
129struct msm_usb_cable {
130 struct notifier_block nb;
131 struct extcon_specific_cable_nb conn;
132};
133
134/**
123 * struct msm_otg: OTG driver data. Shared by HCD and DCD. 135 * struct msm_otg: OTG driver data. Shared by HCD and DCD.
124 * @otg: USB OTG Transceiver structure. 136 * @otg: USB OTG Transceiver structure.
125 * @pdata: otg device platform data. 137 * @pdata: otg device platform data.
@@ -138,6 +150,11 @@ struct msm_otg_platform_data {
138 * @chg_type: The type of charger attached. 150 * @chg_type: The type of charger attached.
 139 * @dcd_retries: The retry count used to track Data contact 151 * @dcd_retries: The retry count used to track Data contact
140 * detection process. 152 * detection process.
 153 * @manual_pullup: true if VBUS is not routed to the USB controller/PHY
 154 * and the controller driver therefore enables the pull-up explicitly before
 155 * starting the controller using the usbcmd run/stop bit.
 156 * @vbus: VBUS signal state tracking, using the extcon framework
 157 * @id: ID signal state tracking, using the extcon framework
141 */ 158 */
142struct msm_otg { 159struct msm_otg {
143 struct usb_phy phy; 160 struct usb_phy phy;
@@ -166,6 +183,11 @@ struct msm_otg {
166 struct reset_control *phy_rst; 183 struct reset_control *phy_rst;
167 struct reset_control *link_rst; 184 struct reset_control *link_rst;
168 int vdd_levels[3]; 185 int vdd_levels[3];
186
187 bool manual_pullup;
188
189 struct msm_usb_cable vbus;
190 struct msm_usb_cable id;
169}; 191};
170 192
171#endif 193#endif
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index a29f6030afb1..e159b39f67a2 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -21,6 +21,8 @@
21 21
22#define USB_AHBBURST (MSM_USB_BASE + 0x0090) 22#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
23#define USB_AHBMODE (MSM_USB_BASE + 0x0098) 23#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
24#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0)
25
24#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ 26#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
25 27
26#define USB_USBCMD (MSM_USB_BASE + 0x0140) 28#define USB_USBCMD (MSM_USB_BASE + 0x0140)
@@ -30,6 +32,9 @@
30#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240) 32#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240)
31#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278) 33#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278)
32 34
35#define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7)
36#define USBCMD_SESS_VLD_CTRL BIT(25)
37
33#define USBCMD_RESET 2 38#define USBCMD_RESET 2
34#define USB_USBINTR (MSM_USB_BASE + 0x0148) 39#define USB_USBINTR (MSM_USB_BASE + 0x0148)
35 40
@@ -50,6 +55,10 @@
50#define ULPI_PWR_CLK_MNG_REG 0x88 55#define ULPI_PWR_CLK_MNG_REG 0x88
51#define OTG_COMP_DISABLE BIT(0) 56#define OTG_COMP_DISABLE BIT(0)
52 57
58#define ULPI_MISC_A 0x96
59#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1)
60#define ULPI_MISC_A_VBUSVLDEXT BIT(0)
61
53#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */ 62#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
54#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */ 63#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */
55#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */ 64#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */
diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h
index 148b8fa5b1a2..725120224472 100644
--- a/include/linux/usb/net2280.h
+++ b/include/linux/usb/net2280.h
@@ -168,6 +168,9 @@ struct net2280_regs {
168#define ENDPOINT_B_INTERRUPT 2 168#define ENDPOINT_B_INTERRUPT 2
169#define ENDPOINT_A_INTERRUPT 1 169#define ENDPOINT_A_INTERRUPT 1
170#define ENDPOINT_0_INTERRUPT 0 170#define ENDPOINT_0_INTERRUPT 0
171#define USB3380_IRQSTAT0_EP_INTR_MASK_IN (0xF << 17)
172#define USB3380_IRQSTAT0_EP_INTR_MASK_OUT (0xF << 1)
173
171 u32 irqstat1; 174 u32 irqstat1;
172#define POWER_STATE_CHANGE_INTERRUPT 27 175#define POWER_STATE_CHANGE_INTERRUPT 27
173#define PCI_ARBITER_TIMEOUT_INTERRUPT 26 176#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index bc91b5d380fd..e39f251cf861 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -205,6 +205,8 @@ extern struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index);
205extern struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index); 205extern struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index);
206extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, 206extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
207 const char *phandle, u8 index); 207 const char *phandle, u8 index);
208extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
209 struct device_node *node, struct notifier_block *nb);
208extern void usb_put_phy(struct usb_phy *); 210extern void usb_put_phy(struct usb_phy *);
209extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); 211extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
210extern int usb_bind_phy(const char *dev_name, u8 index, 212extern int usb_bind_phy(const char *dev_name, u8 index,
@@ -238,6 +240,12 @@ static inline struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
238 return ERR_PTR(-ENXIO); 240 return ERR_PTR(-ENXIO);
239} 241}
240 242
243static inline struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
244 struct device_node *node, struct notifier_block *nb)
245{
246 return ERR_PTR(-ENXIO);
247}
248
241static inline void usb_put_phy(struct usb_phy *x) 249static inline void usb_put_phy(struct usb_phy *x)
242{ 250{
243} 251}
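devm_usb_get_phy_by_node() lets a controller look a PHY up by device_node rather than by phandle property name, optionally passing a notifier block for PHY events; the !CONFIG_USB_PHY stub above returns -ENXIO. A hedged usage sketch in which the "usb-phy" property name and the NULL notifier are assumptions:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>

static int example_get_phy(struct platform_device *pdev)
{
	struct device_node *np;
	struct usb_phy *phy;

	np = of_parse_phandle(pdev->dev.of_node, "usb-phy", 0);
	if (!np)
		return -ENODEV;

	phy = devm_usb_get_phy_by_node(&pdev->dev, np, NULL);
	of_node_put(np);

	return PTR_ERR_OR_ZERO(phy);
}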
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index f06529c14141..3dd5a781da99 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -169,8 +169,7 @@ struct renesas_usbhs_driver_param {
169#define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */ 169#define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */
170}; 170};
171 171
172#define USBHS_TYPE_R8A7790 1 172#define USBHS_TYPE_RCAR_GEN2 1
173#define USBHS_TYPE_R8A7791 2
174 173
175/* 174/*
176 * option: 175 * option:
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 5c295c26ad37..5f07407a367a 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -12,6 +12,8 @@
12#define __LINUX_USB_ULPI_H 12#define __LINUX_USB_ULPI_H
13 13
14#include <linux/usb/otg.h> 14#include <linux/usb/otg.h>
15#include <linux/ulpi/regs.h>
16
15/*-------------------------------------------------------------------------*/ 17/*-------------------------------------------------------------------------*/
16 18
17/* 19/*
@@ -49,138 +51,6 @@
49 51
50/*-------------------------------------------------------------------------*/ 52/*-------------------------------------------------------------------------*/
51 53
52/*
53 * Macros for Set and Clear
54 * See ULPI 1.1 specification to find the registers with Set and Clear offsets
55 */
56#define ULPI_SET(a) (a + 1)
57#define ULPI_CLR(a) (a + 2)
58
59/*-------------------------------------------------------------------------*/
60
61/*
62 * Register Map
63 */
64#define ULPI_VENDOR_ID_LOW 0x00
65#define ULPI_VENDOR_ID_HIGH 0x01
66#define ULPI_PRODUCT_ID_LOW 0x02
67#define ULPI_PRODUCT_ID_HIGH 0x03
68#define ULPI_FUNC_CTRL 0x04
69#define ULPI_IFC_CTRL 0x07
70#define ULPI_OTG_CTRL 0x0a
71#define ULPI_USB_INT_EN_RISE 0x0d
72#define ULPI_USB_INT_EN_FALL 0x10
73#define ULPI_USB_INT_STS 0x13
74#define ULPI_USB_INT_LATCH 0x14
75#define ULPI_DEBUG 0x15
76#define ULPI_SCRATCH 0x16
77/* Optional Carkit Registers */
78#define ULPI_CARCIT_CTRL 0x19
79#define ULPI_CARCIT_INT_DELAY 0x1c
80#define ULPI_CARCIT_INT_EN 0x1d
81#define ULPI_CARCIT_INT_STS 0x20
82#define ULPI_CARCIT_INT_LATCH 0x21
83#define ULPI_CARCIT_PLS_CTRL 0x22
84/* Other Optional Registers */
85#define ULPI_TX_POS_WIDTH 0x25
86#define ULPI_TX_NEG_WIDTH 0x26
87#define ULPI_POLARITY_RECOVERY 0x27
88/* Access Extended Register Set */
89#define ULPI_ACCESS_EXTENDED 0x2f
90/* Vendor Specific */
91#define ULPI_VENDOR_SPECIFIC 0x30
92/* Extended Registers */
93#define ULPI_EXT_VENDOR_SPECIFIC 0x80
94
95/*-------------------------------------------------------------------------*/
96
97/*
98 * Register Bits
99 */
100
101/* Function Control */
102#define ULPI_FUNC_CTRL_XCVRSEL (1 << 0)
103#define ULPI_FUNC_CTRL_XCVRSEL_MASK (3 << 0)
104#define ULPI_FUNC_CTRL_HIGH_SPEED (0 << 0)
105#define ULPI_FUNC_CTRL_FULL_SPEED (1 << 0)
106#define ULPI_FUNC_CTRL_LOW_SPEED (2 << 0)
107#define ULPI_FUNC_CTRL_FS4LS (3 << 0)
108#define ULPI_FUNC_CTRL_TERMSELECT (1 << 2)
109#define ULPI_FUNC_CTRL_OPMODE (1 << 3)
110#define ULPI_FUNC_CTRL_OPMODE_MASK (3 << 3)
111#define ULPI_FUNC_CTRL_OPMODE_NORMAL (0 << 3)
112#define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (1 << 3)
113#define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (2 << 3)
114#define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (3 << 3)
115#define ULPI_FUNC_CTRL_RESET (1 << 5)
116#define ULPI_FUNC_CTRL_SUSPENDM (1 << 6)
117
118/* Interface Control */
119#define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE (1 << 0)
120#define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE (1 << 1)
121#define ULPI_IFC_CTRL_CARKITMODE (1 << 2)
122#define ULPI_IFC_CTRL_CLOCKSUSPENDM (1 << 3)
123#define ULPI_IFC_CTRL_AUTORESUME (1 << 4)
124#define ULPI_IFC_CTRL_EXTERNAL_VBUS (1 << 5)
125#define ULPI_IFC_CTRL_PASSTHRU (1 << 6)
126#define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE (1 << 7)
127
128/* OTG Control */
129#define ULPI_OTG_CTRL_ID_PULLUP (1 << 0)
130#define ULPI_OTG_CTRL_DP_PULLDOWN (1 << 1)
131#define ULPI_OTG_CTRL_DM_PULLDOWN (1 << 2)
132#define ULPI_OTG_CTRL_DISCHRGVBUS (1 << 3)
133#define ULPI_OTG_CTRL_CHRGVBUS (1 << 4)
134#define ULPI_OTG_CTRL_DRVVBUS (1 << 5)
135#define ULPI_OTG_CTRL_DRVVBUS_EXT (1 << 6)
136#define ULPI_OTG_CTRL_EXTVBUSIND (1 << 7)
137
138/* USB Interrupt Enable Rising,
139 * USB Interrupt Enable Falling,
140 * USB Interrupt Status and
141 * USB Interrupt Latch
142 */
143#define ULPI_INT_HOST_DISCONNECT (1 << 0)
144#define ULPI_INT_VBUS_VALID (1 << 1)
145#define ULPI_INT_SESS_VALID (1 << 2)
146#define ULPI_INT_SESS_END (1 << 3)
147#define ULPI_INT_IDGRD (1 << 4)
148
149/* Debug */
150#define ULPI_DEBUG_LINESTATE0 (1 << 0)
151#define ULPI_DEBUG_LINESTATE1 (1 << 1)
152
153/* Carkit Control */
154#define ULPI_CARKIT_CTRL_CARKITPWR (1 << 0)
155#define ULPI_CARKIT_CTRL_IDGNDDRV (1 << 1)
156#define ULPI_CARKIT_CTRL_TXDEN (1 << 2)
157#define ULPI_CARKIT_CTRL_RXDEN (1 << 3)
158#define ULPI_CARKIT_CTRL_SPKLEFTEN (1 << 4)
159#define ULPI_CARKIT_CTRL_SPKRIGHTEN (1 << 5)
160#define ULPI_CARKIT_CTRL_MICEN (1 << 6)
161
162/* Carkit Interrupt Enable */
163#define ULPI_CARKIT_INT_EN_IDFLOAT_RISE (1 << 0)
164#define ULPI_CARKIT_INT_EN_IDFLOAT_FALL (1 << 1)
165#define ULPI_CARKIT_INT_EN_CARINTDET (1 << 2)
166#define ULPI_CARKIT_INT_EN_DP_RISE (1 << 3)
167#define ULPI_CARKIT_INT_EN_DP_FALL (1 << 4)
168
169/* Carkit Interrupt Status and
170 * Carkit Interrupt Latch
171 */
172#define ULPI_CARKIT_INT_IDFLOAT (1 << 0)
173#define ULPI_CARKIT_INT_CARINTDET (1 << 1)
174#define ULPI_CARKIT_INT_DP (1 << 2)
175
176/* Carkit Pulse Control*/
177#define ULPI_CARKIT_PLS_CTRL_TXPLSEN (1 << 0)
178#define ULPI_CARKIT_PLS_CTRL_RXPLSEN (1 << 1)
179#define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN (1 << 2)
180#define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN (1 << 3)
181
182/*-------------------------------------------------------------------------*/
183
184#if IS_ENABLED(CONFIG_USB_ULPI) 54#if IS_ENABLED(CONFIG_USB_ULPI)
185struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, 55struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
186 unsigned int flags); 56 unsigned int flags);
diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h
index f92eb635b9d3..11525d8d89a7 100644
--- a/include/linux/usb/usb338x.h
+++ b/include/linux/usb/usb338x.h
@@ -43,6 +43,10 @@
43#define IN_ENDPOINT_TYPE 12 43#define IN_ENDPOINT_TYPE 12
44#define OUT_ENDPOINT_ENABLE 10 44#define OUT_ENDPOINT_ENABLE 10
45#define OUT_ENDPOINT_TYPE 8 45#define OUT_ENDPOINT_TYPE 8
46#define USB3380_EP_CFG_MASK_IN ((0x3 << IN_ENDPOINT_TYPE) | \
47 BIT(IN_ENDPOINT_ENABLE))
48#define USB3380_EP_CFG_MASK_OUT ((0x3 << OUT_ENDPOINT_TYPE) | \
49 BIT(OUT_ENDPOINT_ENABLE))
46 50
47struct usb338x_usb_ext_regs { 51struct usb338x_usb_ext_regs {
48 u32 usbclass; 52 u32 usbclass;
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h
index 51865d05b267..ce63a2c3a612 100644
--- a/include/linux/virtio_byteorder.h
+++ b/include/linux/virtio_byteorder.h
@@ -3,17 +3,21 @@
3#include <linux/types.h> 3#include <linux/types.h>
4#include <uapi/linux/virtio_types.h> 4#include <uapi/linux/virtio_types.h>
5 5
6/* 6static inline bool virtio_legacy_is_little_endian(void)
7 * Low-level memory accessors for handling virtio in modern little endian and in 7{
8 * compatibility native endian format. 8#ifdef __LITTLE_ENDIAN
9 */ 9 return true;
10#else
11 return false;
12#endif
13}
10 14
11static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val) 15static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
12{ 16{
13 if (little_endian) 17 if (little_endian)
14 return le16_to_cpu((__force __le16)val); 18 return le16_to_cpu((__force __le16)val);
15 else 19 else
16 return (__force u16)val; 20 return be16_to_cpu((__force __be16)val);
17} 21}
18 22
19static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) 23static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
@@ -21,7 +25,7 @@ static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
21 if (little_endian) 25 if (little_endian)
22 return (__force __virtio16)cpu_to_le16(val); 26 return (__force __virtio16)cpu_to_le16(val);
23 else 27 else
24 return (__force __virtio16)val; 28 return (__force __virtio16)cpu_to_be16(val);
25} 29}
26 30
27static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) 31static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
@@ -29,7 +33,7 @@ static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
29 if (little_endian) 33 if (little_endian)
30 return le32_to_cpu((__force __le32)val); 34 return le32_to_cpu((__force __le32)val);
31 else 35 else
32 return (__force u32)val; 36 return be32_to_cpu((__force __be32)val);
33} 37}
34 38
35static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) 39static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
@@ -37,7 +41,7 @@ static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
37 if (little_endian) 41 if (little_endian)
38 return (__force __virtio32)cpu_to_le32(val); 42 return (__force __virtio32)cpu_to_le32(val);
39 else 43 else
40 return (__force __virtio32)val; 44 return (__force __virtio32)cpu_to_be32(val);
41} 45}
42 46
43static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) 47static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
@@ -45,7 +49,7 @@ static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
45 if (little_endian) 49 if (little_endian)
46 return le64_to_cpu((__force __le64)val); 50 return le64_to_cpu((__force __le64)val);
47 else 51 else
48 return (__force u64)val; 52 return be64_to_cpu((__force __be64)val);
49} 53}
50 54
51static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) 55static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
@@ -53,7 +57,7 @@ static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
53 if (little_endian) 57 if (little_endian)
54 return (__force __virtio64)cpu_to_le64(val); 58 return (__force __virtio64)cpu_to_le64(val);
55 else 59 else
56 return (__force __virtio64)val; 60 return (__force __virtio64)cpu_to_be64(val);
57} 61}
58 62
59#endif /* _LINUX_VIRTIO_BYTEORDER */ 63#endif /* _LINUX_VIRTIO_BYTEORDER */
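The behavioural change above affects legacy (pre-1.0) virtio devices on big-endian hosts: their fields are now converted with the be*_to_cpu() helpers rather than passed through untouched, while modern devices remain little-endian. A small illustration of the low-level helper; the wire value is arbitrary:

#include <linux/virtio_byteorder.h>

static u16 example_decode(__virtio16 wire)
{
	/* legacy layout follows the host: big endian on BE kernels;
	 * passing 'true' instead would force the modern LE layout */
	return __virtio16_to_cpu(virtio_legacy_is_little_endian(), wire);
}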
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 1e306f727edc..e5ce8ab0b8b0 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -205,35 +205,41 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
205 return 0; 205 return 0;
206} 206}
207 207
208static inline bool virtio_is_little_endian(struct virtio_device *vdev)
209{
210 return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
211 virtio_legacy_is_little_endian();
212}
213
208/* Memory accessors */ 214/* Memory accessors */
209static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val) 215static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
210{ 216{
211 return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 217 return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
212} 218}
213 219
214static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val) 220static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
215{ 221{
216 return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 222 return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
217} 223}
218 224
219static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val) 225static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
220{ 226{
221 return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 227 return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
222} 228}
223 229
224static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val) 230static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
225{ 231{
226 return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 232 return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
227} 233}
228 234
229static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val) 235static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
230{ 236{
231 return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 237 return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
232} 238}
233 239
234static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) 240static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
235{ 241{
236 return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); 242 return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
237} 243}
238 244
239/* Config space accessors. */ 245/* Config space accessors. */
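With virtio_is_little_endian(), every per-device accessor answers one question: a VIRTIO_F_VERSION_1 device is always little-endian, a legacy device follows the host. A hedged sketch of a driver converting a device-supplied field (the field itself is hypothetical):

#include <linux/virtio_config.h>

static u32 example_read_count(struct virtio_device *vdev, __virtio32 raw)
{
	/* byte-swaps only when the device layout differs from the CPU's */
	return virtio32_to_cpu(vdev, raw);
}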
diff --git a/include/linux/vme.h b/include/linux/vme.h
index 79242e9c06b8..c0131358f351 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -120,6 +120,8 @@ void vme_free_consistent(struct vme_resource *, size_t, void *,
120 dma_addr_t); 120 dma_addr_t);
121 121
122size_t vme_get_size(struct vme_resource *); 122size_t vme_get_size(struct vme_resource *);
123int vme_check_window(u32 aspace, unsigned long long vme_base,
124 unsigned long long size);
123 125
124struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32); 126struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
125int vme_slave_set(struct vme_resource *, int, unsigned long long, 127int vme_slave_set(struct vme_resource *, int, unsigned long long,
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index a3fa537e717a..bc6c28d04263 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -226,33 +226,39 @@ static inline void vringh_notify(struct vringh *vrh)
226 vrh->notify(vrh); 226 vrh->notify(vrh);
227} 227}
228 228
229static inline bool vringh_is_little_endian(const struct vringh *vrh)
230{
231 return vrh->little_endian ||
232 virtio_legacy_is_little_endian();
233}
234
229static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val) 235static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
230{ 236{
231 return __virtio16_to_cpu(vrh->little_endian, val); 237 return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
232} 238}
233 239
234static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val) 240static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
235{ 241{
236 return __cpu_to_virtio16(vrh->little_endian, val); 242 return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
237} 243}
238 244
239static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val) 245static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
240{ 246{
241 return __virtio32_to_cpu(vrh->little_endian, val); 247 return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
242} 248}
243 249
244static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val) 250static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
245{ 251{
246 return __cpu_to_virtio32(vrh->little_endian, val); 252 return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
247} 253}
248 254
249static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val) 255static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
250{ 256{
251 return __virtio64_to_cpu(vrh->little_endian, val); 257 return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
252} 258}
253 259
254static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) 260static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
255{ 261{
256 return __cpu_to_virtio64(vrh->little_endian, val); 262 return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
257} 263}
258#endif /* _LINUX_VRINGH_H */ 264#endif /* _LINUX_VRINGH_H */
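The vringh accessors mirror the virtio ones: vringh_is_little_endian() folds the ring's little_endian flag together with virtio_legacy_is_little_endian(). A rough host-side sketch (the helper name is made up for illustration):

#include <linux/vringh.h>
#include <uapi/linux/virtio_ring.h>

static u64 my_desc_addr(const struct vringh *vrh,
			const struct vring_desc *desc)
{
	/* Converts a guest-written field using the ring's endianness */
	return vringh64_to_cpu(vrh, desc->addr);
}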
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d69ac4ecc88b..1e1bf9f963a9 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -358,6 +358,19 @@ do { \
358 __ret; \ 358 __ret; \
359}) 359})
360 360
361#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
362 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
363 cmd1; schedule(); cmd2)
364/*
365 * Just like wait_event_cmd(), except it sets the exclusive flag
366 */
367#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
368do { \
369 if (condition) \
370 break; \
371 __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \
372} while (0)
373
361#define __wait_event_cmd(wq, condition, cmd1, cmd2) \ 374#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
362 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 375 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
363 cmd1; schedule(); cmd2) 376 cmd1; schedule(); cmd2)
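The new macro behaves like wait_event_cmd() but registers the sleeper as exclusive, so a plain wake_up() wakes at most one such waiter. A sketch of the intended lock-juggling pattern; the names are hypothetical, but callers that drop a lock across the sleep use it the same way:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static DEFINE_SPINLOCK(my_lock);
static unsigned int my_free_slots;

static void my_get_slot(void)
{
	spin_lock(&my_lock);
	/* The condition is checked with the lock held; it is dropped only
	 * around schedule() and re-taken before re-checking. */
	wait_event_exclusive_cmd(my_wq, my_free_slots > 0,
				 spin_unlock(&my_lock),
				 spin_lock(&my_lock));
	my_free_slots--;
	spin_unlock(&my_lock);
}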
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index a746bf5216f8..f47feada5b42 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -65,6 +65,8 @@ struct watchdog_ops {
65 * @driver-data:Pointer to the drivers private data. 65 * @driver-data:Pointer to the drivers private data.
66 * @lock: Lock for watchdog core internal use only. 66 * @lock: Lock for watchdog core internal use only.
67 * @status: Field that contains the devices internal status bits. 67 * @status: Field that contains the devices internal status bits.
68 * @deferred: entry in wtd_deferred_reg_list which is used to
69 * register early initialized watchdogs.
68 * 70 *
69 * The watchdog_device structure contains all information about a 71 * The watchdog_device structure contains all information about a
70 * watchdog timer device. 72 * watchdog timer device.
@@ -95,6 +97,7 @@ struct watchdog_device {
95#define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */ 97#define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */
96#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */ 98#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */
97#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */ 99#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
100 struct list_head deferred;
98}; 101};
99 102
100#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) 103#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index deee212af8e0..738b30b39b68 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -424,6 +424,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
424void free_workqueue_attrs(struct workqueue_attrs *attrs); 424void free_workqueue_attrs(struct workqueue_attrs *attrs);
425int apply_workqueue_attrs(struct workqueue_struct *wq, 425int apply_workqueue_attrs(struct workqueue_struct *wq,
426 const struct workqueue_attrs *attrs); 426 const struct workqueue_attrs *attrs);
427int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
427 428
428extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 429extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
429 struct work_struct *work); 430 struct work_struct *work);
@@ -434,7 +435,6 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
434 435
435extern void flush_workqueue(struct workqueue_struct *wq); 436extern void flush_workqueue(struct workqueue_struct *wq);
436extern void drain_workqueue(struct workqueue_struct *wq); 437extern void drain_workqueue(struct workqueue_struct *wq);
437extern void flush_scheduled_work(void);
438 438
439extern int schedule_on_each_cpu(work_func_t func); 439extern int schedule_on_each_cpu(work_func_t func);
440 440
@@ -531,6 +531,35 @@ static inline bool schedule_work(struct work_struct *work)
531} 531}
532 532
533/** 533/**
534 * flush_scheduled_work - ensure that any scheduled work has run to completion.
535 *
536 * Forces execution of the kernel-global workqueue and blocks until its
537 * completion.
538 *
539 * Think twice before calling this function! It's very easy to get into
540 * trouble if you don't take great care. Either of the following situations
541 * will lead to deadlock:
542 *
543 * One of the work items currently on the workqueue needs to acquire
544 * a lock held by your code or its caller.
545 *
546 * Your code is running in the context of a work routine.
547 *
548 * They will be detected by lockdep when they occur, but the first might not
549 * occur very often. It depends on what work items are on the workqueue and
550 * what locks they need, which you have no control over.
551 *
552 * In most situations flushing the entire workqueue is overkill; you merely
553 * need to know that a particular work item isn't queued and isn't running.
554 * In such cases you should use cancel_delayed_work_sync() or
555 * cancel_work_sync() instead.
556 */
557static inline void flush_scheduled_work(void)
558{
559 flush_workqueue(system_wq);
560}
561
562/**
534 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 563 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
535 * @cpu: cpu to use 564 * @cpu: cpu to use
536 * @dwork: job to be done 565 * @dwork: job to be done
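The kernel-doc above steers callers away from flushing the entire system workqueue; for most drivers it is enough to synchronise against their own work items, roughly like this (the structure and names are illustrative):

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct irq_work;
	struct delayed_work poll_work;
};

static void my_dev_shutdown(struct my_dev *d)
{
	/* Waits only for these items; no system-wide flush, so no deadlock
	 * against unrelated work that might want our locks. */
	cancel_work_sync(&d->irq_work);
	cancel_delayed_work_sync(&d->poll_work);
}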
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b2dd371ec0ca..b333c945e571 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -7,6 +7,8 @@
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/workqueue.h> 8#include <linux/workqueue.h>
9#include <linux/fs.h> 9#include <linux/fs.h>
10#include <linux/flex_proportions.h>
11#include <linux/backing-dev-defs.h>
10 12
11DECLARE_PER_CPU(int, dirty_throttle_leaks); 13DECLARE_PER_CPU(int, dirty_throttle_leaks);
12 14
@@ -84,18 +86,95 @@ struct writeback_control {
84 unsigned for_reclaim:1; /* Invoked from the page allocator */ 86 unsigned for_reclaim:1; /* Invoked from the page allocator */
85 unsigned range_cyclic:1; /* range_start is cyclic */ 87 unsigned range_cyclic:1; /* range_start is cyclic */
86 unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ 88 unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
89#ifdef CONFIG_CGROUP_WRITEBACK
90 struct bdi_writeback *wb; /* wb this writeback is issued under */
91 struct inode *inode; /* inode being written out */
92
93 /* foreign inode detection, see wbc_detach_inode() */
94 int wb_id; /* current wb id */
95 int wb_lcand_id; /* last foreign candidate wb id */
96 int wb_tcand_id; /* this foreign candidate wb id */
97 size_t wb_bytes; /* bytes written by current wb */
98 size_t wb_lcand_bytes; /* bytes written by last candidate */
99 size_t wb_tcand_bytes; /* bytes written by this candidate */
100#endif
87}; 101};
88 102
89/* 103/*
104 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
105 * and are measured against each other in. There always is one global
106 * domain, global_wb_domain, that every wb in the system is a member of.
107 * This allows measuring the relative bandwidth of each wb to distribute
108 * dirtyable memory accordingly.
109 */
110struct wb_domain {
111 spinlock_t lock;
112
113 /*
114 * Scale the writeback cache size proportional to the relative
115 * writeout speed.
116 *
117 * We do this by keeping a floating proportion between BDIs, based
118 * on page writeback completions [end_page_writeback()]. Those
119 * devices that write out pages fastest will get the larger share,
120 * while the slower will get a smaller share.
121 *
122 * We use page writeout completions because we are interested in
123 * getting rid of dirty pages. Having them written out is the
124 * primary goal.
125 *
126 * We introduce a concept of time, a period over which we measure
127 * these events, because demand can/will vary over time. The length
128 * of this period itself is measured in page writeback completions.
129 */
130 struct fprop_global completions;
131 struct timer_list period_timer; /* timer for aging of completions */
132 unsigned long period_time;
133
134 /*
135 * The dirtyable memory and dirty threshold could be suddenly
136 * knocked down by a large amount (eg. on the startup of KVM in a
137 * swapless system). This may throw the system into deep dirty
138 * exceeded state and throttle heavy/light dirtiers alike. To
139 * retain good responsiveness, maintain global_dirty_limit for
140 * tracking slowly down to the knocked down dirty threshold.
141 *
142 * Both fields are protected by ->lock.
143 */
144 unsigned long dirty_limit_tstamp;
145 unsigned long dirty_limit;
146};
147
148/**
149 * wb_domain_size_changed - memory available to a wb_domain has changed
150 * @dom: wb_domain of interest
151 *
152 * This function should be called when the amount of memory available to
153 * @dom has changed. It resets @dom's dirty limit parameters to prevent
154 * the past values which don't match the current configuration from skewing
155 * dirty throttling. Without this, when memory size of a wb_domain is
156 * greatly reduced, the dirty throttling logic may allow too many pages to
157 * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
158 * that situation.
159 */
160static inline void wb_domain_size_changed(struct wb_domain *dom)
161{
162 spin_lock(&dom->lock);
163 dom->dirty_limit_tstamp = jiffies;
164 dom->dirty_limit = 0;
165 spin_unlock(&dom->lock);
166}
167
168/*
90 * fs/fs-writeback.c 169 * fs/fs-writeback.c
91 */ 170 */
92struct bdi_writeback; 171struct bdi_writeback;
93void writeback_inodes_sb(struct super_block *, enum wb_reason reason); 172void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
94void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, 173void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
95 enum wb_reason reason); 174 enum wb_reason reason);
96int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason); 175bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
97int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr, 176bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
98 enum wb_reason reason); 177 enum wb_reason reason);
99void sync_inodes_sb(struct super_block *); 178void sync_inodes_sb(struct super_block *);
100void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); 179void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
101void inode_wait_for_writeback(struct inode *inode); 180void inode_wait_for_writeback(struct inode *inode);
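A wb_domain owner is expected to initialise the domain once and call wb_domain_size_changed() whenever the memory it is measured against changes. A compressed sketch under that assumption (the trigger function is hypothetical):

#include <linux/gfp.h>
#include <linux/writeback.h>

static struct wb_domain my_domain;

static int my_domain_setup(void)
{
	return wb_domain_init(&my_domain, GFP_KERNEL);
}

static void my_domain_resized(void)
{
	/* Drop the stale dirty_limit so throttling re-converges quickly */
	wb_domain_size_changed(&my_domain);
}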
@@ -107,6 +186,123 @@ static inline void wait_on_inode(struct inode *inode)
107 wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE); 186 wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
108} 187}
109 188
189#ifdef CONFIG_CGROUP_WRITEBACK
190
191#include <linux/cgroup.h>
192#include <linux/bio.h>
193
194void __inode_attach_wb(struct inode *inode, struct page *page);
195void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
196 struct inode *inode)
197 __releases(&inode->i_lock);
198void wbc_detach_inode(struct writeback_control *wbc);
199void wbc_account_io(struct writeback_control *wbc, struct page *page,
200 size_t bytes);
201
202/**
203 * inode_attach_wb - associate an inode with its wb
204 * @inode: inode of interest
205 * @page: page being dirtied (may be NULL)
206 *
207 * If @inode doesn't have its wb, associate it with the wb matching the
208 * memcg of @page or, if @page is NULL, %current. May be called w/ or w/o
209 * @inode->i_lock.
210 */
211static inline void inode_attach_wb(struct inode *inode, struct page *page)
212{
213 if (!inode->i_wb)
214 __inode_attach_wb(inode, page);
215}
216
217/**
218 * inode_detach_wb - disassociate an inode from its wb
219 * @inode: inode of interest
220 *
221 * @inode is being freed. Detach from its wb.
222 */
223static inline void inode_detach_wb(struct inode *inode)
224{
225 if (inode->i_wb) {
226 wb_put(inode->i_wb);
227 inode->i_wb = NULL;
228 }
229}
230
231/**
232 * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
233 * @wbc: writeback_control of interest
234 * @inode: target inode
235 *
236 * This function is to be used by __filemap_fdatawrite_range(), which is an
237 * alternative entry point into writeback code, and first ensures @inode is
238 * associated with a bdi_writeback and attaches it to @wbc.
239 */
240static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
241 struct inode *inode)
242{
243 spin_lock(&inode->i_lock);
244 inode_attach_wb(inode, NULL);
245 wbc_attach_and_unlock_inode(wbc, inode);
246}
247
248/**
249 * wbc_init_bio - writeback specific initialization of bio
250 * @wbc: writeback_control for the writeback in progress
251 * @bio: bio to be initialized
252 *
253 * @bio is a part of the writeback in progress controlled by @wbc. Perform
254 * writeback specific initialization. This is used to apply the cgroup
255 * writeback context.
256 */
257static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
258{
259 /*
260 * pageout() path doesn't attach @wbc to the inode being written
261 * out. This is intentional as we don't want the function to block
262 * behind a slow cgroup. Ultimately, we want pageout() to kick off
263 * regular writeback instead of writing things out itself.
264 */
265 if (wbc->wb)
266 bio_associate_blkcg(bio, wbc->wb->blkcg_css);
267}
268
269#else /* CONFIG_CGROUP_WRITEBACK */
270
271static inline void inode_attach_wb(struct inode *inode, struct page *page)
272{
273}
274
275static inline void inode_detach_wb(struct inode *inode)
276{
277}
278
279static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
280 struct inode *inode)
281 __releases(&inode->i_lock)
282{
283 spin_unlock(&inode->i_lock);
284}
285
286static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
287 struct inode *inode)
288{
289}
290
291static inline void wbc_detach_inode(struct writeback_control *wbc)
292{
293}
294
295static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
296{
297}
298
299static inline void wbc_account_io(struct writeback_control *wbc,
300 struct page *page, size_t bytes)
301{
302}
303
304#endif /* CONFIG_CGROUP_WRITEBACK */
305
110/* 306/*
111 * mm/page-writeback.c 307 * mm/page-writeback.c
112 */ 308 */
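Taken together, the helpers above bracket a writeback pass so the issued I/O is attributed to the right cgroup: attach before writing, tag and account each bio, detach when done. A condensed sketch of such a caller (page submission details are elided; the function itself is illustrative, not from the patch):

#include <linux/fs.h>
#include <linux/writeback.h>

static void my_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
{
	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_ALL,
		.range_start = start,
		.range_end   = end,
	};

	/* Make sure the inode has a wb and charge this pass to it */
	wbc_attach_fdatawrite_inode(&wbc, inode);

	/*
	 * ... build and submit bios here: call wbc_init_bio() on each bio
	 * and wbc_account_io() for every page written, so the bytes are
	 * credited to the owning bdi_writeback ...
	 */

	wbc_detach_inode(&wbc);	/* finalise foreign-inode detection stats */
}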
@@ -120,8 +316,12 @@ static inline void laptop_sync_completion(void) { }
120#endif 316#endif
121void throttle_vm_writeout(gfp_t gfp_mask); 317void throttle_vm_writeout(gfp_t gfp_mask);
122bool zone_dirty_ok(struct zone *zone); 318bool zone_dirty_ok(struct zone *zone);
319int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
320#ifdef CONFIG_CGROUP_WRITEBACK
321void wb_domain_exit(struct wb_domain *dom);
322#endif
123 323
124extern unsigned long global_dirty_limit; 324extern struct wb_domain global_wb_domain;
125 325
126/* These are exported to sysctl. */ 326/* These are exported to sysctl. */
127extern int dirty_background_ratio; 327extern int dirty_background_ratio;
@@ -155,19 +355,12 @@ int dirty_writeback_centisecs_handler(struct ctl_table *, int,
155 void __user *, size_t *, loff_t *); 355 void __user *, size_t *, loff_t *);
156 356
157void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); 357void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
158unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, 358unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
159 unsigned long dirty);
160
161void __bdi_update_bandwidth(struct backing_dev_info *bdi,
162 unsigned long thresh,
163 unsigned long bg_thresh,
164 unsigned long dirty,
165 unsigned long bdi_thresh,
166 unsigned long bdi_dirty,
167 unsigned long start_time);
168 359
360void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
169void page_writeback_init(void); 361void page_writeback_init(void);
170void balance_dirty_pages_ratelimited(struct address_space *mapping); 362void balance_dirty_pages_ratelimited(struct address_space *mapping);
363bool wb_over_bg_thresh(struct bdi_writeback *wb);
171 364
172typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, 365typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
173 void *data); 366 void *data);
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 56529b34dc63..d30eff3d84d5 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -81,7 +81,8 @@ struct zpool_driver {
81 atomic_t refcount; 81 atomic_t refcount;
82 struct list_head list; 82 struct list_head list;
83 83
84 void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops); 84 void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops,
85 struct zpool *zpool);
85 void (*destroy)(void *pool); 86 void (*destroy)(void *pool);
86 87
87 int (*malloc)(void *pool, size_t size, gfp_t gfp, 88 int (*malloc)(void *pool, size_t size, gfp_t gfp,
@@ -102,6 +103,4 @@ void zpool_register_driver(struct zpool_driver *driver);
102 103
103int zpool_unregister_driver(struct zpool_driver *driver); 104int zpool_unregister_driver(struct zpool_driver *driver);
104 105
105int zpool_evict(void *pool, unsigned long handle);
106
107#endif 106#endif
diff --git a/include/media/adp1653.h b/include/media/adp1653.h
index 1d9b48a3bd80..9779c8549eb4 100644
--- a/include/media/adp1653.h
+++ b/include/media/adp1653.h
@@ -100,9 +100,11 @@ struct adp1653_platform_data {
100 int (*power)(struct v4l2_subdev *sd, int on); 100 int (*power)(struct v4l2_subdev *sd, int on);
101 101
102 u32 max_flash_timeout; /* flash light timeout in us */ 102 u32 max_flash_timeout; /* flash light timeout in us */
103 u32 max_flash_intensity; /* led intensity, flash mode */ 103 u32 max_flash_intensity; /* led intensity, flash mode, mA */
104 u32 max_torch_intensity; /* led intensity, torch mode */ 104 u32 max_torch_intensity; /* led intensity, torch mode, mA */
105 u32 max_indicator_intensity; /* indicator led intensity */ 105 u32 max_indicator_intensity; /* indicator led intensity, uA */
106
107 struct gpio_desc *enable_gpio; /* for device-tree based boot */
106}; 108};
107 109
108#define to_adp1653_flash(sd) container_of(sd, struct adp1653_flash, subdev) 110#define to_adp1653_flash(sd) container_of(sd, struct adp1653_flash, subdev)
diff --git a/include/media/adv7511.h b/include/media/adv7511.h
index bb78bed9a5b8..d83b91d80764 100644
--- a/include/media/adv7511.h
+++ b/include/media/adv7511.h
@@ -40,9 +40,10 @@ struct adv7511_cec_arg {
40}; 40};
41 41
42struct adv7511_platform_data { 42struct adv7511_platform_data {
43 uint8_t i2c_edid; 43 u8 i2c_edid;
44 uint8_t i2c_cec; 44 u8 i2c_cec;
45 uint32_t cec_clk; 45 u8 i2c_pktmem;
46 u32 cec_clk;
46}; 47};
47 48
48#endif 49#endif
diff --git a/include/media/adv7604.h b/include/media/adv7604.h
index 9ecf353160c1..a913859bfd30 100644
--- a/include/media/adv7604.h
+++ b/include/media/adv7604.h
@@ -168,6 +168,5 @@ enum adv76xx_pad {
168 168
169/* notify events */ 169/* notify events */
170#define ADV76XX_HOTPLUG 1 170#define ADV76XX_HOTPLUG 1
171#define ADV76XX_FMT_CHANGE 2
172 171
173#endif 172#endif
diff --git a/include/media/adv7842.h b/include/media/adv7842.h
index 924cbb8d004a..bc249709bf35 100644
--- a/include/media/adv7842.h
+++ b/include/media/adv7842.h
@@ -30,14 +30,38 @@ enum adv7842_ain_sel {
30 ADV7842_AIN9_4_5_6_SYNC_2_1 = 4, 30 ADV7842_AIN9_4_5_6_SYNC_2_1 = 4,
31}; 31};
32 32
33/* Bus rotation and reordering (IO register 0x04, [7:5]) */ 33/*
34enum adv7842_op_ch_sel { 34 * Bus rotation and reordering. This is used to specify component reordering on
35 ADV7842_OP_CH_SEL_GBR = 0, 35 * the board and describes the components order on the bus when the ADV7842
36 ADV7842_OP_CH_SEL_GRB = 1, 36 * outputs RGB.
37 ADV7842_OP_CH_SEL_BGR = 2, 37 */
38 ADV7842_OP_CH_SEL_RGB = 3, 38enum adv7842_bus_order {
39 ADV7842_OP_CH_SEL_BRG = 4, 39 ADV7842_BUS_ORDER_RGB, /* No operation */
40 ADV7842_OP_CH_SEL_RBG = 5, 40 ADV7842_BUS_ORDER_GRB, /* Swap 1-2 */
41 ADV7842_BUS_ORDER_RBG, /* Swap 2-3 */
42 ADV7842_BUS_ORDER_BGR, /* Swap 1-3 */
43 ADV7842_BUS_ORDER_BRG, /* Rotate right */
44 ADV7842_BUS_ORDER_GBR, /* Rotate left */
45};
46
47/* Input Color Space (IO register 0x02, [7:4]) */
48enum adv7842_inp_color_space {
49 ADV7842_INP_COLOR_SPACE_LIM_RGB = 0,
50 ADV7842_INP_COLOR_SPACE_FULL_RGB = 1,
51 ADV7842_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
52 ADV7842_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
53 ADV7842_INP_COLOR_SPACE_XVYCC_601 = 4,
54 ADV7842_INP_COLOR_SPACE_XVYCC_709 = 5,
55 ADV7842_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
56 ADV7842_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
57 ADV7842_INP_COLOR_SPACE_AUTO = 0xf,
58};
59
60/* Select output format (IO register 0x03, [4:2]) */
61enum adv7842_op_format_mode_sel {
62 ADV7842_OP_FORMAT_MODE0 = 0x00,
63 ADV7842_OP_FORMAT_MODE1 = 0x04,
64 ADV7842_OP_FORMAT_MODE2 = 0x08,
41}; 65};
42 66
43/* Mode of operation */ 67/* Mode of operation */
@@ -61,44 +85,6 @@ enum adv7842_vid_std_select {
61 ADV7842_HDMI_COMP_VID_STD_HD_1250P = 0x1e, 85 ADV7842_HDMI_COMP_VID_STD_HD_1250P = 0x1e,
62}; 86};
63 87
64/* Input Color Space (IO register 0x02, [7:4]) */
65enum adv7842_inp_color_space {
66 ADV7842_INP_COLOR_SPACE_LIM_RGB = 0,
67 ADV7842_INP_COLOR_SPACE_FULL_RGB = 1,
68 ADV7842_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
69 ADV7842_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
70 ADV7842_INP_COLOR_SPACE_XVYCC_601 = 4,
71 ADV7842_INP_COLOR_SPACE_XVYCC_709 = 5,
72 ADV7842_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
73 ADV7842_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
74 ADV7842_INP_COLOR_SPACE_AUTO = 0xf,
75};
76
77/* Select output format (IO register 0x03, [7:0]) */
78enum adv7842_op_format_sel {
79 ADV7842_OP_FORMAT_SEL_SDR_ITU656_8 = 0x00,
80 ADV7842_OP_FORMAT_SEL_SDR_ITU656_10 = 0x01,
81 ADV7842_OP_FORMAT_SEL_SDR_ITU656_12_MODE0 = 0x02,
82 ADV7842_OP_FORMAT_SEL_SDR_ITU656_12_MODE1 = 0x06,
83 ADV7842_OP_FORMAT_SEL_SDR_ITU656_12_MODE2 = 0x0a,
84 ADV7842_OP_FORMAT_SEL_DDR_422_8 = 0x20,
85 ADV7842_OP_FORMAT_SEL_DDR_422_10 = 0x21,
86 ADV7842_OP_FORMAT_SEL_DDR_422_12_MODE0 = 0x22,
87 ADV7842_OP_FORMAT_SEL_DDR_422_12_MODE1 = 0x23,
88 ADV7842_OP_FORMAT_SEL_DDR_422_12_MODE2 = 0x24,
89 ADV7842_OP_FORMAT_SEL_SDR_444_24 = 0x40,
90 ADV7842_OP_FORMAT_SEL_SDR_444_30 = 0x41,
91 ADV7842_OP_FORMAT_SEL_SDR_444_36_MODE0 = 0x42,
92 ADV7842_OP_FORMAT_SEL_DDR_444_24 = 0x60,
93 ADV7842_OP_FORMAT_SEL_DDR_444_30 = 0x61,
94 ADV7842_OP_FORMAT_SEL_DDR_444_36 = 0x62,
95 ADV7842_OP_FORMAT_SEL_SDR_ITU656_16 = 0x80,
96 ADV7842_OP_FORMAT_SEL_SDR_ITU656_20 = 0x81,
97 ADV7842_OP_FORMAT_SEL_SDR_ITU656_24_MODE0 = 0x82,
98 ADV7842_OP_FORMAT_SEL_SDR_ITU656_24_MODE1 = 0x86,
99 ADV7842_OP_FORMAT_SEL_SDR_ITU656_24_MODE2 = 0x8a,
100};
101
102enum adv7842_select_input { 88enum adv7842_select_input {
103 ADV7842_SELECT_HDMI_PORT_A, 89 ADV7842_SELECT_HDMI_PORT_A,
104 ADV7842_SELECT_HDMI_PORT_B, 90 ADV7842_SELECT_HDMI_PORT_B,
@@ -117,35 +103,35 @@ enum adv7842_drive_strength {
117 103
118struct adv7842_sdp_csc_coeff { 104struct adv7842_sdp_csc_coeff {
119 bool manual; 105 bool manual;
120 uint16_t scaling; 106 u16 scaling;
121 uint16_t A1; 107 u16 A1;
122 uint16_t A2; 108 u16 A2;
123 uint16_t A3; 109 u16 A3;
124 uint16_t A4; 110 u16 A4;
125 uint16_t B1; 111 u16 B1;
126 uint16_t B2; 112 u16 B2;
127 uint16_t B3; 113 u16 B3;
128 uint16_t B4; 114 u16 B4;
129 uint16_t C1; 115 u16 C1;
130 uint16_t C2; 116 u16 C2;
131 uint16_t C3; 117 u16 C3;
132 uint16_t C4; 118 u16 C4;
133}; 119};
134 120
135struct adv7842_sdp_io_sync_adjustment { 121struct adv7842_sdp_io_sync_adjustment {
136 bool adjust; 122 bool adjust;
137 uint16_t hs_beg; 123 u16 hs_beg;
138 uint16_t hs_width; 124 u16 hs_width;
139 uint16_t de_beg; 125 u16 de_beg;
140 uint16_t de_end; 126 u16 de_end;
141 uint8_t vs_beg_o; 127 u8 vs_beg_o;
142 uint8_t vs_beg_e; 128 u8 vs_beg_e;
143 uint8_t vs_end_o; 129 u8 vs_end_o;
144 uint8_t vs_end_e; 130 u8 vs_end_e;
145 uint8_t de_v_beg_o; 131 u8 de_v_beg_o;
146 uint8_t de_v_beg_e; 132 u8 de_v_beg_e;
147 uint8_t de_v_end_o; 133 u8 de_v_end_o;
148 uint8_t de_v_end_e; 134 u8 de_v_end_e;
149}; 135};
150 136
151/* Platform dependent definition */ 137/* Platform dependent definition */
@@ -163,7 +149,10 @@ struct adv7842_platform_data {
163 enum adv7842_ain_sel ain_sel; 149 enum adv7842_ain_sel ain_sel;
164 150
165 /* Bus rotation and reordering */ 151 /* Bus rotation and reordering */
166 enum adv7842_op_ch_sel op_ch_sel; 152 enum adv7842_bus_order bus_order;
153
154 /* Select output format mode */
155 enum adv7842_op_format_mode_sel op_format_mode_sel;
167 156
168 /* Default mode */ 157 /* Default mode */
169 enum adv7842_mode mode; 158 enum adv7842_mode mode;
@@ -174,20 +163,15 @@ struct adv7842_platform_data {
174 /* Video standard */ 163 /* Video standard */
175 enum adv7842_vid_std_select vid_std_select; 164 enum adv7842_vid_std_select vid_std_select;
176 165
177 /* Select output format */
178 enum adv7842_op_format_sel op_format_sel;
179
180 /* IO register 0x02 */ 166 /* IO register 0x02 */
181 unsigned alt_gamma:1; 167 unsigned alt_gamma:1;
182 unsigned op_656_range:1; 168 unsigned op_656_range:1;
183 unsigned rgb_out:1;
184 unsigned alt_data_sat:1; 169 unsigned alt_data_sat:1;
185 170
186 /* IO register 0x05 */ 171 /* IO register 0x05 */
187 unsigned blank_data:1; 172 unsigned blank_data:1;
188 unsigned insert_av_codes:1; 173 unsigned insert_av_codes:1;
189 unsigned replicate_av_codes:1; 174 unsigned replicate_av_codes:1;
190 unsigned invert_cbcr:1;
191 175
192 /* IO register 0x30 */ 176 /* IO register 0x30 */
193 unsigned output_bus_lsb_to_msb:1; 177 unsigned output_bus_lsb_to_msb:1;
@@ -246,9 +230,6 @@ struct adv7842_platform_data {
246#define V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL (V4L2_CID_DV_CLASS_BASE + 0x1001) 230#define V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL (V4L2_CID_DV_CLASS_BASE + 0x1001)
247#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002) 231#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002)
248 232
249/* notify events */
250#define ADV7842_FMT_CHANGE 1
251
252/* custom ioctl, used to test the external RAM that's used by the 233/* custom ioctl, used to test the external RAM that's used by the
253 * deinterlacer. */ 234 * deinterlacer. */
254#define ADV7842_CMD_RAM_TEST _IO('V', BASE_VIDIOC_PRIVATE) 235#define ADV7842_CMD_RAM_TEST _IO('V', BASE_VIDIOC_PRIVATE)
@@ -256,5 +237,6 @@ struct adv7842_platform_data {
256#define ADV7842_EDID_PORT_A 0 237#define ADV7842_EDID_PORT_A 0
257#define ADV7842_EDID_PORT_B 1 238#define ADV7842_EDID_PORT_B 1
258#define ADV7842_EDID_PORT_VGA 2 239#define ADV7842_EDID_PORT_VGA 2
240#define ADV7842_PAD_SOURCE 3
259 241
260#endif 242#endif
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 2c7fbca40b69..45534da57759 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -74,6 +74,8 @@ enum rc_filter_type {
74 * @input_dev: the input child device used to communicate events to userspace 74 * @input_dev: the input child device used to communicate events to userspace
75 * @driver_type: specifies if protocol decoding is done in hardware or software 75 * @driver_type: specifies if protocol decoding is done in hardware or software
76 * @idle: used to keep track of RX state 76 * @idle: used to keep track of RX state
77 * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
 78 * wakeup protocols are those provided by the raw IR encoders
77 * @allowed_protocols: bitmask with the supported RC_BIT_* protocols 79 * @allowed_protocols: bitmask with the supported RC_BIT_* protocols
78 * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols 80 * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
79 * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols 81 * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -134,6 +136,7 @@ struct rc_dev {
134 struct input_dev *input_dev; 136 struct input_dev *input_dev;
135 enum rc_driver_type driver_type; 137 enum rc_driver_type driver_type;
136 bool idle; 138 bool idle;
139 bool encode_wakeup;
137 u64 allowed_protocols; 140 u64 allowed_protocols;
138 u64 enabled_protocols; 141 u64 enabled_protocols;
139 u64 allowed_wakeup_protocols; 142 u64 allowed_wakeup_protocols;
@@ -239,10 +242,11 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
239 memset(ev, 0, sizeof(*ev)); 242 memset(ev, 0, sizeof(*ev));
240} 243}
241 244
242#define IR_MAX_DURATION 0xFFFFFFFF /* a bit more than 4 seconds */ 245#define IR_MAX_DURATION 500000000 /* 500 ms */
243#define US_TO_NS(usec) ((usec) * 1000) 246#define US_TO_NS(usec) ((usec) * 1000)
244#define MS_TO_US(msec) ((msec) * 1000) 247#define MS_TO_US(msec) ((msec) * 1000)
245#define MS_TO_NS(msec) ((msec) * 1000 * 1000) 248#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
249#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
246 250
247void ir_raw_event_handle(struct rc_dev *dev); 251void ir_raw_event_handle(struct rc_dev *dev);
248int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); 252int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -250,6 +254,9 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
250int ir_raw_event_store_with_filter(struct rc_dev *dev, 254int ir_raw_event_store_with_filter(struct rc_dev *dev,
251 struct ir_raw_event *ev); 255 struct ir_raw_event *ev);
252void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); 256void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
257int ir_raw_encode_scancode(u64 protocols,
258 const struct rc_scancode_filter *scancode,
259 struct ir_raw_event *events, unsigned int max);
253 260
254static inline void ir_raw_event_reset(struct rc_dev *dev) 261static inline void ir_raw_event_reset(struct rc_dev *dev)
255{ 262{
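Drivers that set @encode_wakeup are expected to expand a wakeup scancode into raw timings with ir_raw_encode_scancode() and load those into hardware. A hedged sketch; the buffer size, parameters and register programming are assumptions:

#include <linux/kernel.h>
#include <media/rc-core.h>

static int my_load_wakeup_pattern(struct rc_dev *dev, u64 protocols,
				  struct rc_scancode_filter *filter)
{
	struct ir_raw_event raw[64];	/* arbitrary size for the sketch */
	int count;

	count = ir_raw_encode_scancode(protocols, filter, raw,
				       ARRAY_SIZE(raw));
	if (count < 0)
		return count;

	/* ... program raw[0..count-1] into the wakeup comparator ... */
	return 0;
}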
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index e7a1514075ec..27763d5bd261 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -194,7 +194,10 @@ void rc_map_init(void);
194#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly" 194#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
195#define RC_MAP_STREAMZAP "rc-streamzap" 195#define RC_MAP_STREAMZAP "rc-streamzap"
196#define RC_MAP_TBS_NEC "rc-tbs-nec" 196#define RC_MAP_TBS_NEC "rc-tbs-nec"
197#define RC_MAP_TECHNISAT_TS35 "rc-technisat-ts35"
197#define RC_MAP_TECHNISAT_USB2 "rc-technisat-usb2" 198#define RC_MAP_TECHNISAT_USB2 "rc-technisat-usb2"
199#define RC_MAP_TERRATEC_CINERGY_C_PCI "rc-terratec-cinergy-c-pci"
200#define RC_MAP_TERRATEC_CINERGY_S2_HD "rc-terratec-cinergy-s2-hd"
198#define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs" 201#define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs"
199#define RC_MAP_TERRATEC_SLIM "rc-terratec-slim" 202#define RC_MAP_TERRATEC_SLIM "rc-terratec-slim"
200#define RC_MAP_TERRATEC_SLIM_2 "rc-terratec-slim-2" 203#define RC_MAP_TERRATEC_SLIM_2 "rc-terratec-slim-2"
@@ -204,6 +207,7 @@ void rc_map_init(void);
204#define RC_MAP_TOTAL_MEDIA_IN_HAND_02 "rc-total-media-in-hand-02" 207#define RC_MAP_TOTAL_MEDIA_IN_HAND_02 "rc-total-media-in-hand-02"
205#define RC_MAP_TREKSTOR "rc-trekstor" 208#define RC_MAP_TREKSTOR "rc-trekstor"
206#define RC_MAP_TT_1500 "rc-tt-1500" 209#define RC_MAP_TT_1500 "rc-tt-1500"
210#define RC_MAP_TWINHAN_DTV_CAB_CI "rc-twinhan-dtv-cab-ci"
207#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027" 211#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027"
208#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100" 212#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100"
209#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350" 213#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350"
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 4becc6716393..eecd3102a618 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -117,6 +117,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
117 * @vsync - the height of the vertical sync in lines. 117 * @vsync - the height of the vertical sync in lines.
118 * @polarities - the horizontal and vertical polarities (same as struct 118 * @polarities - the horizontal and vertical polarities (same as struct
119 * v4l2_bt_timings polarities). 119 * v4l2_bt_timings polarities).
120 * @interlaced - if this flag is true, it indicates interlaced format
120 * @fmt - the resulting timings. 121 * @fmt - the resulting timings.
121 * 122 *
122 * This function will attempt to detect if the given values correspond to a 123 * This function will attempt to detect if the given values correspond to a
@@ -124,7 +125,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
124 * in with the found CVT timings. 125 * in with the found CVT timings.
125 */ 126 */
126bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync, 127bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
127 u32 polarities, struct v4l2_dv_timings *fmt); 128 u32 polarities, bool interlaced, struct v4l2_dv_timings *fmt);
128 129
129/** v4l2_detect_gtf - detect if the given timings follow the GTF standard 130/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
130 * @frame_height - the total height of the frame (including blanking) in lines. 131 * @frame_height - the total height of the frame (including blanking) in lines.
@@ -132,6 +133,7 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
132 * @vsync - the height of the vertical sync in lines. 133 * @vsync - the height of the vertical sync in lines.
133 * @polarities - the horizontal and vertical polarities (same as struct 134 * @polarities - the horizontal and vertical polarities (same as struct
134 * v4l2_bt_timings polarities). 135 * v4l2_bt_timings polarities).
136 * @interlaced - if this flag is true, it indicates interlaced format
135 * @aspect - preferred aspect ratio. GTF has no method of determining the 137 * @aspect - preferred aspect ratio. GTF has no method of determining the
136 * aspect ratio in order to derive the image width from the 138 * aspect ratio in order to derive the image width from the
137 * image height, so it has to be passed explicitly. Usually 139 * image height, so it has to be passed explicitly. Usually
@@ -144,7 +146,7 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
144 * in with the found GTF timings. 146 * in with the found GTF timings.
145 */ 147 */
146bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync, 148bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
147 u32 polarities, struct v4l2_fract aspect, 149 u32 polarities, bool interlaced, struct v4l2_fract aspect,
148 struct v4l2_dv_timings *fmt); 150 struct v4l2_dv_timings *fmt);
149 151
150/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes 152/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
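Both detection helpers now take the measured interlace state explicitly. A minimal illustration of the updated call (the wrapper is hypothetical):

#include <media/v4l2-dv-timings.h>

static bool my_match_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
			 u32 polarities, bool interlaced,
			 struct v4l2_dv_timings *timings)
{
	/* interlaced is now passed through instead of being assumed false */
	return v4l2_detect_cvt(frame_height, hfreq, vsync, polarities,
			       interlaced, timings);
}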
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
new file mode 100644
index 000000000000..098236c083b8
--- /dev/null
+++ b/include/media/v4l2-flash-led-class.h
@@ -0,0 +1,148 @@
1/*
2 * V4L2 flash LED sub-device registration helpers.
3 *
4 * Copyright (C) 2015 Samsung Electronics Co., Ltd
5 * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _V4L2_FLASH_H
13#define _V4L2_FLASH_H
14
15#include <media/v4l2-ctrls.h>
16#include <media/v4l2-subdev.h>
17
18struct led_classdev_flash;
19struct led_classdev;
20struct v4l2_flash;
21enum led_brightness;
22
23/*
24 * struct v4l2_flash_ctrl_data - flash control initialization data, filled
25 * basing on the features declared by the LED flash
26 * class driver in the v4l2_flash_config
27 * @config: initialization data for a control
28 * @cid: contains v4l2 flash control id if the config
29 * field was initialized, 0 otherwise
30 */
31struct v4l2_flash_ctrl_data {
32 struct v4l2_ctrl_config config;
33 u32 cid;
34};
35
36struct v4l2_flash_ops {
37 /* setup strobing the flash by hardware pin state assertion */
38 int (*external_strobe_set)(struct v4l2_flash *v4l2_flash,
39 bool enable);
40 /* convert intensity to brightness in a device specific manner */
41 enum led_brightness (*intensity_to_led_brightness)
42 (struct v4l2_flash *v4l2_flash, s32 intensity);
43 /* convert brightness to intensity in a device specific manner */
44 s32 (*led_brightness_to_intensity)
45 (struct v4l2_flash *v4l2_flash, enum led_brightness);
46};
47
48/**
49 * struct v4l2_flash_config - V4L2 Flash sub-device initialization data
50 * @dev_name: the name of the media entity,
51 unique in the system
52 * @torch_intensity: constraints for the LED in torch mode
53 * @indicator_intensity: constraints for the indicator LED
54 * @flash_faults: bitmask of flash faults that the LED flash class
55 device can report; corresponding LED_FAULT* bit
56 definitions are available in the header file
57 <linux/led-class-flash.h>
58 * @has_external_strobe: external strobe capability
59 */
60struct v4l2_flash_config {
61 char dev_name[32];
62 struct led_flash_setting torch_intensity;
63 struct led_flash_setting indicator_intensity;
64 u32 flash_faults;
65 unsigned int has_external_strobe:1;
66};
67
68/**
69 * struct v4l2_flash - Flash sub-device context
70 * @fled_cdev: LED flash class device controlled by this sub-device
71 * @iled_cdev: LED class device representing indicator LED associated
72 * with the LED flash class device
73 * @ops: V4L2 specific flash ops
74 * @sd: V4L2 sub-device
75 * @hdl: flash controls handler
76 * @ctrls: array of pointers to controls, whose values define
77 * the sub-device state
78 */
79struct v4l2_flash {
80 struct led_classdev_flash *fled_cdev;
81 struct led_classdev_flash *iled_cdev;
82 const struct v4l2_flash_ops *ops;
83
84 struct v4l2_subdev sd;
85 struct v4l2_ctrl_handler hdl;
86 struct v4l2_ctrl **ctrls;
87};
88
89static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash(
90 struct v4l2_subdev *sd)
91{
92 return container_of(sd, struct v4l2_flash, sd);
93}
94
95static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
96{
97 return container_of(c->handler, struct v4l2_flash, hdl);
98}
99
100#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
101/**
102 * v4l2_flash_init - initialize V4L2 flash led sub-device
103 * @dev: flash device, e.g. an I2C device
104 * @of_node: of_node of the LED, may be NULL if the same as device's
105 * @fled_cdev: LED flash class device to wrap
106 * @iled_cdev: LED flash class device representing indicator LED associated
107 * with fled_cdev, may be NULL
108 * @flash_ops: V4L2 Flash device ops
109 * @config: initialization data for V4L2 Flash sub-device
110 *
111 * Create V4L2 Flash sub-device wrapping given LED subsystem device.
112 *
113 * Returns: A valid pointer, or, when an error occurs, the return
114 * value is encoded using ERR_PTR(). Use IS_ERR() to check and
115 * PTR_ERR() to obtain the numeric return value.
116 */
117struct v4l2_flash *v4l2_flash_init(
118 struct device *dev, struct device_node *of_node,
119 struct led_classdev_flash *fled_cdev,
120 struct led_classdev_flash *iled_cdev,
121 const struct v4l2_flash_ops *ops,
122 struct v4l2_flash_config *config);
123
124/**
125 * v4l2_flash_release - release V4L2 Flash sub-device
126 * @flash: the V4L2 Flash sub-device to release
127 *
128 * Release V4L2 Flash sub-device.
129 */
130void v4l2_flash_release(struct v4l2_flash *v4l2_flash);
131
132#else
133static inline struct v4l2_flash *v4l2_flash_init(
134 struct device *dev, struct device_node *of_node,
135 struct led_classdev_flash *fled_cdev,
136 struct led_classdev_flash *iled_cdev,
137 const struct v4l2_flash_ops *ops,
138 struct v4l2_flash_config *config)
139{
140 return NULL;
141}
142
143static inline void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
144{
145}
146#endif /* CONFIG_V4L2_FLASH_LED_CLASS */
147
148#endif /* _V4L2_FLASH_H */
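A LED flash class driver wraps its led_classdev_flash in a V4L2 sub-device roughly as below; the driver name, fault mask and ops table are assumptions, and the intensity constraints are omitted for brevity:

#include <linux/led-class-flash.h>
#include <media/v4l2-flash-led-class.h>

static const struct v4l2_flash_ops my_flash_ops = {
	/* .external_strobe_set / .intensity_to_led_brightness / ... */
};

static struct v4l2_flash *my_register_flash(struct device *dev,
					    struct led_classdev_flash *fled)
{
	struct v4l2_flash_config cfg = {
		.dev_name		= "my-flash",
		.flash_faults		= LED_FAULT_OVER_VOLTAGE |
					  LED_FAULT_TIMEOUT,
		.has_external_strobe	= 1,
	};

	/* Returns ERR_PTR() on failure; a NULL indicator LED is allowed */
	return v4l2_flash_init(dev, dev->of_node, fled, NULL,
			       &my_flash_ops, &cfg);
}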
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 38d960d8dccd..73069e4c2796 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -96,6 +96,7 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
96 pix_fmt->colorspace = mbus_fmt->colorspace; 96 pix_fmt->colorspace = mbus_fmt->colorspace;
97 pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; 97 pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc;
98 pix_fmt->quantization = mbus_fmt->quantization; 98 pix_fmt->quantization = mbus_fmt->quantization;
99 pix_fmt->xfer_func = mbus_fmt->xfer_func;
99} 100}
100 101
101static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, 102static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
@@ -108,6 +109,7 @@ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
108 mbus_fmt->colorspace = pix_fmt->colorspace; 109 mbus_fmt->colorspace = pix_fmt->colorspace;
109 mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc; 110 mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc;
110 mbus_fmt->quantization = pix_fmt->quantization; 111 mbus_fmt->quantization = pix_fmt->quantization;
112 mbus_fmt->xfer_func = pix_fmt->xfer_func;
111 mbus_fmt->code = code; 113 mbus_fmt->code = code;
112} 114}
113 115
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index c5f3914bc4d8..3bbd96da25c9 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -116,6 +116,8 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
116 struct v4l2_buffer *buf); 116 struct v4l2_buffer *buf);
117int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 117int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
118 struct v4l2_buffer *buf); 118 struct v4l2_buffer *buf);
119int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
120 struct v4l2_buffer *buf);
119int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 121int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
120 struct v4l2_create_buffers *create); 122 struct v4l2_create_buffers *create);
121 123
@@ -248,6 +250,8 @@ int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
248 struct v4l2_buffer *buf); 250 struct v4l2_buffer *buf);
249int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh, 251int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
250 struct v4l2_buffer *buf); 252 struct v4l2_buffer *buf);
253int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
254 struct v4l2_buffer *buf);
251int v4l2_m2m_ioctl_streamon(struct file *file, void *fh, 255int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
252 enum v4l2_buf_type type); 256 enum v4l2_buf_type type);
253int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh, 257int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
diff --git a/include/media/v4l2-of.h b/include/media/v4l2-of.h
index f831c9c225b6..4dc34b245d47 100644
--- a/include/media/v4l2-of.h
+++ b/include/media/v4l2-of.h
@@ -57,16 +57,19 @@ struct v4l2_of_bus_parallel {
57 * @base: struct of_endpoint containing port, id, and local of_node 57 * @base: struct of_endpoint containing port, id, and local of_node
58 * @bus_type: bus type 58 * @bus_type: bus type
59 * @bus: bus configuration data structure 59 * @bus: bus configuration data structure
60 * @head: list head for this structure 60 * @link_frequencies: array of supported link frequencies
 61 * @nr_of_link_frequencies: number of elements in link_frequencies array
61 */ 62 */
62struct v4l2_of_endpoint { 63struct v4l2_of_endpoint {
63 struct of_endpoint base; 64 struct of_endpoint base;
65 /* Fields below this line will be zeroed by v4l2_of_parse_endpoint() */
64 enum v4l2_mbus_type bus_type; 66 enum v4l2_mbus_type bus_type;
65 union { 67 union {
66 struct v4l2_of_bus_parallel parallel; 68 struct v4l2_of_bus_parallel parallel;
67 struct v4l2_of_bus_mipi_csi2 mipi_csi2; 69 struct v4l2_of_bus_mipi_csi2 mipi_csi2;
68 } bus; 70 } bus;
69 struct list_head head; 71 u64 *link_frequencies;
72 unsigned int nr_of_link_frequencies;
70}; 73};
71 74
72/** 75/**
@@ -86,6 +89,9 @@ struct v4l2_of_link {
86#ifdef CONFIG_OF 89#ifdef CONFIG_OF
87int v4l2_of_parse_endpoint(const struct device_node *node, 90int v4l2_of_parse_endpoint(const struct device_node *node,
88 struct v4l2_of_endpoint *endpoint); 91 struct v4l2_of_endpoint *endpoint);
92struct v4l2_of_endpoint *v4l2_of_alloc_parse_endpoint(
93 const struct device_node *node);
94void v4l2_of_free_endpoint(struct v4l2_of_endpoint *endpoint);
89int v4l2_of_parse_link(const struct device_node *node, 95int v4l2_of_parse_link(const struct device_node *node,
90 struct v4l2_of_link *link); 96 struct v4l2_of_link *link);
91void v4l2_of_put_link(struct v4l2_of_link *link); 97void v4l2_of_put_link(struct v4l2_of_link *link);
@@ -97,6 +103,16 @@ static inline int v4l2_of_parse_endpoint(const struct device_node *node,
97 return -ENOSYS; 103 return -ENOSYS;
98} 104}
99 105
106static inline struct v4l2_of_endpoint *v4l2_of_alloc_parse_endpoint(
107 const struct device_node *node)
108{
109 return NULL;
110}
111
112static inline void v4l2_of_free_endpoint(struct v4l2_of_endpoint *endpoint)
113{
114}
115
100static inline int v4l2_of_parse_link(const struct device_node *node, 116static inline int v4l2_of_parse_link(const struct device_node *node,
101 struct v4l2_of_link *link) 117 struct v4l2_of_link *link)
102{ 118{
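Unlike v4l2_of_parse_endpoint(), the allocating variant also copies the link-frequencies property and must be balanced with v4l2_of_free_endpoint(). A sketch of the pairing (endpoint lookup and error handling are simplified):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <media/v4l2-of.h>

static int my_parse_endpoint(struct device_node *dev_node)
{
	struct device_node *ep_node;
	struct v4l2_of_endpoint *ep;
	unsigned int i;

	ep_node = of_graph_get_next_endpoint(dev_node, NULL);
	if (!ep_node)
		return -ENODEV;

	ep = v4l2_of_alloc_parse_endpoint(ep_node);
	of_node_put(ep_node);
	if (IS_ERR_OR_NULL(ep))
		return -EINVAL;

	for (i = 0; i < ep->nr_of_link_frequencies; i++)
		pr_info("link frequency %u: %llu Hz\n", i,
			(unsigned long long)ep->link_frequencies[i]);

	v4l2_of_free_endpoint(ep);	/* also frees link_frequencies */
	return 0;
}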
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 2f0a345a7fed..4e18318eb425 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -40,6 +40,8 @@
40#define V4L2_SUBDEV_IR_TX_NOTIFY _IOW('v', 1, u32) 40#define V4L2_SUBDEV_IR_TX_NOTIFY _IOW('v', 1, u32)
41#define V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ 0x00000001 41#define V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ 0x00000001
42 42
43#define V4L2_DEVICE_NOTIFY_EVENT _IOW('v', 2, struct v4l2_event)
44
43struct v4l2_device; 45struct v4l2_device;
44struct v4l2_ctrl_handler; 46struct v4l2_ctrl_handler;
45struct v4l2_event_subscription; 47struct v4l2_event_subscription;
@@ -293,14 +295,6 @@ struct v4l2_mbus_frame_desc {
293 295
294 g_dv_timings(): Get custom dv timings in the sub device. 296 g_dv_timings(): Get custom dv timings in the sub device.
295 297
296 enum_mbus_fmt: enumerate pixel formats, provided by a video data source
297
298 g_mbus_fmt: get the current pixel format, provided by a video data source
299
300 try_mbus_fmt: try to set a pixel format on a video data source
301
302 s_mbus_fmt: set a pixel format on a video data source
303
304 g_mbus_config: get supported mediabus configurations 298 g_mbus_config: get supported mediabus configurations
305 299
306 s_mbus_config: set a certain mediabus configuration. This operation is added 300 s_mbus_config: set a certain mediabus configuration. This operation is added
@@ -338,14 +332,6 @@ struct v4l2_subdev_video_ops {
338 struct v4l2_dv_timings *timings); 332 struct v4l2_dv_timings *timings);
339 int (*query_dv_timings)(struct v4l2_subdev *sd, 333 int (*query_dv_timings)(struct v4l2_subdev *sd,
340 struct v4l2_dv_timings *timings); 334 struct v4l2_dv_timings *timings);
341 int (*enum_mbus_fmt)(struct v4l2_subdev *sd, unsigned int index,
342 u32 *code);
343 int (*g_mbus_fmt)(struct v4l2_subdev *sd,
344 struct v4l2_mbus_framefmt *fmt);
345 int (*try_mbus_fmt)(struct v4l2_subdev *sd,
346 struct v4l2_mbus_framefmt *fmt);
347 int (*s_mbus_fmt)(struct v4l2_subdev *sd,
348 struct v4l2_mbus_framefmt *fmt);
349 int (*g_mbus_config)(struct v4l2_subdev *sd, 335 int (*g_mbus_config)(struct v4l2_subdev *sd,
350 struct v4l2_mbus_config *cfg); 336 struct v4l2_mbus_config *cfg);
351 int (*s_mbus_config)(struct v4l2_subdev *sd, 337 int (*s_mbus_config)(struct v4l2_subdev *sd,
@@ -619,6 +605,8 @@ struct v4l2_subdev {
619 struct video_device *devnode; 605 struct video_device *devnode;
620 /* pointer to the physical device, if any */ 606 /* pointer to the physical device, if any */
621 struct device *dev; 607 struct device *dev;
608 /* The device_node of the subdev, usually the same as dev->of_node. */
609 struct device_node *of_node;
622 /* Links this subdev to a global subdev_list or @notifier->done list. */ 610 /* Links this subdev to a global subdev_list or @notifier->done list. */
623 struct list_head async_list; 611 struct list_head async_list;
624 /* Pointer to respective struct v4l2_async_subdev. */ 612 /* Pointer to respective struct v4l2_async_subdev. */
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index a5790fd5d125..22a44c2f5963 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -381,6 +381,9 @@ struct v4l2_fh;
381 * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for 381 * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
382 * buffers. Only set for capture queues if qbuf has not yet been 382 * buffers. Only set for capture queues if qbuf has not yet been
383 * called since poll() needs to return POLLERR in that situation. 383 * called since poll() needs to return POLLERR in that situation.
384 * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the
385 * last decoded buffer was already dequeued. Set for capture queues
386 * when a buffer with the V4L2_BUF_FLAG_LAST is dequeued.
384 * @fileio: file io emulator internal data, used only if emulator is active 387 * @fileio: file io emulator internal data, used only if emulator is active
385 * @threadio: thread io internal data, used only if thread is active 388 * @threadio: thread io internal data, used only if thread is active
386 */ 389 */
@@ -423,6 +426,7 @@ struct vb2_queue {
423 unsigned int start_streaming_called:1; 426 unsigned int start_streaming_called:1;
424 unsigned int error:1; 427 unsigned int error:1;
425 unsigned int waiting_for_buffers:1; 428 unsigned int waiting_for_buffers:1;
429 unsigned int last_buffer_dequeued:1;
426 430
427 struct vb2_fileio_data *fileio; 431 struct vb2_fileio_data *fileio;
428 struct vb2_threadio_data *threadio; 432 struct vb2_threadio_data *threadio;
@@ -603,6 +607,15 @@ static inline bool vb2_start_streaming_called(struct vb2_queue *q)
603 return q->start_streaming_called; 607 return q->start_streaming_called;
604} 608}
605 609
610/**
611 * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue
612 * @q: videobuf queue
613 */
614static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
615{
616 q->last_buffer_dequeued = false;
617}
618
606/* 619/*
607 * The following functions are not part of the vb2 core API, but are simple 620 * The following functions are not part of the vb2 core API, but are simple
608 * helper functions that you can use in your struct v4l2_file_operations, 621 * helper functions that you can use in your struct v4l2_file_operations,
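The flag is meant to be re-armed when a codec driver restarts the capture side after a drain; something along these lines (the decoder-command context is assumed):

#include <media/videobuf2-core.h>

static void my_decoder_cmd_start(struct vb2_queue *cap_queue)
{
	/* After the V4L2_BUF_FLAG_LAST buffer was dequeued, poll()/DQBUF
	 * return immediately; clearing the flag makes them block again. */
	vb2_clear_last_buffer_dequeued(cap_queue);
}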
diff --git a/include/misc/cxl-base.h b/include/misc/cxl-base.h
new file mode 100644
index 000000000000..5ae962512fb8
--- /dev/null
+++ b/include/misc/cxl-base.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2014 IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef _MISC_CXL_BASE_H
11#define _MISC_CXL_BASE_H
12
13#ifdef CONFIG_CXL_BASE
14
15#define CXL_IRQ_RANGES 4
16
17struct cxl_irq_ranges {
18 irq_hw_number_t offset[CXL_IRQ_RANGES];
19 irq_hw_number_t range[CXL_IRQ_RANGES];
20};
21
22extern atomic_t cxl_use_count;
23
24static inline bool cxl_ctx_in_use(void)
25{
26 return (atomic_read(&cxl_use_count) != 0);
27}
28
29static inline void cxl_ctx_get(void)
30{
31 atomic_inc(&cxl_use_count);
32}
33
34static inline void cxl_ctx_put(void)
35{
36 atomic_dec(&cxl_use_count);
37}
38
39void cxl_slbia(struct mm_struct *mm);
40
41#else /* CONFIG_CXL_BASE */
42
43static inline bool cxl_ctx_in_use(void) { return false; }
44static inline void cxl_slbia(struct mm_struct *mm) {}
45
46#endif /* CONFIG_CXL_BASE */
47
48#endif
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 975cc7861f18..7a6c1d6cc173 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2014 IBM Corp. 2 * Copyright 2015 IBM Corp.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -10,39 +10,194 @@
10#ifndef _MISC_CXL_H 10#ifndef _MISC_CXL_H
11#define _MISC_CXL_H 11#define _MISC_CXL_H
12 12
13#ifdef CONFIG_CXL_BASE 13#include <linux/pci.h>
14#include <linux/poll.h>
15#include <linux/interrupt.h>
16#include <uapi/misc/cxl.h>
14 17
15#define CXL_IRQ_RANGES 4 18/*
 19 * This documents the in-kernel API for drivers to use CXL. It allows kernel
20 * drivers to bind to AFUs using an AFU configuration record exposed as a PCI
21 * configuration record.
22 *
23 * This API enables control over AFU and contexts which can't be part of the
24 * generic PCI API. This API is agnostic to the actual AFU.
25 */
26
27/* Get the AFU associated with a pci_dev */
28struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
29
30/* Get the AFU conf record number associated with a pci_dev */
31unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);
32
 33/* Get the physical device (ie. the PCIe card) to which the AFU is attached */
34struct device *cxl_get_phys_dev(struct pci_dev *dev);
35
36
37/*
38 * Context lifetime overview:
39 *
40 * An AFU context may be inited and then started and stopped multiple times
41 * before it's released. ie.
42 * - cxl_dev_context_init()
43 * - cxl_start_context()
44 * - cxl_stop_context()
45 * - cxl_start_context()
46 * - cxl_stop_context()
47 * ...repeat...
48 * - cxl_release_context()
49 * Once released, a context can't be started again.
50 *
51 * One context is inited by the cxl driver for every pci_dev. This is to be
52 * used as a default kernel context. cxl_get_context() will get this
53 * context. This context will be released by PCI hot unplug, so it doesn't need to
54 * be released explicitly by drivers.
55 *
56 * Additional kernel contexts may be inited using cxl_dev_context_init().
57 * These must be released using cxl_context_detach().
58 *
59 * Once a context has been inited, IRQs may be configured. Firstly these IRQs
60 * must be allocated (cxl_allocate_afu_irqs()), then individually mapped to
61 * specific handlers (cxl_map_afu_irq()).
62 *
63 * These IRQs can be unmapped (cxl_unmap_afu_irq()) and finally released
64 * (cxl_free_afu_irqs()).
65 *
66 * The AFU can be reset (cxl_afu_reset()). This will cause the PSL/AFU
67 * hardware to lose track of all contexts. It's up to the caller of
68 * cxl_afu_reset() to restart these contexts.
69 */
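/*
 * Editorial sketch, not part of this header: a minimal, hypothetical driver
 * path following the lifecycle described above. my_afu_irq() and
 * my_afu_start() are invented names; error handling is abbreviated.
 *
 *	static irqreturn_t my_afu_irq(int irq, void *cookie)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_afu_start(struct pci_dev *pdev)
 *	{
 *		struct cxl_context *ctx;
 *		int rc;
 *
 *		// Init the context; it is not started yet
 *		ctx = cxl_dev_context_init(pdev);
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *
 *		// One AFU interrupt (AFU IRQ 0 is reserved for page faults)
 *		rc = cxl_allocate_afu_irqs(ctx, 1);
 *		if (rc)
 *			goto out_release;
 *		rc = cxl_map_afu_irq(ctx, 1, my_afu_irq, pdev, "my_afu");
 *		if (rc)
 *			goto out_free;
 *
 *		// NULL task makes this a kernel context
 *		rc = cxl_start_context(ctx, 0, NULL);
 *		if (rc)
 *			goto out_unmap;
 *		return 0;
 *
 *	out_unmap:
 *		cxl_unmap_afu_irq(ctx, 1, pdev);
 *	out_free:
 *		cxl_free_afu_irqs(ctx);
 *	out_release:
 *		cxl_release_context(ctx);
 *		return rc;
 *	}
 */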
70
71/*
72 * On pci_enable_device(), the cxl driver will init a single cxl context for
73 * use by the driver. It doesn't start this context (as that will likely
74 * generate DMA traffic for most AFUs).
75 *
76 * This gets the default context associated with this pci_dev. This context
77 * doesn't need to be released as this will be done by the PCI subsystem on hot
78 * unplug.
79 */
80struct cxl_context *cxl_get_context(struct pci_dev *dev);
81/*
82 * Allocate and initialise a context associated with an AFU PCI device. This
83 * doesn't start the context in the AFU.
84 */
85struct cxl_context *cxl_dev_context_init(struct pci_dev *dev);
86/*
87 * Release and free a context. Context should be stopped before calling.
88 */
89int cxl_release_context(struct cxl_context *ctx);
16 90
17struct cxl_irq_ranges { 91/*
18 irq_hw_number_t offset[CXL_IRQ_RANGES]; 92 * Allocate AFU interrupts for this context. num=0 will allocate the default
19 irq_hw_number_t range[CXL_IRQ_RANGES]; 93 * for this AFU as given in the AFU descriptor. This number doesn't include the
20}; 94 * interrupt 0 (CAIA defines AFU IRQ 0 for page faults). Each interrupt to be
95 * used must have a handler mapped with cxl_map_afu_irq.
96 */
97int cxl_allocate_afu_irqs(struct cxl_context *cxl, int num);
98/* Free allocated interrupts */
99void cxl_free_afu_irqs(struct cxl_context *cxl);
100
101/*
102 * Map a handler for an AFU interrupt associated with a particular context. AFU
103 * IRQ numbers start from 1 (CAIA defines AFU IRQ 0 for page faults). cookie
104 * is private data that will be provided to the interrupt handler.
105 */
106int cxl_map_afu_irq(struct cxl_context *cxl, int num,
107 irq_handler_t handler, void *cookie, char *name);
108/* unmap mapped IRQ handlers */
109void cxl_unmap_afu_irq(struct cxl_context *cxl, int num, void *cookie);
21 110
22extern atomic_t cxl_use_count; 111/*
112 * Start work on the AFU. This starts a cxl context and associates it with a
113 * task. task == NULL will make it a kernel context.
114 */
115int cxl_start_context(struct cxl_context *ctx, u64 wed,
116 struct task_struct *task);
117/*
118 * Stop a context and remove it from the PSL
119 */
120int cxl_stop_context(struct cxl_context *ctx);
23 121
24static inline bool cxl_ctx_in_use(void) 122/* Reset the AFU */
25{ 123int cxl_afu_reset(struct cxl_context *ctx);
26 return (atomic_read(&cxl_use_count) != 0);
27}
28 124
29static inline void cxl_ctx_get(void) 125/*
30{ 126 * Set a context as a master context.
31 atomic_inc(&cxl_use_count); 127 * This sets the default problem space area mapped as the full space, rather
32} 128 * than just the per context area (for slaves).
129 */
130void cxl_set_master(struct cxl_context *ctx);
33 131
34static inline void cxl_ctx_put(void) 132/*
35{ 133 * Map and unmap the AFU Problem Space area. The amount and location mapped
36 atomic_dec(&cxl_use_count); 134 * depends on if this context is a master or slave.
37} 135 */
136void __iomem *cxl_psa_map(struct cxl_context *ctx);
137void cxl_psa_unmap(void __iomem *addr);
38 138
39void cxl_slbia(struct mm_struct *mm); 139/* Get the process element for this context */
140int cxl_process_element(struct cxl_context *ctx);
40 141
41#else /* CONFIG_CXL_BASE */
42 142
43static inline bool cxl_ctx_in_use(void) { return false; } 143/*
44static inline void cxl_slbia(struct mm_struct *mm) {} 144 * These calls allow drivers to create their own file descriptors and make them
145 * identical to the cxl file descriptor user API. An example use case:
146 *
147 * struct file_operations cxl_my_fops = {};
148 * ......
149 * // Init the context
150 * ctx = cxl_dev_context_init(dev);
151 * if (IS_ERR(ctx))
152 * return PTR_ERR(ctx);
153 * // Create and attach a new file descriptor to my file ops
154 * file = cxl_get_fd(ctx, &cxl_my_fops, &fd);
155 * // Start context
156 * rc = cxl_start_work(ctx, &work.work);
157 * if (rc) {
158 * fput(file);
159 * put_unused_fd(fd);
160 * return -ENODEV;
161 * }
162 * // No error paths after installing the fd
163 * fd_install(fd, file);
164 * return fd;
165 *
166 * This inits a context, gets a file descriptor and associates some file
167 * ops with that file descriptor. If the file ops are blank, the cxl driver will
168 * fill them in with the default ones that mimic the standard user API. Once
169 * completed, the file descriptor can be installed. Once the file descriptor is
170 * installed, it's visible to the user, so no errors may occur past this point.
171 *
172 * If the cxl_fd_release() file op is installed, the context will be stopped
173 * and released when the fd is released. Hence the driver won't need to manage
174 * this itself.
175 */
45 176
46#endif /* CONFIG_CXL_BASE */ 177/*
178 * Take a context and associate it with my file ops. Returns the associated
179 * file and file descriptor. Any file ops which are blank are filled in by the
180 * cxl driver with the default ops to mimic the standard API.
181 */
182struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
183 int *fd);
184/* Get the context associated with this file */
185struct cxl_context *cxl_fops_get_context(struct file *file);
186/*
187 * Start a context associated with a struct cxl_ioctl_start_work, as used by the
188 * standard cxl user API.
189 */
190int cxl_start_work(struct cxl_context *ctx,
191 struct cxl_ioctl_start_work *work);
192/*
193 * Export all the existing fops so drivers can use them
194 */
195int cxl_fd_open(struct inode *inode, struct file *file);
196int cxl_fd_release(struct inode *inode, struct file *file);
197long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
198int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
199unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
200ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
201 loff_t *off);
47 202
48#endif 203#endif /* _MISC_CXL_H */
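
The exported cxl_fd_* operations above let a driver mix the standard cxl file
semantics with its own overrides before handing the file_operations to
cxl_get_fd(). A brief sketch under assumed names (my_fd_read and my_cxl_fops
are hypothetical; only the cxl_fd_* symbols come from this header):

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <misc/cxl.h>

	/* Delegate to the standard cxl read behaviour. */
	static ssize_t my_fd_read(struct file *file, char __user *buf,
				  size_t count, loff_t *off)
	{
		return cxl_fd_read(file, buf, count, off);
	}

	/* Non-const: cxl_get_fd() fills in any ops left blank. */
	static struct file_operations my_cxl_fops = {
		.owner		= THIS_MODULE,
		.open		= cxl_fd_open,
		.release	= cxl_fd_release,	/* stops/releases the context on close */
		.unlocked_ioctl	= cxl_fd_ioctl,
		.mmap		= cxl_fd_mmap,
		.poll		= cxl_fd_poll,
		.read		= my_fd_read,
	};
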
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 3ee4c92afd1b..931738bc5bba 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -99,7 +99,6 @@ struct tc_action_ops {
99 99
100int tcf_hash_search(struct tc_action *a, u32 index); 100int tcf_hash_search(struct tc_action *a, u32 index);
101void tcf_hash_destroy(struct tc_action *a); 101void tcf_hash_destroy(struct tc_action *a);
102int tcf_hash_release(struct tc_action *a, int bind);
103u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); 102u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
104int tcf_hash_check(u32 index, struct tc_action *a, int bind); 103int tcf_hash_check(u32 index, struct tc_action *a, int bind);
105int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, 104int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
107void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); 106void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
108void tcf_hash_insert(struct tc_action *a); 107void tcf_hash_insert(struct tc_action *a);
109 108
109int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
110
111static inline int tcf_hash_release(struct tc_action *a, bool bind)
112{
113 return __tcf_hash_release(a, bind, false);
114}
115
110int tcf_register_action(struct tc_action_ops *a, unsigned int mask); 116int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
111int tcf_unregister_action(struct tc_action_ops *a); 117int tcf_unregister_action(struct tc_action_ops *a);
112int tcf_action_destroy(struct list_head *actions, int bind); 118int tcf_action_destroy(struct list_head *actions, int bind);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 80456f72d70a..def59d3a34d5 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -142,6 +142,7 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
142void ipv6_mc_remap(struct inet6_dev *idev); 142void ipv6_mc_remap(struct inet6_dev *idev);
143void ipv6_mc_init_dev(struct inet6_dev *idev); 143void ipv6_mc_init_dev(struct inet6_dev *idev);
144void ipv6_mc_destroy_dev(struct inet6_dev *idev); 144void ipv6_mc_destroy_dev(struct inet6_dev *idev);
145int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);
145void addrconf_dad_failure(struct inet6_ifaddr *ifp); 146void addrconf_dad_failure(struct inet6_ifaddr *ifp);
146 147
147bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, 148bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index a175ba4a7adb..4a167b30a12f 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -39,7 +39,6 @@ struct unix_skb_parms {
39}; 39};
40 40
41#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) 41#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
42#define UNIXSID(skb) (&UNIXCB((skb)).secid)
43 42
44#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) 43#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
45#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) 44#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 172632dd9930..db639a4c5ab8 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -74,7 +74,7 @@ void vsock_pending_work(struct work_struct *work);
74struct sock *__vsock_create(struct net *net, 74struct sock *__vsock_create(struct net *net,
75 struct socket *sock, 75 struct socket *sock,
76 struct sock *parent, 76 struct sock *parent,
77 gfp_t priority, unsigned short type); 77 gfp_t priority, unsigned short type, int kern);
78 78
79/**** TRANSPORT ****/ 79/**** TRANSPORT ****/
80 80
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 16a923a3a43a..e602f8177ebf 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -13,6 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/atomic.h> 14#include <linux/atomic.h>
15#include <net/neighbour.h> 15#include <net/neighbour.h>
16#include <net/sock.h>
16 17
17#define AX25_T1CLAMPLO 1 18#define AX25_T1CLAMPLO 1
18#define AX25_T1CLAMPHI (30 * HZ) 19#define AX25_T1CLAMPHI (30 * HZ)
@@ -246,7 +247,20 @@ typedef struct ax25_cb {
246 atomic_t refcount; 247 atomic_t refcount;
247} ax25_cb; 248} ax25_cb;
248 249
249#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo) 250struct ax25_sock {
251 struct sock sk;
252 struct ax25_cb *cb;
253};
254
255static inline struct ax25_sock *ax25_sk(const struct sock *sk)
256{
257 return (struct ax25_sock *) sk;
258}
259
260static inline struct ax25_cb *sk_to_ax25(const struct sock *sk)
261{
262 return ax25_sk(sk)->cb;
263}
250 264
251#define ax25_for_each(__ax25, list) \ 265#define ax25_for_each(__ax25, list) \
252 hlist_for_each_entry(__ax25, list, ax25_node) 266 hlist_for_each_entry(__ax25, list, ax25_node)
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 7dba80546f16..38d8a34d3589 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -365,8 +365,19 @@ extern struct dentry *bt_debugfs;
365int l2cap_init(void); 365int l2cap_init(void);
366void l2cap_exit(void); 366void l2cap_exit(void);
367 367
368#if IS_ENABLED(CONFIG_BT_BREDR)
368int sco_init(void); 369int sco_init(void);
369void sco_exit(void); 370void sco_exit(void);
371#else
372static inline int sco_init(void)
373{
374 return 0;
375}
376
377static inline void sco_exit(void)
378{
379}
380#endif
370 381
371int mgmt_init(void); 382int mgmt_init(void);
372void mgmt_exit(void); 383void mgmt_exit(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index d95da83cb1b0..7ca6690355ea 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1202,6 +1202,16 @@ struct hci_rp_read_clock {
1202 __le16 accuracy; 1202 __le16 accuracy;
1203} __packed; 1203} __packed;
1204 1204
1205#define HCI_OP_READ_ENC_KEY_SIZE 0x1408
1206struct hci_cp_read_enc_key_size {
1207 __le16 handle;
1208} __packed;
1209struct hci_rp_read_enc_key_size {
1210 __u8 status;
1211 __le16 handle;
1212 __u8 key_size;
1213} __packed;
1214
1205#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409 1215#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
1206struct hci_rp_read_local_amp_info { 1216struct hci_rp_read_local_amp_info {
1207 __u8 status; 1217 __u8 status;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index a056c2bfeb81..3bd618d3e55d 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -156,16 +156,22 @@ struct oob_data {
156}; 156};
157 157
158struct adv_info { 158struct adv_info {
159 struct delayed_work timeout_exp; 159 struct list_head list;
160 bool pending;
160 __u8 instance; 161 __u8 instance;
161 __u32 flags; 162 __u32 flags;
162 __u16 timeout; 163 __u16 timeout;
164 __u16 remaining_time;
165 __u16 duration;
163 __u16 adv_data_len; 166 __u16 adv_data_len;
164 __u8 adv_data[HCI_MAX_AD_LENGTH]; 167 __u8 adv_data[HCI_MAX_AD_LENGTH];
165 __u16 scan_rsp_len; 168 __u16 scan_rsp_len;
166 __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; 169 __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
167}; 170};
168 171
172#define HCI_MAX_ADV_INSTANCES 5
173#define HCI_DEFAULT_ADV_DURATION 2
174
169#define HCI_MAX_SHORT_NAME_LENGTH 10 175#define HCI_MAX_SHORT_NAME_LENGTH 10
170 176
171/* Default LE RPA expiry time, 15 minutes */ 177/* Default LE RPA expiry time, 15 minutes */
@@ -373,7 +379,11 @@ struct hci_dev {
373 __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; 379 __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
374 __u8 scan_rsp_data_len; 380 __u8 scan_rsp_data_len;
375 381
376 struct adv_info adv_instance; 382 struct list_head adv_instances;
383 unsigned int adv_instance_cnt;
384 __u8 cur_adv_instance;
385 __u16 adv_instance_timeout;
386 struct delayed_work adv_instance_expire;
377 387
378 __u8 irk[16]; 388 __u8 irk[16];
379 __u32 rpa_timeout; 389 __u32 rpa_timeout;
@@ -530,10 +540,22 @@ extern struct mutex hci_cb_list_lock;
530/* ----- HCI interface to upper protocols ----- */ 540/* ----- HCI interface to upper protocols ----- */
531int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); 541int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
532int l2cap_disconn_ind(struct hci_conn *hcon); 542int l2cap_disconn_ind(struct hci_conn *hcon);
533int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); 543void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
534 544
545#if IS_ENABLED(CONFIG_BT_BREDR)
535int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); 546int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
536int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); 547void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
548#else
549static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
550 __u8 *flags)
551{
552 return 0;
553}
554
555static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
556{
557}
558#endif
537 559
538/* ----- Inquiry cache ----- */ 560/* ----- Inquiry cache ----- */
539#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ 561#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
@@ -561,11 +583,6 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
561 hdev->discovery.scan_duration = 0; 583 hdev->discovery.scan_duration = 0;
562} 584}
563 585
564static inline void adv_info_init(struct hci_dev *hdev)
565{
566 memset(&hdev->adv_instance, 0, sizeof(struct adv_info));
567}
568
569bool hci_discovery_active(struct hci_dev *hdev); 586bool hci_discovery_active(struct hci_dev *hdev);
570 587
571void hci_discovery_set_state(struct hci_dev *hdev, int state); 588void hci_discovery_set_state(struct hci_dev *hdev, int state);
@@ -1007,6 +1024,15 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1007int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 1024int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1008 u8 bdaddr_type); 1025 u8 bdaddr_type);
1009 1026
1027void hci_adv_instances_clear(struct hci_dev *hdev);
1028struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
1029struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
1030int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1031 u16 adv_data_len, u8 *adv_data,
1032 u16 scan_rsp_len, u8 *scan_rsp_data,
1033 u16 timeout, u16 duration);
1034int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
1035
1010void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); 1036void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
1011 1037
1012int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); 1038int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
@@ -1350,6 +1376,7 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
1350int mgmt_powered(struct hci_dev *hdev, u8 powered); 1376int mgmt_powered(struct hci_dev *hdev, u8 powered);
1351int mgmt_update_adv_data(struct hci_dev *hdev); 1377int mgmt_update_adv_data(struct hci_dev *hdev);
1352void mgmt_discoverable_timeout(struct hci_dev *hdev); 1378void mgmt_discoverable_timeout(struct hci_dev *hdev);
1379void mgmt_adv_timeout_expired(struct hci_dev *hdev);
1353void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, 1380void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
1354 bool persistent); 1381 bool persistent);
1355void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, 1382void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
@@ -1408,7 +1435,7 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete);
1408u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, 1435u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
1409 u16 to_multiplier); 1436 u16 to_multiplier);
1410void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, 1437void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
1411 __u8 ltk[16]); 1438 __u8 ltk[16], __u8 key_size);
1412 1439
1413void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 1440void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
1414 u8 *bdaddr_type); 1441 u8 *bdaddr_type);
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index ea6546d2c946..c28aca25320e 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -63,6 +63,9 @@ enum {
63 BOND_OPT_LP_INTERVAL, 63 BOND_OPT_LP_INTERVAL,
64 BOND_OPT_SLAVES, 64 BOND_OPT_SLAVES,
65 BOND_OPT_TLB_DYNAMIC_LB, 65 BOND_OPT_TLB_DYNAMIC_LB,
66 BOND_OPT_AD_ACTOR_SYS_PRIO,
67 BOND_OPT_AD_ACTOR_SYSTEM,
68 BOND_OPT_AD_USER_PORT_KEY,
66 BOND_OPT_LAST 69 BOND_OPT_LAST
67}; 70};
68 71
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 78ed135e9dea..20defc0353d1 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -136,6 +136,9 @@ struct bond_params {
136 int packets_per_slave; 136 int packets_per_slave;
137 int tlb_dynamic_lb; 137 int tlb_dynamic_lb;
138 struct reciprocal_value reciprocal_packets_per_slave; 138 struct reciprocal_value reciprocal_packets_per_slave;
139 u16 ad_actor_sys_prio;
140 u16 ad_user_port_key;
141 u8 ad_actor_system[ETH_ALEN];
139}; 142};
140 143
141struct bond_parm_tbl { 144struct bond_parm_tbl {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f8d6813cd5b2..883fe1e7c5a1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -111,7 +111,7 @@ enum ieee80211_band {
111 * This may be due to the driver or due to regulatory bandwidth 111 * This may be due to the driver or due to regulatory bandwidth
112 * restrictions. 112 * restrictions.
113 * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY 113 * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
114 * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT 114 * @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
115 * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted 115 * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
116 * on this channel. 116 * on this channel.
117 * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted 117 * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
@@ -129,7 +129,7 @@ enum ieee80211_channel_flags {
129 IEEE80211_CHAN_NO_80MHZ = 1<<7, 129 IEEE80211_CHAN_NO_80MHZ = 1<<7,
130 IEEE80211_CHAN_NO_160MHZ = 1<<8, 130 IEEE80211_CHAN_NO_160MHZ = 1<<8,
131 IEEE80211_CHAN_INDOOR_ONLY = 1<<9, 131 IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
132 IEEE80211_CHAN_GO_CONCURRENT = 1<<10, 132 IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
133 IEEE80211_CHAN_NO_20MHZ = 1<<11, 133 IEEE80211_CHAN_NO_20MHZ = 1<<11,
134 IEEE80211_CHAN_NO_10MHZ = 1<<12, 134 IEEE80211_CHAN_NO_10MHZ = 1<<12,
135}; 135};
@@ -4575,13 +4575,15 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
4575 * @ie: information elements of the deauth/disassoc frame (may be %NULL) 4575 * @ie: information elements of the deauth/disassoc frame (may be %NULL)
4576 * @ie_len: length of IEs 4576 * @ie_len: length of IEs
4577 * @reason: reason code for the disconnection, set it to 0 if unknown 4577 * @reason: reason code for the disconnection, set it to 0 if unknown
4578 * @locally_generated: disconnection was requested locally
4578 * @gfp: allocation flags 4579 * @gfp: allocation flags
4579 * 4580 *
4580 * After it calls this function, the driver should enter an idle state 4581 * After it calls this function, the driver should enter an idle state
4581 * and not try to connect to any AP any more. 4582 * and not try to connect to any AP any more.
4582 */ 4583 */
4583void cfg80211_disconnected(struct net_device *dev, u16 reason, 4584void cfg80211_disconnected(struct net_device *dev, u16 reason,
4584 const u8 *ie, size_t ie_len, gfp_t gfp); 4585 const u8 *ie, size_t ie_len,
4586 bool locally_generated, gfp_t gfp);
4585 4587
4586/** 4588/**
4587 * cfg80211_ready_on_channel - notification of remain_on_channel start 4589 * cfg80211_ready_on_channel - notification of remain_on_channel start
@@ -4866,6 +4868,23 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
4866 struct cfg80211_chan_def *chandef, 4868 struct cfg80211_chan_def *chandef,
4867 enum nl80211_iftype iftype); 4869 enum nl80211_iftype iftype);
4868 4870
4871/**
4872 * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
4873 * @wiphy: the wiphy
4874 * @chandef: the channel definition
4875 * @iftype: interface type
4876 *
4877 * Return: %true if there is no secondary channel or the secondary channel(s)
4878 * can be used for beaconing (i.e. is not a radar channel etc.). This version
4879 * also checks if IR-relaxation conditions apply, to allow beaconing under
4880 * more permissive conditions.
4881 *
4882 * Requires the RTNL to be held.
4883 */
4884bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
4885 struct cfg80211_chan_def *chandef,
4886 enum nl80211_iftype iftype);
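/*
 * Editorial sketch, not part of this header: checking whether an AP may
 * beacon with IR-relaxation taken into account. my_can_start_ap() is a
 * hypothetical helper; the RTNL is taken because the kernel-doc above
 * requires it to be held.
 *
 *	static bool my_can_start_ap(struct wiphy *wiphy,
 *				    struct cfg80211_chan_def *chandef)
 *	{
 *		bool ok;
 *
 *		rtnl_lock();
 *		ok = cfg80211_reg_can_beacon_relax(wiphy, chandef,
 *						   NL80211_IFTYPE_AP);
 *		rtnl_unlock();
 *
 *		return ok;
 *	}
 */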
4887
4869/* 4888/*
4870 * cfg80211_ch_switch_notify - update wdev channel and notify userspace 4889 * cfg80211_ch_switch_notify - update wdev channel and notify userspace
4871 * @dev: the device which switched channels 4890 * @dev: the device which switched channels
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 6ea16c84293b..290a9a69af07 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -44,6 +44,8 @@ struct cfg802154_ops {
44 int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel); 44 int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
45 int (*set_cca_mode)(struct wpan_phy *wpan_phy, 45 int (*set_cca_mode)(struct wpan_phy *wpan_phy,
46 const struct wpan_phy_cca *cca); 46 const struct wpan_phy_cca *cca);
47 int (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level);
48 int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power);
47 int (*set_pan_id)(struct wpan_phy *wpan_phy, 49 int (*set_pan_id)(struct wpan_phy *wpan_phy,
48 struct wpan_dev *wpan_dev, __le16 pan_id); 50 struct wpan_dev *wpan_dev, __le16 pan_id);
49 int (*set_short_addr)(struct wpan_phy *wpan_phy, 51 int (*set_short_addr)(struct wpan_phy *wpan_phy,
@@ -61,14 +63,66 @@ struct cfg802154_ops {
61 struct wpan_dev *wpan_dev, bool mode); 63 struct wpan_dev *wpan_dev, bool mode);
62}; 64};
63 65
66static inline bool
67wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st)
68{
69 switch (st) {
70 case NL802154_SUPPORTED_BOOL_TRUE:
71 return b;
72 case NL802154_SUPPORTED_BOOL_FALSE:
73 return !b;
74 case NL802154_SUPPORTED_BOOL_BOTH:
75 return true;
76 default:
77 WARN_ON(1);
78 }
79
80 return false;
81}
82
83struct wpan_phy_supported {
84 u32 channels[IEEE802154_MAX_PAGE + 1],
85 cca_modes, cca_opts, iftypes;
86 enum nl802154_supported_bool_states lbt;
87 u8 min_minbe, max_minbe, min_maxbe, max_maxbe,
88 min_csma_backoffs, max_csma_backoffs;
89 s8 min_frame_retries, max_frame_retries;
90 size_t tx_powers_size, cca_ed_levels_size;
91 const s32 *tx_powers, *cca_ed_levels;
92};
93
64struct wpan_phy_cca { 94struct wpan_phy_cca {
65 enum nl802154_cca_modes mode; 95 enum nl802154_cca_modes mode;
66 enum nl802154_cca_opts opt; 96 enum nl802154_cca_opts opt;
67}; 97};
68 98
69struct wpan_phy { 99static inline bool
70 struct mutex pib_lock; 100wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
101{
102 if (a->mode != b->mode)
103 return false;
104
105 if (a->mode == NL802154_CCA_ENERGY_CARRIER)
106 return a->opt == b->opt;
71 107
108 return true;
109}
110
111/**
112 * @WPAN_PHY_FLAG_TXPOWER: Indicates that transceiver will support
113 * transmit power setting.
114 * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed
115 * level setting.
116 * @WPAN_PHY_FLAG_CCA_MODE: Indicates that transceiver will support cca mode
117 * setting.
118 */
119enum wpan_phy_flags {
120 WPAN_PHY_FLAG_TXPOWER = BIT(1),
121 WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2),
122 WPAN_PHY_FLAG_CCA_MODE = BIT(3),
123};
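/*
 * Editorial sketch, not part of this header: a driver advertising its
 * capabilities before registering the wpan_phy. my_tx_powers and
 * my_phy_setup() are hypothetical names; the flags and the fields of
 * wpan_phy/wpan_phy_supported are the ones shown in this hunk.
 *
 *	static const s32 my_tx_powers[] = { 0, -100, -300, -500 };	// mBm
 *
 *	static void my_phy_setup(struct wpan_phy *phy)
 *	{
 *		phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_MODE;
 *		phy->supported.tx_powers = my_tx_powers;
 *		phy->supported.tx_powers_size = ARRAY_SIZE(my_tx_powers);
 *		phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
 *	}
 */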
124
125struct wpan_phy {
72 /* If multiple wpan_phys are registered and you're handed e.g. 126 /* If multiple wpan_phys are registered and you're handed e.g.
73 * a regular netdev with assigned ieee802154_ptr, you won't 127 * a regular netdev with assigned ieee802154_ptr, you won't
74 * know whether it points to a wpan_phy your driver has registered 128 * know whether it points to a wpan_phy your driver has registered
@@ -77,6 +131,8 @@ struct wpan_phy {
77 */ 131 */
78 const void *privid; 132 const void *privid;
79 133
134 u32 flags;
135
80 /* 136 /*
81 * This is a PIB according to 802.15.4-2011. 137 * This is a PIB according to 802.15.4-2011.
82 * We do not provide timing-related variables, as they 138 * We do not provide timing-related variables, as they
@@ -84,12 +140,14 @@ struct wpan_phy {
84 */ 140 */
85 u8 current_channel; 141 u8 current_channel;
86 u8 current_page; 142 u8 current_page;
87 u32 channels_supported[IEEE802154_MAX_PAGE + 1]; 143 struct wpan_phy_supported supported;
88 s8 transmit_power; 144 /* current transmit_power in mBm */
145 s32 transmit_power;
89 struct wpan_phy_cca cca; 146 struct wpan_phy_cca cca;
90 147
91 __le64 perm_extended_addr; 148 __le64 perm_extended_addr;
92 149
150 /* current cca ed threshold in mBm */
93 s32 cca_ed_level; 151 s32 cca_ed_level;
94 152
95 /* PHY depended MAC PIB values */ 153 /* PHY depended MAC PIB values */
@@ -121,9 +179,9 @@ struct wpan_dev {
121 __le64 extended_addr; 179 __le64 extended_addr;
122 180
123 /* MAC BSN field */ 181 /* MAC BSN field */
124 u8 bsn; 182 atomic_t bsn;
125 /* MAC DSN field */ 183 /* MAC DSN field */
126 u8 dsn; 184 atomic_t dsn;
127 185
128 u8 min_be; 186 u8 min_be;
129 u8 max_be; 187 u8 max_be;
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 0a55ac715077..2d1d73cb773e 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -122,7 +122,9 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
122 122
123static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) 123static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
124{ 124{
125 *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to)); 125 __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
126
127 *sum = csum_fold(csum_add(tmp, (__force __wsum)to));
126} 128}
127 129
128/* Implements RFC 1624 (Incremental Internet Checksum) 130/* Implements RFC 1624 (Incremental Internet Checksum)
diff --git a/include/net/codel.h b/include/net/codel.h
index 1e18005f7f65..267e70210061 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -7,7 +7,7 @@
7 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com> 7 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
8 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net> 8 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
9 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net> 9 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
10 * Copyright (C) 2012 Eric Dumazet <edumazet@google.com> 10 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
@@ -119,12 +119,14 @@ static inline u32 codel_time_to_us(codel_time_t val)
119/** 119/**
120 * struct codel_params - contains codel parameters 120 * struct codel_params - contains codel parameters
121 * @target: target queue size (in time units) 121 * @target: target queue size (in time units)
122 * @ce_threshold: threshold for marking packets with ECN CE
122 * @interval: width of moving time window 123 * @interval: width of moving time window
123 * @mtu: device mtu, or minimal queue backlog in bytes. 124 * @mtu: device mtu, or minimal queue backlog in bytes.
124 * @ecn: is Explicit Congestion Notification enabled 125 * @ecn: is Explicit Congestion Notification enabled
125 */ 126 */
126struct codel_params { 127struct codel_params {
127 codel_time_t target; 128 codel_time_t target;
129 codel_time_t ce_threshold;
128 codel_time_t interval; 130 codel_time_t interval;
129 u32 mtu; 131 u32 mtu;
130 bool ecn; 132 bool ecn;
@@ -161,19 +163,24 @@ struct codel_vars {
161 * @maxpacket: largest packet we've seen so far 163 * @maxpacket: largest packet we've seen so far
162 * @drop_count: temp count of dropped packets in dequeue() 164 * @drop_count: temp count of dropped packets in dequeue()
163 * ecn_mark: number of packets we ECN marked instead of dropping 165 * ecn_mark: number of packets we ECN marked instead of dropping
166 * ce_mark: number of packets CE marked because sojourn time was above ce_threshold
164 */ 167 */
165struct codel_stats { 168struct codel_stats {
166 u32 maxpacket; 169 u32 maxpacket;
167 u32 drop_count; 170 u32 drop_count;
168 u32 ecn_mark; 171 u32 ecn_mark;
172 u32 ce_mark;
169}; 173};
170 174
175#define CODEL_DISABLED_THRESHOLD INT_MAX
176
171static void codel_params_init(struct codel_params *params, 177static void codel_params_init(struct codel_params *params,
172 const struct Qdisc *sch) 178 const struct Qdisc *sch)
173{ 179{
174 params->interval = MS2TIME(100); 180 params->interval = MS2TIME(100);
175 params->target = MS2TIME(5); 181 params->target = MS2TIME(5);
176 params->mtu = psched_mtu(qdisc_dev(sch)); 182 params->mtu = psched_mtu(qdisc_dev(sch));
183 params->ce_threshold = CODEL_DISABLED_THRESHOLD;
177 params->ecn = false; 184 params->ecn = false;
178} 185}
179 186
@@ -354,6 +361,9 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
354 vars->rec_inv_sqrt); 361 vars->rec_inv_sqrt);
355 } 362 }
356end: 363end:
364 if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
365 INET_ECN_set_ce(skb))
366 stats->ce_mark++;
357 return skb; 367 return skb;
358} 368}
359#endif 369#endif
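
Together with codel_params_init() above, a qdisc that wants the new CE marking
only has to replace the disabled default with a real threshold. A brief sketch
(my_codel_setup() is a hypothetical helper; MS2TIME() and codel_params_init()
come from this header):

	static void my_codel_setup(struct codel_params *params,
				   const struct Qdisc *sch)
	{
		codel_params_init(params, sch);		/* ce_threshold starts disabled */
		params->ce_threshold = MS2TIME(1);	/* CE-mark above 1 ms sojourn time */
		params->ecn = true;
	}
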
diff --git a/include/net/dst.h b/include/net/dst.h
index 0fb99a26e973..2bc73f8a00a9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -109,7 +109,6 @@ u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
109extern const u32 dst_default_metrics[]; 109extern const u32 dst_default_metrics[];
110 110
111#define DST_METRICS_READ_ONLY 0x1UL 111#define DST_METRICS_READ_ONLY 0x1UL
112#define DST_METRICS_FORCE_OVERWRITE 0x2UL
113#define DST_METRICS_FLAGS 0x3UL 112#define DST_METRICS_FLAGS 0x3UL
114#define __DST_METRICS_PTR(Y) \ 113#define __DST_METRICS_PTR(Y) \
115 ((u32 *)((Y) & ~DST_METRICS_FLAGS)) 114 ((u32 *)((Y) & ~DST_METRICS_FLAGS))
@@ -120,11 +119,6 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst)
120 return dst->_metrics & DST_METRICS_READ_ONLY; 119 return dst->_metrics & DST_METRICS_READ_ONLY;
121} 120}
122 121
123static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
124{
125 dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
126}
127
128void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); 122void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
129 123
130static inline void dst_destroy_metrics_generic(struct dst_entry *dst) 124static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
@@ -355,18 +349,6 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
355 __skb_tunnel_rx(skb, dev, net); 349 __skb_tunnel_rx(skb, dev, net);
356} 350}
357 351
358/* Children define the path of the packet through the
359 * Linux networking. Thus, destinations are stackable.
360 */
361
362static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
363{
364 struct dst_entry *child = dst_clone(skb_dst(skb)->child);
365
366 skb_dst_drop(skb);
367 return child;
368}
369
370int dst_discard_sk(struct sock *sk, struct sk_buff *skb); 352int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
371static inline int dst_discard(struct sk_buff *skb) 353static inline int dst_discard(struct sk_buff *skb)
372{ 354{
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 6d67383a5114..903a55efbffe 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -36,7 +36,8 @@ struct fib_lookup_arg {
36 void *result; 36 void *result;
37 struct fib_rule *rule; 37 struct fib_rule *rule;
38 int flags; 38 int flags;
39#define FIB_LOOKUP_NOREF 1 39#define FIB_LOOKUP_NOREF 1
40#define FIB_LOOKUP_IGNORE_LINKSTATE 2
40}; 41};
41 42
42struct fib_rules_ops { 43struct fib_rules_ops {
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
new file mode 100644
index 000000000000..1a8c22419936
--- /dev/null
+++ b/include/net/flow_dissector.h
@@ -0,0 +1,220 @@
1#ifndef _NET_FLOW_DISSECTOR_H
2#define _NET_FLOW_DISSECTOR_H
3
4#include <linux/types.h>
5#include <linux/skbuff.h>
6#include <linux/in6.h>
7#include <uapi/linux/if_ether.h>
8
9/**
10 * struct flow_dissector_key_control:
11 * @thoff: Transport header offset
12 */
13struct flow_dissector_key_control {
14 u16 thoff;
15 u16 addr_type;
16};
17
18/**
19 * struct flow_dissector_key_basic:
20 * @thoff: Transport header offset
21 * @n_proto: Network header protocol (eg. IPv4/IPv6)
22 * @ip_proto: Transport header protocol (eg. TCP/UDP)
23 */
24struct flow_dissector_key_basic {
25 __be16 n_proto;
26 u8 ip_proto;
27 u8 padding;
28};
29
30struct flow_dissector_key_tags {
31 u32 vlan_id:12,
32 flow_label:20;
33};
34
35struct flow_dissector_key_keyid {
36 __be32 keyid;
37};
38
39/**
40 * struct flow_dissector_key_ipv4_addrs:
41 * @src: source ip address
42 * @dst: destination ip address
43 */
44struct flow_dissector_key_ipv4_addrs {
45 /* (src,dst) must be grouped, in the same way as in the IP header */
46 __be32 src;
47 __be32 dst;
48};
49
50/**
51 * struct flow_dissector_key_ipv6_addrs:
52 * @src: source ip address
53 * @dst: destination ip address
54 */
55struct flow_dissector_key_ipv6_addrs {
56 /* (src,dst) must be grouped, in the same way as in the IP header */
57 struct in6_addr src;
58 struct in6_addr dst;
59};
60
61/**
62 * struct flow_dissector_key_tipc_addrs:
63 * @srcnode: source node address
64 */
65struct flow_dissector_key_tipc_addrs {
66 __be32 srcnode;
67};
68
69/**
70 * struct flow_dissector_key_addrs:
71 * @v4addrs: IPv4 addresses
72 * @v6addrs: IPv6 addresses
73 */
74struct flow_dissector_key_addrs {
75 union {
76 struct flow_dissector_key_ipv4_addrs v4addrs;
77 struct flow_dissector_key_ipv6_addrs v6addrs;
78 struct flow_dissector_key_tipc_addrs tipcaddrs;
79 };
80};
81
82/**
83 * struct flow_dissector_key_ports:
84 * @ports: port numbers of Transport header
85 * src: source port number
86 * dst: destination port number
87 */
88struct flow_dissector_key_ports {
89 union {
90 __be32 ports;
91 struct {
92 __be16 src;
93 __be16 dst;
94 };
95 };
96};
97
98
99/**
100 * struct flow_dissector_key_eth_addrs:
101 * @src: source Ethernet address
102 * @dst: destination Ethernet address
103 */
104struct flow_dissector_key_eth_addrs {
105 /* (dst,src) must be grouped, in the same way as in the ETH header */
106 unsigned char dst[ETH_ALEN];
107 unsigned char src[ETH_ALEN];
108};
109
110enum flow_dissector_key_id {
111 FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
112 FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
113 FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
114 FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
115 FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
116 FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
117 FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
118	FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_tags */
119	FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */
120 FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
121 FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
122
123 FLOW_DISSECTOR_KEY_MAX,
124};
125
126struct flow_dissector_key {
127 enum flow_dissector_key_id key_id;
128 size_t offset; /* offset of struct flow_dissector_key_*
129					  in the target struct */
130};
131
132struct flow_dissector {
133	unsigned int used_keys; /* each bit represents presence of one key id */
134 unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
135};
136
137void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
138 const struct flow_dissector_key *key,
139 unsigned int key_count);
140
141bool __skb_flow_dissect(const struct sk_buff *skb,
142 struct flow_dissector *flow_dissector,
143 void *target_container,
144 void *data, __be16 proto, int nhoff, int hlen);
145
146static inline bool skb_flow_dissect(const struct sk_buff *skb,
147 struct flow_dissector *flow_dissector,
148 void *target_container)
149{
150 return __skb_flow_dissect(skb, flow_dissector, target_container,
151 NULL, 0, 0, 0);
152}
153
154struct flow_keys {
155 struct flow_dissector_key_control control;
156#define FLOW_KEYS_HASH_START_FIELD basic
157 struct flow_dissector_key_basic basic;
158 struct flow_dissector_key_tags tags;
159 struct flow_dissector_key_keyid keyid;
160 struct flow_dissector_key_ports ports;
161 struct flow_dissector_key_addrs addrs;
162};
163
164#define FLOW_KEYS_HASH_OFFSET \
165 offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD)
166
167__be32 flow_get_u32_src(const struct flow_keys *flow);
168__be32 flow_get_u32_dst(const struct flow_keys *flow);
169
170extern struct flow_dissector flow_keys_dissector;
171extern struct flow_dissector flow_keys_buf_dissector;
172
173static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
174 struct flow_keys *flow)
175{
176 memset(flow, 0, sizeof(*flow));
177 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
178 NULL, 0, 0, 0);
179}
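/*
 * Editorial sketch, not part of this header: dissecting an skb into the new
 * flow_keys layout and deriving a hash from it. my_flow_hash() is a
 * hypothetical helper; skb_flow_dissect_flow_keys() and flow_hash_from_keys()
 * are declared in this header.
 *
 *	static u32 my_flow_hash(const struct sk_buff *skb)
 *	{
 *		struct flow_keys keys;
 *
 *		if (!skb_flow_dissect_flow_keys(skb, &keys))
 *			return 0;	// dissection failed
 *
 *		// addresses and ports now live in keys.addrs and keys.ports
 *		return flow_hash_from_keys(&keys);
 *	}
 */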
180
181static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
182 void *data, __be16 proto,
183 int nhoff, int hlen)
184{
185 memset(flow, 0, sizeof(*flow));
186 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
187 data, proto, nhoff, hlen);
188}
189
190__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
191 void *data, int hlen_proto);
192
193static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
194 int thoff, u8 ip_proto)
195{
196 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
197}
198
199u32 flow_hash_from_keys(struct flow_keys *keys);
200void __skb_get_hash(struct sk_buff *skb);
201u32 skb_get_poff(const struct sk_buff *skb);
202u32 __skb_get_poff(const struct sk_buff *skb, void *data,
203 const struct flow_keys *keys, int hlen);
204
205/* struct flow_keys_digest:
206 *
207 * This structure is used to hold a digest of the full flow keys. This is a
208 * larger "hash" of a flow to allow definitively matching specific flows where
209 * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so
210 * that it can be used in the CB of the skb (see sch_choke for an example).
211 */
212#define FLOW_KEYS_DIGEST_LEN 16
213struct flow_keys_digest {
214 u8 data[FLOW_KEYS_DIGEST_LEN];
215};
216
217void make_flow_keys_digest(struct flow_keys_digest *digest,
218 const struct flow_keys *flow);
219
220#endif
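
Since the digest is sized to fit in skb->cb, a scheduler can stash it per
packet and later compare full flows without re-dissecting. A brief sketch
under assumed names (my_skb_cb and my_record_flow() are hypothetical;
make_flow_keys_digest() is declared in this header):

	struct my_skb_cb {
		struct flow_keys_digest digest;
	};

	static void my_record_flow(struct sk_buff *skb, const struct flow_keys *keys)
	{
		struct my_skb_cb *cb = (struct my_skb_cb *)skb->cb;

		BUILD_BUG_ON(sizeof(struct my_skb_cb) > sizeof(skb->cb));
		make_flow_keys_digest(&cb->digest, keys);
	}
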
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
deleted file mode 100644
index dc8fd81412bf..000000000000
--- a/include/net/flow_keys.h
+++ /dev/null
@@ -1,45 +0,0 @@
1#ifndef _NET_FLOW_KEYS_H
2#define _NET_FLOW_KEYS_H
3
4/* struct flow_keys:
5 * @src: source ip address in case of IPv4
6 * For IPv6 it contains 32bit hash of src address
7 * @dst: destination ip address in case of IPv4
8 * For IPv6 it contains 32bit hash of dst address
9 * @ports: port numbers of Transport header
10 * port16[0]: src port number
11 * port16[1]: dst port number
12 * @thoff: Transport header offset
13 * @n_proto: Network header protocol (eg. IPv4/IPv6)
14 * @ip_proto: Transport header protocol (eg. TCP/UDP)
15 * All the members, except thoff, are in network byte order.
16 */
17struct flow_keys {
18 /* (src,dst) must be grouped, in the same way than in IP header */
19 __be32 src;
20 __be32 dst;
21 union {
22 __be32 ports;
23 __be16 port16[2];
24 };
25 u16 thoff;
26 __be16 n_proto;
27 u8 ip_proto;
28};
29
30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
31 void *data, __be16 proto, int nhoff, int hlen);
32static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
33{
34 return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
35}
36__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
37 void *data, int hlen_proto);
38static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
39{
40 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
41}
42u32 flow_hash_from_keys(struct flow_keys *keys);
43unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
44 __be16 protocol);
45#endif
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 14fb8d3390b4..2a0543a1899d 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,6 +62,11 @@ struct genevehdr {
62 struct geneve_opt options[]; 62 struct geneve_opt options[];
63}; 63};
64 64
65static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
66{
67 return (struct genevehdr *)(udp_hdr(skb) + 1);
68}
69
65#ifdef CONFIG_INET 70#ifdef CONFIG_INET
66struct geneve_sock; 71struct geneve_sock;
67 72
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 94a297052442..2c10a9f0c6d9 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -346,15 +346,15 @@ struct ieee802154_mac_params {
346struct wpan_phy; 346struct wpan_phy;
347 347
348enum { 348enum {
349 IEEE802154_LLSEC_PARAM_ENABLED = 1 << 0, 349 IEEE802154_LLSEC_PARAM_ENABLED = BIT(0),
350 IEEE802154_LLSEC_PARAM_FRAME_COUNTER = 1 << 1, 350 IEEE802154_LLSEC_PARAM_FRAME_COUNTER = BIT(1),
351 IEEE802154_LLSEC_PARAM_OUT_LEVEL = 1 << 2, 351 IEEE802154_LLSEC_PARAM_OUT_LEVEL = BIT(2),
352 IEEE802154_LLSEC_PARAM_OUT_KEY = 1 << 3, 352 IEEE802154_LLSEC_PARAM_OUT_KEY = BIT(3),
353 IEEE802154_LLSEC_PARAM_KEY_SOURCE = 1 << 4, 353 IEEE802154_LLSEC_PARAM_KEY_SOURCE = BIT(4),
354 IEEE802154_LLSEC_PARAM_PAN_ID = 1 << 5, 354 IEEE802154_LLSEC_PARAM_PAN_ID = BIT(5),
355 IEEE802154_LLSEC_PARAM_HWADDR = 1 << 6, 355 IEEE802154_LLSEC_PARAM_HWADDR = BIT(6),
356 IEEE802154_LLSEC_PARAM_COORD_HWADDR = 1 << 7, 356 IEEE802154_LLSEC_PARAM_COORD_HWADDR = BIT(7),
357 IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = 1 << 8, 357 IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = BIT(8),
358}; 358};
359 359
360struct ieee802154_llsec_ops { 360struct ieee802154_llsec_ops {
@@ -422,16 +422,6 @@ struct ieee802154_mlme_ops {
422 struct ieee802154_mac_params *params); 422 struct ieee802154_mac_params *params);
423 423
424 struct ieee802154_llsec_ops *llsec; 424 struct ieee802154_llsec_ops *llsec;
425
426 /* The fields below are required. */
427
428 /*
429 * FIXME: these should become the part of PIB/MIB interface.
430 * However we still don't have IB interface of any kind
431 */
432 __le16 (*get_pan_id)(const struct net_device *dev);
433 __le16 (*get_short_addr)(const struct net_device *dev);
434 u8 (*get_dsn)(const struct net_device *dev);
435}; 425};
436 426
437static inline struct ieee802154_mlme_ops * 427static inline struct ieee802154_mlme_ops *
@@ -440,10 +430,4 @@ ieee802154_mlme_ops(const struct net_device *dev)
440 return dev->ml_priv; 430 return dev->ml_priv;
441} 431}
442 432
443static inline struct ieee802154_reduced_mlme_ops *
444ieee802154_reduced_mlme_ops(const struct net_device *dev)
445{
446 return dev->ml_priv;
447}
448
449#endif 433#endif
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 4a92423eefa5..279f83591971 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -41,7 +41,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
41 41
42static inline void inet_ctl_sock_destroy(struct sock *sk) 42static inline void inet_ctl_sock_destroy(struct sock *sk)
43{ 43{
44 sk_release_kernel(sk); 44 sock_release(sk->sk_socket);
45} 45}
46 46
47#endif 47#endif
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 8d1765577acc..53eead2da743 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -21,13 +21,11 @@ struct netns_frags {
21 * @INET_FRAG_FIRST_IN: first fragment has arrived 21 * @INET_FRAG_FIRST_IN: first fragment has arrived
22 * @INET_FRAG_LAST_IN: final fragment has arrived 22 * @INET_FRAG_LAST_IN: final fragment has arrived
23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction 23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
24 * @INET_FRAG_EVICTED: frag queue is being evicted
25 */ 24 */
26enum { 25enum {
27 INET_FRAG_FIRST_IN = BIT(0), 26 INET_FRAG_FIRST_IN = BIT(0),
28 INET_FRAG_LAST_IN = BIT(1), 27 INET_FRAG_LAST_IN = BIT(1),
29 INET_FRAG_COMPLETE = BIT(2), 28 INET_FRAG_COMPLETE = BIT(2),
30 INET_FRAG_EVICTED = BIT(3)
31}; 29};
32 30
33/** 31/**
@@ -43,8 +41,9 @@ enum {
43 * @len: total length of the original datagram 41 * @len: total length of the original datagram
44 * @meat: length of received fragments so far 42 * @meat: length of received fragments so far
45 * @flags: fragment queue flags 43 * @flags: fragment queue flags
46 * @max_size: (ipv4 only) maximum received fragment size with IP_DF set 44 * @max_size: maximum received fragment size
47 * @net: namespace that this frag belongs to 45 * @net: namespace that this frag belongs to
46 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
48 */ 47 */
49struct inet_frag_queue { 48struct inet_frag_queue {
50 spinlock_t lock; 49 spinlock_t lock;
@@ -59,6 +58,7 @@ struct inet_frag_queue {
59 __u8 flags; 58 __u8 flags;
60 u16 max_size; 59 u16 max_size;
61 struct netns_frags *net; 60 struct netns_frags *net;
61 struct hlist_node list_evictor;
62}; 62};
63 63
64#define INETFRAGS_HASHSZ 1024 64#define INETFRAGS_HASHSZ 1024
@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
125 inet_frag_destroy(q, f); 125 inet_frag_destroy(q, f);
126} 126}
127 127
128static inline bool inet_frag_evicting(struct inet_frag_queue *q)
129{
130 return !hlist_unhashed(&q->list_evictor);
131}
132
128/* Memory Tracking Functions. */ 133/* Memory Tracking Functions. */
129 134
130/* The default percpu_counter batch size is not big enough to scale to 135/* The default percpu_counter batch size is not big enough to scale to
@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
139 return percpu_counter_read(&nf->mem); 144 return percpu_counter_read(&nf->mem);
140} 145}
141 146
142static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) 147static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
143{ 148{
144 __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); 149 __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
145} 150}
146 151
147static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) 152static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
148{ 153{
149 __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); 154 __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
150} 155}
151 156
152static inline void init_frag_mem_limit(struct netns_frags *nf) 157static inline void init_frag_mem_limit(struct netns_frags *nf)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 73fe0f9525d9..b73c88a19dd4 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -24,7 +24,6 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/wait.h> 26#include <linux/wait.h>
27#include <linux/vmalloc.h>
28 27
29#include <net/inet_connection_sock.h> 28#include <net/inet_connection_sock.h>
30#include <net/inet_sock.h> 29#include <net/inet_sock.h>
@@ -148,8 +147,6 @@ struct inet_hashinfo {
148 */ 147 */
149 struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] 148 struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
150 ____cacheline_aligned_in_smp; 149 ____cacheline_aligned_in_smp;
151
152 atomic_t bsockets;
153}; 150};
154 151
155static inline struct inet_ehash_bucket *inet_ehash_bucket( 152static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -166,52 +163,12 @@ static inline spinlock_t *inet_ehash_lockp(
166 return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask]; 163 return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
167} 164}
168 165
169static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) 166int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
170{
171 unsigned int i, size = 256;
172#if defined(CONFIG_PROVE_LOCKING)
173 unsigned int nr_pcpus = 2;
174#else
175 unsigned int nr_pcpus = num_possible_cpus();
176#endif
177 if (nr_pcpus >= 4)
178 size = 512;
179 if (nr_pcpus >= 8)
180 size = 1024;
181 if (nr_pcpus >= 16)
182 size = 2048;
183 if (nr_pcpus >= 32)
184 size = 4096;
185 if (sizeof(spinlock_t) != 0) {
186#ifdef CONFIG_NUMA
187 if (size * sizeof(spinlock_t) > PAGE_SIZE)
188 hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
189 else
190#endif
191 hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
192 GFP_KERNEL);
193 if (!hashinfo->ehash_locks)
194 return ENOMEM;
195 for (i = 0; i < size; i++)
196 spin_lock_init(&hashinfo->ehash_locks[i]);
197 }
198 hashinfo->ehash_locks_mask = size - 1;
199 return 0;
200}
201 167
202static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) 168static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
203{ 169{
204 if (hashinfo->ehash_locks) { 170 kvfree(hashinfo->ehash_locks);
205#ifdef CONFIG_NUMA 171 hashinfo->ehash_locks = NULL;
206 unsigned int size = (hashinfo->ehash_locks_mask + 1) *
207 sizeof(spinlock_t);
208 if (size > PAGE_SIZE)
209 vfree(hashinfo->ehash_locks);
210 else
211#endif
212 kfree(hashinfo->ehash_locks);
213 hashinfo->ehash_locks = NULL;
214 }
215} 172}
216 173
217struct inet_bind_bucket * 174struct inet_bind_bucket *
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b6c3737da4e9..47eb67b08abd 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -187,6 +187,7 @@ struct inet_sock {
187 transparent:1, 187 transparent:1,
188 mc_all:1, 188 mc_all:1,
189 nodefrag:1; 189 nodefrag:1;
190 __u8 bind_address_no_port:1;
190 __u8 rcv_tos; 191 __u8 rcv_tos;
191 __u8 convert_csum; 192 __u8 convert_csum;
192 int uc_index; 193 int uc_index;
diff --git a/include/net/ip.h b/include/net/ip.h
index d14af7edd197..d5fe9f2ab699 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -31,7 +31,7 @@
31#include <net/route.h> 31#include <net/route.h>
32#include <net/snmp.h> 32#include <net/snmp.h>
33#include <net/flow.h> 33#include <net/flow.h>
34#include <net/flow_keys.h> 34#include <net/flow_dissector.h>
35 35
36struct sock; 36struct sock;
37 37
@@ -45,6 +45,7 @@ struct inet_skb_parm {
45#define IPSKB_FRAG_COMPLETE BIT(3) 45#define IPSKB_FRAG_COMPLETE BIT(3)
46#define IPSKB_REROUTED BIT(4) 46#define IPSKB_REROUTED BIT(4)
47#define IPSKB_DOREDIRECT BIT(5) 47#define IPSKB_DOREDIRECT BIT(5)
48#define IPSKB_FRAG_PMTU BIT(6)
48 49
49 u16 frag_max_size; 50 u16 frag_max_size;
50}; 51};
@@ -108,9 +109,8 @@ int ip_local_deliver(struct sk_buff *skb);
108int ip_mr_input(struct sk_buff *skb); 109int ip_mr_input(struct sk_buff *skb);
109int ip_output(struct sock *sk, struct sk_buff *skb); 110int ip_output(struct sock *sk, struct sk_buff *skb);
110int ip_mc_output(struct sock *sk, struct sk_buff *skb); 111int ip_mc_output(struct sock *sk, struct sk_buff *skb);
111int ip_fragment(struct sock *sk, struct sk_buff *skb, 112int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
112 int (*output)(struct sock *, struct sk_buff *)); 113 int (*output)(struct sock *, struct sk_buff *));
113int ip_do_nat(struct sk_buff *skb);
114void ip_send_check(struct iphdr *ip); 114void ip_send_check(struct iphdr *ip);
115int __ip_local_out(struct sk_buff *skb); 115int __ip_local_out(struct sk_buff *skb);
116int ip_local_out_sk(struct sock *sk, struct sk_buff *skb); 116int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
161} 161}
162 162
163/* datagram.c */ 163/* datagram.c */
164int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
164int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 165int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
165 166
166void ip4_datagram_release_cb(struct sock *sk); 167void ip4_datagram_release_cb(struct sock *sk);
@@ -355,15 +356,32 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
355 skb->len, proto, 0); 356 skb->len, proto, 0);
356} 357}
357 358
359/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
360 * Equivalent to : flow->v4addrs.src = iph->saddr;
361 * flow->v4addrs.dst = iph->daddr;
362 */
363static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
364 const struct iphdr *iph)
365{
366 BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
367 offsetof(typeof(flow->addrs), v4addrs.src) +
368 sizeof(flow->addrs.v4addrs.src));
369 memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
370 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
371}
372
358static inline void inet_set_txhash(struct sock *sk) 373static inline void inet_set_txhash(struct sock *sk)
359{ 374{
360 struct inet_sock *inet = inet_sk(sk); 375 struct inet_sock *inet = inet_sk(sk);
361 struct flow_keys keys; 376 struct flow_keys keys;
362 377
363 keys.src = inet->inet_saddr; 378 memset(&keys, 0, sizeof(keys));
364 keys.dst = inet->inet_daddr; 379
365 keys.port16[0] = inet->inet_sport; 380 keys.addrs.v4addrs.src = inet->inet_saddr;
366 keys.port16[1] = inet->inet_dport; 381 keys.addrs.v4addrs.dst = inet->inet_daddr;
382 keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
383 keys.ports.src = inet->inet_sport;
384 keys.ports.dst = inet->inet_dport;
367 385
368 sk->sk_txhash = flow_hash_from_keys(&keys); 386 sk->sk_txhash = flow_hash_from_keys(&keys);
369} 387}
@@ -478,6 +496,16 @@ enum ip_defrag_users {
478 IP_DEFRAG_MACVLAN, 496 IP_DEFRAG_MACVLAN,
479}; 497};
480 498
499/* Return true if the value of 'user' is between 'lower_bond'
500 * and 'upper_bond' inclusively.
501 */
502static inline bool ip_defrag_user_in_between(u32 user,
503 enum ip_defrag_users lower_bond,
504 enum ip_defrag_users upper_bond)
505{
506 return user >= lower_bond && user <= upper_bond;
507}
508
481int ip_defrag(struct sk_buff *skb, u32 user); 509int ip_defrag(struct sk_buff *skb, u32 user);
482#ifdef CONFIG_INET 510#ifdef CONFIG_INET
483struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user); 511struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
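
The inet_set_txhash() rewrite above illustrates the new flow_keys layout from the flow-dissector rework: the structure must be zeroed, addresses and ports go into grouped members, and control.addr_type is set explicitly. A standalone sketch of the same pattern for an arbitrary IPv4 4-tuple (example_hash_v4 is a made-up name; the members and flow_hash_from_keys() come from the headers in this diff):

#include <linux/string.h>
#include <linux/types.h>
#include <net/flow_dissector.h>

static u32 example_hash_v4(__be32 saddr, __be32 daddr,
			   __be16 sport, __be16 dport)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));
	keys.addrs.v4addrs.src = saddr;
	keys.addrs.v4addrs.dst = daddr;
	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys.ports.src = sport;
	keys.ports.dst = dport;

	return flow_hash_from_keys(&keys);
}
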
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 20e80fa7bbdd..3b76849c190f 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -120,45 +120,19 @@ struct rt6_info {
120 struct rt6key rt6i_src; 120 struct rt6key rt6i_src;
121 struct rt6key rt6i_prefsrc; 121 struct rt6key rt6i_prefsrc;
122 122
123 struct list_head rt6i_uncached;
124 struct uncached_list *rt6i_uncached_list;
125
123 struct inet6_dev *rt6i_idev; 126 struct inet6_dev *rt6i_idev;
124 unsigned long _rt6i_peer; 127 struct rt6_info * __percpu *rt6i_pcpu;
125 128
126 u32 rt6i_metric; 129 u32 rt6i_metric;
130 u32 rt6i_pmtu;
127 /* more non-fragment space at head required */ 131 /* more non-fragment space at head required */
128 unsigned short rt6i_nfheader_len; 132 unsigned short rt6i_nfheader_len;
129 u8 rt6i_protocol; 133 u8 rt6i_protocol;
130}; 134};
131 135
132static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
133{
134 return inetpeer_ptr(rt->_rt6i_peer);
135}
136
137static inline bool rt6_has_peer(struct rt6_info *rt)
138{
139 return inetpeer_ptr_is_peer(rt->_rt6i_peer);
140}
141
142static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
143{
144 __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
145}
146
147static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
148{
149 return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
150}
151
152static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
153{
154 inetpeer_init_ptr(&rt->_rt6i_peer, base);
155}
156
157static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
158{
159 inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
160}
161
162static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) 136static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
163{ 137{
164 return ((struct rt6_info *)dst)->rt6i_idev; 138 return ((struct rt6_info *)dst)->rt6i_idev;
@@ -189,13 +163,12 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
189 rt0->rt6i_flags |= RTF_EXPIRES; 163 rt0->rt6i_flags |= RTF_EXPIRES;
190} 164}
191 165
192static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) 166static inline u32 rt6_get_cookie(const struct rt6_info *rt)
193{ 167{
194 struct dst_entry *new = (struct dst_entry *) from; 168 if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
169 rt = (struct rt6_info *)(rt->dst.from);
195 170
196 rt->rt6i_flags &= ~RTF_EXPIRES; 171 return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
197 dst_hold(new);
198 rt->dst.from = new;
199} 172}
200 173
201static inline void ip6_rt_put(struct rt6_info *rt) 174static inline void ip6_rt_put(struct rt6_info *rt)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 5e192068e6cb..297629aadb19 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -145,7 +145,7 @@ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
145#ifdef CONFIG_IPV6_SUBTREES 145#ifdef CONFIG_IPV6_SUBTREES
146 np->saddr_cache = saddr; 146 np->saddr_cache = saddr;
147#endif 147#endif
148 np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 148 np->dst_cookie = rt6_get_cookie(rt);
149} 149}
150 150
151static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, 151static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
@@ -163,11 +163,14 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
163 return rt->rt6i_flags & RTF_LOCAL; 163 return rt->rt6i_flags & RTF_LOCAL;
164} 164}
165 165
166static inline bool ipv6_anycast_destination(const struct sk_buff *skb) 166static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
167 const struct in6_addr *daddr)
167{ 168{
168 struct rt6_info *rt = (struct rt6_info *) skb_dst(skb); 169 struct rt6_info *rt = (struct rt6_info *)dst;
169 170
170 return rt->rt6i_flags & RTF_ANYCAST; 171 return rt->rt6i_flags & RTF_ANYCAST ||
172 (rt->rt6i_dst.plen != 128 &&
173 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
171} 174}
172 175
173int ip6_fragment(struct sock *sk, struct sk_buff *skb, 176int ip6_fragment(struct sock *sk, struct sk_buff *skb,
@@ -194,9 +197,15 @@ static inline bool ip6_sk_ignore_df(const struct sock *sk)
194 inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; 197 inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
195} 198}
196 199
197static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt) 200static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
201 struct in6_addr *daddr)
198{ 202{
199 return &rt->rt6i_gateway; 203 if (rt->rt6i_flags & RTF_GATEWAY)
204 return &rt->rt6i_gateway;
205 else if (unlikely(rt->rt6i_flags & RTF_CACHE))
206 return &rt->rt6i_dst.addr;
207 else
208 return daddr;
200} 209}
201 210
202#endif 211#endif
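
rt6_nexthop() now needs the packet's destination so that gatewayless routes and cached host routes resolve to the right next-hop address. A sketch of how a caller might combine it with a neighbour lookup (the wrapper name is hypothetical; __ipv6_neigh_lookup() is the existing ndisc helper and returns a held reference):

#include <net/ip6_route.h>
#include <net/ndisc.h>

static struct neighbour *example_lookup_neigh(struct rt6_info *rt,
					      struct in6_addr *daddr)
{
	/* Gateway if set, the cached /128 address, or the destination itself. */
	struct in6_addr *nexthop = rt6_nexthop(rt, daddr);

	/* Caller must release the neighbour reference when done. */
	return __ipv6_neigh_lookup(rt->dst.dev, nexthop);
}
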
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 54271ed0ed45..5fa643b4e891 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -183,7 +183,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
183struct fib_table { 183struct fib_table {
184 struct hlist_node tb_hlist; 184 struct hlist_node tb_hlist;
185 u32 tb_id; 185 u32 tb_id;
186 int tb_default;
187 int tb_num_default; 186 int tb_num_default;
188 struct rcu_head rcu; 187 struct rcu_head rcu;
189 unsigned long *tb_data; 188 unsigned long *tb_data;
@@ -226,7 +225,7 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
226} 225}
227 226
228static inline int fib_lookup(struct net *net, const struct flowi4 *flp, 227static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
229 struct fib_result *res) 228 struct fib_result *res, unsigned int flags)
230{ 229{
231 struct fib_table *tb; 230 struct fib_table *tb;
232 int err = -ENETUNREACH; 231 int err = -ENETUNREACH;
@@ -234,7 +233,7 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
234 rcu_read_lock(); 233 rcu_read_lock();
235 234
236 tb = fib_get_table(net, RT_TABLE_MAIN); 235 tb = fib_get_table(net, RT_TABLE_MAIN);
237 if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) 236 if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF))
238 err = 0; 237 err = 0;
239 238
240 rcu_read_unlock(); 239 rcu_read_unlock();
@@ -249,16 +248,18 @@ void __net_exit fib4_rules_exit(struct net *net);
249struct fib_table *fib_new_table(struct net *net, u32 id); 248struct fib_table *fib_new_table(struct net *net, u32 id);
250struct fib_table *fib_get_table(struct net *net, u32 id); 249struct fib_table *fib_get_table(struct net *net, u32 id);
251 250
252int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res); 251int __fib_lookup(struct net *net, struct flowi4 *flp,
252 struct fib_result *res, unsigned int flags);
253 253
254static inline int fib_lookup(struct net *net, struct flowi4 *flp, 254static inline int fib_lookup(struct net *net, struct flowi4 *flp,
255 struct fib_result *res) 255 struct fib_result *res, unsigned int flags)
256{ 256{
257 struct fib_table *tb; 257 struct fib_table *tb;
258 int err; 258 int err;
259 259
260 flags |= FIB_LOOKUP_NOREF;
260 if (net->ipv4.fib_has_custom_rules) 261 if (net->ipv4.fib_has_custom_rules)
261 return __fib_lookup(net, flp, res); 262 return __fib_lookup(net, flp, res, flags);
262 263
263 rcu_read_lock(); 264 rcu_read_lock();
264 265
@@ -266,11 +267,11 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
266 267
267 for (err = 0; !err; err = -ENETUNREACH) { 268 for (err = 0; !err; err = -ENETUNREACH) {
268 tb = rcu_dereference_rtnl(net->ipv4.fib_main); 269 tb = rcu_dereference_rtnl(net->ipv4.fib_main);
269 if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) 270 if (tb && !fib_table_lookup(tb, flp, res, flags))
270 break; 271 break;
271 272
272 tb = rcu_dereference_rtnl(net->ipv4.fib_default); 273 tb = rcu_dereference_rtnl(net->ipv4.fib_default);
273 if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) 274 if (tb && !fib_table_lookup(tb, flp, res, flags))
274 break; 275 break;
275 } 276 }
276 277
@@ -288,7 +289,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb);
288int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, 289int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
289 u8 tos, int oif, struct net_device *dev, 290 u8 tos, int oif, struct net_device *dev,
290 struct in_device *idev, u32 *itag); 291 struct in_device *idev, u32 *itag);
291void fib_select_default(struct fib_result *res); 292void fib_select_default(const struct flowi4 *flp, struct fib_result *res);
292#ifdef CONFIG_IP_ROUTE_CLASSID 293#ifdef CONFIG_IP_ROUTE_CLASSID
293static inline int fib_num_tclassid_users(struct net *net) 294static inline int fib_num_tclassid_users(struct net *net)
294{ 295{
@@ -305,9 +306,9 @@ void fib_flush_external(struct net *net);
305 306
306/* Exported by fib_semantics.c */ 307/* Exported by fib_semantics.c */
307int ip_fib_check_default(__be32 gw, struct net_device *dev); 308int ip_fib_check_default(__be32 gw, struct net_device *dev);
308int fib_sync_down_dev(struct net_device *dev, int force); 309int fib_sync_down_dev(struct net_device *dev, unsigned long event);
309int fib_sync_down_addr(struct net *net, __be32 local); 310int fib_sync_down_addr(struct net *net, __be32 local);
310int fib_sync_up(struct net_device *dev); 311int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
311void fib_select_multipath(struct fib_result *res); 312void fib_select_multipath(struct fib_result *res);
312 313
313/* Exported by fib_trie.c */ 314/* Exported by fib_trie.c */
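
Both fib_lookup() variants above now take a flags word that is ORed with FIB_LOOKUP_NOREF internally, so existing callers only need to pass 0 to keep the old behaviour. A minimal sketch (helper name hypothetical):

#include <net/ip_fib.h>

static int example_route_lookup(struct net *net, struct flowi4 *fl4)
{
	struct fib_result res;

	/* 0 == no extra lookup flags; NOREF is added by fib_lookup() itself. */
	return fib_lookup(net, fl4, &res, 0);
}
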
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index eec8ad3c9843..82dbdb092a5d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -19,7 +19,7 @@
19#include <net/if_inet6.h> 19#include <net/if_inet6.h>
20#include <net/ndisc.h> 20#include <net/ndisc.h>
21#include <net/flow.h> 21#include <net/flow.h>
22#include <net/flow_keys.h> 22#include <net/flow_dissector.h>
23#include <net/snmp.h> 23#include <net/snmp.h>
24 24
25#define SIN6_LEN_RFC2133 24 25#define SIN6_LEN_RFC2133 24
@@ -239,8 +239,10 @@ struct ip6_flowlabel {
239 struct net *fl_net; 239 struct net *fl_net;
240}; 240};
241 241
242#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF) 242#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
243#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF) 243#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
244#define IPV6_FLOWLABEL_STATELESS_FLAG cpu_to_be32(0x00080000)
245
244#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK) 246#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
245#define IPV6_TCLASS_SHIFT 20 247#define IPV6_TCLASS_SHIFT 20
246 248
@@ -669,8 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
669 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); 671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
670} 672}
671 673
672void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr, 674__be32 ipv6_select_ident(struct net *net,
673 struct rt6_info *rt); 675 const struct in6_addr *daddr,
676 const struct in6_addr *saddr);
674void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); 677void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
675 678
676int ip6_dst_hoplimit(struct dst_entry *dst); 679int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -689,6 +692,20 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
689 return hlimit; 692 return hlimit;
690} 693}
691 694
695/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
696 * Equivalent to : flow->v6addrs.src = iph->saddr;
697 * flow->v6addrs.dst = iph->daddr;
698 */
699static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
700 const struct ipv6hdr *iph)
701{
702 BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
703 offsetof(typeof(flow->addrs), v6addrs.src) +
704 sizeof(flow->addrs.v6addrs.src));
705 memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
706 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
707}
708
692#if IS_ENABLED(CONFIG_IPV6) 709#if IS_ENABLED(CONFIG_IPV6)
693static inline void ip6_set_txhash(struct sock *sk) 710static inline void ip6_set_txhash(struct sock *sk)
694{ 711{
@@ -696,10 +713,15 @@ static inline void ip6_set_txhash(struct sock *sk)
696 struct ipv6_pinfo *np = inet6_sk(sk); 713 struct ipv6_pinfo *np = inet6_sk(sk);
697 struct flow_keys keys; 714 struct flow_keys keys;
698 715
699 keys.src = (__force __be32)ipv6_addr_hash(&np->saddr); 716 memset(&keys, 0, sizeof(keys));
700 keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr); 717
701 keys.port16[0] = inet->inet_sport; 718 memcpy(&keys.addrs.v6addrs.src, &np->saddr,
702 keys.port16[1] = inet->inet_dport; 719 sizeof(keys.addrs.v6addrs.src));
720 memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
721 sizeof(keys.addrs.v6addrs.dst));
722 keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
723 keys.ports.src = inet->inet_sport;
724 keys.ports.dst = inet->inet_dport;
703 725
704 sk->sk_txhash = flow_hash_from_keys(&keys); 726 sk->sk_txhash = flow_hash_from_keys(&keys);
705} 727}
@@ -719,6 +741,9 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
719 hash ^= hash >> 12; 741 hash ^= hash >> 12;
720 742
721 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; 743 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
744
745 if (net->ipv6.sysctl.flowlabel_state_ranges)
746 flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
722 } 747 }
723 748
724 return flowlabel; 749 return flowlabel;
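
IPV6_FLOWLABEL_STATELESS_FLAG is bit 19 of the label, so with the flowlabel_state_ranges sysctl enabled, auto-generated labels appear to be confined to the upper half of the label space (0x80000-0xfffff), leaving the lower half for labels managed explicitly by applications. That reading is an inference from the flag value, not stated in the header. A trivial sketch of testing the bit:

#include <net/ipv6.h>

/* True if a label falls in the range used for auto-generated labels. */
static bool example_label_is_stateless(__be32 flowlabel)
{
	return !!(flowlabel & IPV6_FLOWLABEL_STATELESS_FLAG);
}
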
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 0134681acc4c..fe994d2e5286 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -96,7 +96,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
96} 96}
97 97
98struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, 98struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
99 struct proto *prot); 99 struct proto *prot, int kern);
100void llc_sk_free(struct sock *sk); 100void llc_sk_free(struct sock *sk);
101 101
102void llc_sk_reset(struct sock *sk); 102void llc_sk_reset(struct sock *sk);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index fc57f6b82fc5..6b1077c2a63f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -337,10 +337,16 @@ enum ieee80211_bss_change {
337 * enum ieee80211_event_type - event to be notified to the low level driver 337 * enum ieee80211_event_type - event to be notified to the low level driver
338 * @RSSI_EVENT: AP's rssi crossed the a threshold set by the driver. 338 * @RSSI_EVENT: AP's rssi crossed the a threshold set by the driver.
339 * @MLME_EVENT: event related to MLME 339 * @MLME_EVENT: event related to MLME
340 * @BAR_RX_EVENT: a BAR was received
341 * @BA_FRAME_TIMEOUT: Frames were released from the reordering buffer because
342 * they timed out. This won't be called for each frame released, but only
343 * once each time the timeout triggers.
340 */ 344 */
341enum ieee80211_event_type { 345enum ieee80211_event_type {
342 RSSI_EVENT, 346 RSSI_EVENT,
343 MLME_EVENT, 347 MLME_EVENT,
348 BAR_RX_EVENT,
349 BA_FRAME_TIMEOUT,
344}; 350};
345 351
346/** 352/**
@@ -400,17 +406,31 @@ struct ieee80211_mlme_event {
400}; 406};
401 407
402/** 408/**
409 * struct ieee80211_ba_event - data attached for BlockAck related events
410 * @sta: pointer to the &ieee80211_sta to which this event relates
411 * @tid: the tid
412 * @ssn: the starting sequence number (for %BAR_RX_EVENT)
413 */
414struct ieee80211_ba_event {
415 struct ieee80211_sta *sta;
416 u16 tid;
417 u16 ssn;
418};
419
420/**
403 * struct ieee80211_event - event to be sent to the driver 421 * struct ieee80211_event - event to be sent to the driver
404 * @type: The event itself. See &enum ieee80211_event_type. 422 * @type: The event itself. See &enum ieee80211_event_type.
405 * @rssi: relevant if &type is %RSSI_EVENT 423 * @rssi: relevant if &type is %RSSI_EVENT
406 * @mlme: relevant if &type is %AUTH_EVENT 424 * @mlme: relevant if &type is %AUTH_EVENT
407 * @u: union holding the above two fields 425 * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
426 * @u: union holding the fields above
408 */ 427 */
409struct ieee80211_event { 428struct ieee80211_event {
410 enum ieee80211_event_type type; 429 enum ieee80211_event_type type;
411 union { 430 union {
412 struct ieee80211_rssi_event rssi; 431 struct ieee80211_rssi_event rssi;
413 struct ieee80211_mlme_event mlme; 432 struct ieee80211_mlme_event mlme;
433 struct ieee80211_ba_event ba;
414 } u; 434 } u;
415}; 435};
416 436
@@ -426,12 +446,8 @@ struct ieee80211_event {
426 * @ibss_creator: indicates if a new IBSS network is being created 446 * @ibss_creator: indicates if a new IBSS network is being created
427 * @aid: association ID number, valid only when @assoc is true 447 * @aid: association ID number, valid only when @assoc is true
428 * @use_cts_prot: use CTS protection 448 * @use_cts_prot: use CTS protection
429 * @use_short_preamble: use 802.11b short preamble; 449 * @use_short_preamble: use 802.11b short preamble
430 * if the hardware cannot handle this it must set the 450 * @use_short_slot: use short slot time (only relevant for ERP)
431 * IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE hardware flag
432 * @use_short_slot: use short slot time (only relevant for ERP);
433 * if the hardware cannot handle this it must set the
434 * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag
435 * @dtim_period: num of beacons before the next DTIM, for beaconing, 451 * @dtim_period: num of beacons before the next DTIM, for beaconing,
436 * valid in station mode only if after the driver was notified 452 * valid in station mode only if after the driver was notified
437 * with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then. 453 * with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then.
@@ -855,6 +871,9 @@ struct ieee80211_tx_info {
855 /* 4 bytes free */ 871 /* 4 bytes free */
856 } control; 872 } control;
857 struct { 873 struct {
874 u64 cookie;
875 } ack;
876 struct {
858 struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES]; 877 struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
859 s32 ack_signal; 878 s32 ack_signal;
860 u8 ampdu_ack_len; 879 u8 ampdu_ack_len;
@@ -1459,6 +1478,9 @@ enum ieee80211_key_flags {
1459 * wants to be given when a frame is transmitted and needs to be 1478 * wants to be given when a frame is transmitted and needs to be
1460 * encrypted in hardware. 1479 * encrypted in hardware.
1461 * @cipher: The key's cipher suite selector. 1480 * @cipher: The key's cipher suite selector.
1481 * @tx_pn: PN used for TX on non-TKIP keys, may be used by the driver
1482 * as well if it needs to do software PN assignment by itself
1483 * (e.g. due to TSO)
1462 * @flags: key flags, see &enum ieee80211_key_flags. 1484 * @flags: key flags, see &enum ieee80211_key_flags.
1463 * @keyidx: the key index (0-3) 1485 * @keyidx: the key index (0-3)
1464 * @keylen: key material length 1486 * @keylen: key material length
@@ -1471,6 +1493,7 @@ enum ieee80211_key_flags {
1471 * @iv_len: The IV length for this key type 1493 * @iv_len: The IV length for this key type
1472 */ 1494 */
1473struct ieee80211_key_conf { 1495struct ieee80211_key_conf {
1496 atomic64_t tx_pn;
1474 u32 cipher; 1497 u32 cipher;
1475 u8 icv_len; 1498 u8 icv_len;
1476 u8 iv_len; 1499 u8 iv_len;
@@ -1481,6 +1504,47 @@ struct ieee80211_key_conf {
1481 u8 key[0]; 1504 u8 key[0];
1482}; 1505};
1483 1506
1507#define IEEE80211_MAX_PN_LEN 16
1508
1509/**
1510 * struct ieee80211_key_seq - key sequence counter
1511 *
1512 * @tkip: TKIP data, containing IV32 and IV16 in host byte order
1513 * @ccmp: PN data, most significant byte first (big endian,
1514 * reverse order than in packet)
1515 * @aes_cmac: PN data, most significant byte first (big endian,
1516 * reverse order than in packet)
1517 * @aes_gmac: PN data, most significant byte first (big endian,
1518 * reverse order than in packet)
1519 * @gcmp: PN data, most significant byte first (big endian,
1520 * reverse order than in packet)
1521 * @hw: data for HW-only (e.g. cipher scheme) keys
1522 */
1523struct ieee80211_key_seq {
1524 union {
1525 struct {
1526 u32 iv32;
1527 u16 iv16;
1528 } tkip;
1529 struct {
1530 u8 pn[6];
1531 } ccmp;
1532 struct {
1533 u8 pn[6];
1534 } aes_cmac;
1535 struct {
1536 u8 pn[6];
1537 } aes_gmac;
1538 struct {
1539 u8 pn[6];
1540 } gcmp;
1541 struct {
1542 u8 seq[IEEE80211_MAX_PN_LEN];
1543 u8 seq_len;
1544 } hw;
1545 };
1546};
1547
1484/** 1548/**
1485 * struct ieee80211_cipher_scheme - cipher scheme 1549 * struct ieee80211_cipher_scheme - cipher scheme
1486 * 1550 *
@@ -1667,8 +1731,7 @@ struct ieee80211_tx_control {
1667 * @sta: station table entry, %NULL for per-vif queue 1731 * @sta: station table entry, %NULL for per-vif queue
1668 * @tid: the TID for this queue (unused for per-vif queue) 1732 * @tid: the TID for this queue (unused for per-vif queue)
1669 * @ac: the AC for this queue 1733 * @ac: the AC for this queue
1670 * @drv_priv: data area for driver use, will always be aligned to 1734 * @drv_priv: driver private area, sized by hw->txq_data_size
1671 * sizeof(void *).
1672 * 1735 *
1673 * The driver can obtain packets from this queue by calling 1736 * The driver can obtain packets from this queue by calling
1674 * ieee80211_tx_dequeue(). 1737 * ieee80211_tx_dequeue().
@@ -1717,13 +1780,6 @@ struct ieee80211_txq {
1717 * multicast frames when there are power saving stations so that 1780 * multicast frames when there are power saving stations so that
1718 * the driver can fetch them with ieee80211_get_buffered_bc(). 1781 * the driver can fetch them with ieee80211_get_buffered_bc().
1719 * 1782 *
1720 * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE:
1721 * Hardware is not capable of short slot operation on the 2.4 GHz band.
1722 *
1723 * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE:
1724 * Hardware is not capable of receiving frames with short preamble on
1725 * the 2.4 GHz band.
1726 *
1727 * @IEEE80211_HW_SIGNAL_UNSPEC: 1783 * @IEEE80211_HW_SIGNAL_UNSPEC:
1728 * Hardware can provide signal values but we don't know its units. We 1784 * Hardware can provide signal values but we don't know its units. We
1729 * expect values between 0 and @max_signal. 1785 * expect values between 0 and @max_signal.
@@ -1798,6 +1854,10 @@ struct ieee80211_txq {
1798 * the driver returns 1. This also forces the driver to advertise its 1854 * the driver returns 1. This also forces the driver to advertise its
1799 * supported cipher suites. 1855 * supported cipher suites.
1800 * 1856 *
1857 * @IEEE80211_HW_SUPPORT_FAST_XMIT: The driver/hardware supports fast-xmit,
1858 * this currently requires only the ability to calculate the duration
1859 * for frames.
1860 *
1801 * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface 1861 * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
1802 * queue mapping in order to use different queues (not just one per AC) 1862 * queue mapping in order to use different queues (not just one per AC)
1803 * for different virtual interfaces. See the doc section on HW queue 1863 * for different virtual interfaces. See the doc section on HW queue
@@ -1825,41 +1885,44 @@ struct ieee80211_txq {
1825 * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload 1885 * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
1826 * or tailroom of TX skbs without copying them first. 1886 * or tailroom of TX skbs without copying them first.
1827 * 1887 *
1828 * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands 1888 * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
1829 * in one command, mac80211 doesn't have to run separate scans per band. 1889 * in one command, mac80211 doesn't have to run separate scans per band.
1890 *
1891 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
1830 */ 1892 */
1831enum ieee80211_hw_flags { 1893enum ieee80211_hw_flags {
1832 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, 1894 IEEE80211_HW_HAS_RATE_CONTROL,
1833 IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, 1895 IEEE80211_HW_RX_INCLUDES_FCS,
1834 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2, 1896 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING,
1835 IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3, 1897 IEEE80211_HW_SIGNAL_UNSPEC,
1836 IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4, 1898 IEEE80211_HW_SIGNAL_DBM,
1837 IEEE80211_HW_SIGNAL_UNSPEC = 1<<5, 1899 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC,
1838 IEEE80211_HW_SIGNAL_DBM = 1<<6, 1900 IEEE80211_HW_SPECTRUM_MGMT,
1839 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC = 1<<7, 1901 IEEE80211_HW_AMPDU_AGGREGATION,
1840 IEEE80211_HW_SPECTRUM_MGMT = 1<<8, 1902 IEEE80211_HW_SUPPORTS_PS,
1841 IEEE80211_HW_AMPDU_AGGREGATION = 1<<9, 1903 IEEE80211_HW_PS_NULLFUNC_STACK,
1842 IEEE80211_HW_SUPPORTS_PS = 1<<10, 1904 IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
1843 IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11, 1905 IEEE80211_HW_MFP_CAPABLE,
1844 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, 1906 IEEE80211_HW_WANT_MONITOR_VIF,
1845 IEEE80211_HW_MFP_CAPABLE = 1<<13, 1907 IEEE80211_HW_NO_AUTO_VIF,
1846 IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, 1908 IEEE80211_HW_SW_CRYPTO_CONTROL,
1847 IEEE80211_HW_NO_AUTO_VIF = 1<<15, 1909 IEEE80211_HW_SUPPORT_FAST_XMIT,
1848 IEEE80211_HW_SW_CRYPTO_CONTROL = 1<<16, 1910 IEEE80211_HW_REPORTS_TX_ACK_STATUS,
1849 /* free slots */ 1911 IEEE80211_HW_CONNECTION_MONITOR,
1850 IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, 1912 IEEE80211_HW_QUEUE_CONTROL,
1851 IEEE80211_HW_CONNECTION_MONITOR = 1<<19, 1913 IEEE80211_HW_SUPPORTS_PER_STA_GTK,
1852 IEEE80211_HW_QUEUE_CONTROL = 1<<20, 1914 IEEE80211_HW_AP_LINK_PS,
1853 IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21, 1915 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW,
1854 IEEE80211_HW_AP_LINK_PS = 1<<22, 1916 IEEE80211_HW_SUPPORTS_RC_TABLE,
1855 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23, 1917 IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF,
1856 IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24, 1918 IEEE80211_HW_TIMING_BEACON_ONLY,
1857 IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25, 1919 IEEE80211_HW_SUPPORTS_HT_CCK_RATES,
1858 IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, 1920 IEEE80211_HW_CHANCTX_STA_CSA,
1859 IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27, 1921 IEEE80211_HW_SUPPORTS_CLONED_SKBS,
1860 IEEE80211_HW_CHANCTX_STA_CSA = 1<<28, 1922 IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
1861 IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29, 1923
1862 IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30, 1924 /* keep last, obviously */
1925 NUM_IEEE80211_HW_FLAGS
1863}; 1926};
1864 1927
1865/** 1928/**
@@ -1940,8 +2003,8 @@ enum ieee80211_hw_flags {
1940 * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values. 2003 * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
1941 * 2004 *
1942 * @netdev_features: netdev features to be set in each netdev created 2005 * @netdev_features: netdev features to be set in each netdev created
1943 * from this HW. Note only HW checksum features are currently 2006 * from this HW. Note that not all features are usable with mac80211,
1944 * compatible with mac80211. Other feature bits will be rejected. 2007 * other features will be rejected during HW registration.
1945 * 2008 *
1946 * @uapsd_queues: This bitmap is included in (re)association frame to indicate 2009 * @uapsd_queues: This bitmap is included in (re)association frame to indicate
1947 * for each access category if it is uAPSD trigger-enabled and delivery- 2010 * for each access category if it is uAPSD trigger-enabled and delivery-
@@ -1966,7 +2029,7 @@ struct ieee80211_hw {
1966 struct wiphy *wiphy; 2029 struct wiphy *wiphy;
1967 const char *rate_control_algorithm; 2030 const char *rate_control_algorithm;
1968 void *priv; 2031 void *priv;
1969 u32 flags; 2032 unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)];
1970 unsigned int extra_tx_headroom; 2033 unsigned int extra_tx_headroom;
1971 unsigned int extra_beacon_tailroom; 2034 unsigned int extra_beacon_tailroom;
1972 int vif_data_size; 2035 int vif_data_size;
@@ -1992,6 +2055,20 @@ struct ieee80211_hw {
1992 int txq_ac_max_pending; 2055 int txq_ac_max_pending;
1993}; 2056};
1994 2057
2058static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
2059 enum ieee80211_hw_flags flg)
2060{
2061 return test_bit(flg, hw->flags);
2062}
2063#define ieee80211_hw_check(hw, flg) _ieee80211_hw_check(hw, IEEE80211_HW_##flg)
2064
2065static inline void _ieee80211_hw_set(struct ieee80211_hw *hw,
2066 enum ieee80211_hw_flags flg)
2067{
2068 return __set_bit(flg, hw->flags);
2069}
2070#define ieee80211_hw_set(hw, flg) _ieee80211_hw_set(hw, IEEE80211_HW_##flg)
2071
1995/** 2072/**
1996 * struct ieee80211_scan_request - hw scan request 2073 * struct ieee80211_scan_request - hw scan request
1997 * 2074 *
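
With the hardware capability flags turned into an enum indexing the flags[] bitmap, drivers are expected to go through the ieee80211_hw_set()/ieee80211_hw_check() helpers defined just above instead of open-coded bit masks. A driver-side sketch of the migration (function name hypothetical):

#include <linux/printk.h>
#include <net/mac80211.h>

static void example_setup_hw(struct ieee80211_hw *hw)
{
	/* Previously: hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS; */
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SUPPORTS_PS);

	/* Previously: if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) */
	if (ieee80211_hw_check(hw, AMPDU_AGGREGATION))
		pr_debug("A-MPDU aggregation supported\n");
}
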
@@ -2505,10 +2582,6 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
2505 * stack. It is always safe to pass more frames than requested, 2582 * stack. It is always safe to pass more frames than requested,
2506 * but this has negative impact on power consumption. 2583 * but this has negative impact on power consumption.
2507 * 2584 *
2508 * @FIF_PROMISC_IN_BSS: promiscuous mode within your BSS,
2509 * think of the BSS as your network segment and then this corresponds
2510 * to the regular ethernet device promiscuous mode.
2511 *
2512 * @FIF_ALLMULTI: pass all multicast frames, this is used if requested 2585 * @FIF_ALLMULTI: pass all multicast frames, this is used if requested
2513 * by the user or if the hardware is not capable of filtering by 2586 * by the user or if the hardware is not capable of filtering by
2514 * multicast address. 2587 * multicast address.
@@ -2525,18 +2598,16 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
2525 * mac80211 needs to do and the amount of CPU wakeups, so you should 2598 * mac80211 needs to do and the amount of CPU wakeups, so you should
2526 * honour this flag if possible. 2599 * honour this flag if possible.
2527 * 2600 *
2528 * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS 2601 * @FIF_CONTROL: pass control frames (except for PS Poll) addressed to this
2529 * is not set then only those addressed to this station. 2602 * station
2530 * 2603 *
2531 * @FIF_OTHER_BSS: pass frames destined to other BSSes 2604 * @FIF_OTHER_BSS: pass frames destined to other BSSes
2532 * 2605 *
2533 * @FIF_PSPOLL: pass PS Poll frames, if PROMISC_IN_BSS is not set then only 2606 * @FIF_PSPOLL: pass PS Poll frames
2534 * those addressed to this station.
2535 * 2607 *
2536 * @FIF_PROBE_REQ: pass probe request frames 2608 * @FIF_PROBE_REQ: pass probe request frames
2537 */ 2609 */
2538enum ieee80211_filter_flags { 2610enum ieee80211_filter_flags {
2539 FIF_PROMISC_IN_BSS = 1<<0,
2540 FIF_ALLMULTI = 1<<1, 2611 FIF_ALLMULTI = 1<<1,
2541 FIF_FCSFAIL = 1<<2, 2612 FIF_FCSFAIL = 1<<2,
2542 FIF_PLCPFAIL = 1<<3, 2613 FIF_PLCPFAIL = 1<<3,
@@ -2819,9 +2890,9 @@ enum ieee80211_reconfig_type {
2819 * Returns zero if statistics are available. 2890 * Returns zero if statistics are available.
2820 * The callback can sleep. 2891 * The callback can sleep.
2821 * 2892 *
2822 * @get_tkip_seq: If your device implements TKIP encryption in hardware this 2893 * @get_key_seq: If your device implements encryption in hardware and does
2823 * callback should be provided to read the TKIP transmit IVs (both IV32 2894 * IV/PN assignment then this callback should be provided to read the
2824 * and IV16) for the given key from hardware. 2895 * IV/PN for the given key from hardware.
2825 * The callback must be atomic. 2896 * The callback must be atomic.
2826 * 2897 *
2827 * @set_frag_threshold: Configuration of fragmentation threshold. Assign this 2898 * @set_frag_threshold: Configuration of fragmentation threshold. Assign this
@@ -3004,7 +3075,7 @@ enum ieee80211_reconfig_type {
3004 * The callback can sleep. 3075 * The callback can sleep.
3005 * @event_callback: Notify driver about any event in mac80211. See 3076 * @event_callback: Notify driver about any event in mac80211. See
3006 * &enum ieee80211_event_type for the different types. 3077 * &enum ieee80211_event_type for the different types.
3007 * The callback can sleep. 3078 * The callback must be atomic.
3008 * 3079 *
3009 * @release_buffered_frames: Release buffered frames according to the given 3080 * @release_buffered_frames: Release buffered frames according to the given
3010 * parameters. In the case where the driver buffers some frames for 3081 * parameters. In the case where the driver buffers some frames for
@@ -3220,8 +3291,9 @@ struct ieee80211_ops {
3220 struct ieee80211_vif *vif); 3291 struct ieee80211_vif *vif);
3221 int (*get_stats)(struct ieee80211_hw *hw, 3292 int (*get_stats)(struct ieee80211_hw *hw,
3222 struct ieee80211_low_level_stats *stats); 3293 struct ieee80211_low_level_stats *stats);
3223 void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx, 3294 void (*get_key_seq)(struct ieee80211_hw *hw,
3224 u32 *iv32, u16 *iv16); 3295 struct ieee80211_key_conf *key,
3296 struct ieee80211_key_seq *seq);
3225 int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value); 3297 int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
3226 int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value); 3298 int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
3227 int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 3299 int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -3469,14 +3541,15 @@ enum ieee80211_tpt_led_trigger_flags {
3469}; 3541};
3470 3542
3471#ifdef CONFIG_MAC80211_LEDS 3543#ifdef CONFIG_MAC80211_LEDS
3472char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw); 3544const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
3473char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw); 3545const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
3474char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw); 3546const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
3475char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw); 3547const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
3476char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, 3548const char *
3477 unsigned int flags, 3549__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
3478 const struct ieee80211_tpt_blink *blink_table, 3550 unsigned int flags,
3479 unsigned int blink_table_len); 3551 const struct ieee80211_tpt_blink *blink_table,
3552 unsigned int blink_table_len);
3480#endif 3553#endif
3481/** 3554/**
3482 * ieee80211_get_tx_led_name - get name of TX LED 3555 * ieee80211_get_tx_led_name - get name of TX LED
@@ -3490,7 +3563,7 @@ char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
3490 * 3563 *
3491 * Return: The name of the LED trigger. %NULL if not configured for LEDs. 3564 * Return: The name of the LED trigger. %NULL if not configured for LEDs.
3492 */ 3565 */
3493static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw) 3566static inline const char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
3494{ 3567{
3495#ifdef CONFIG_MAC80211_LEDS 3568#ifdef CONFIG_MAC80211_LEDS
3496 return __ieee80211_get_tx_led_name(hw); 3569 return __ieee80211_get_tx_led_name(hw);
@@ -3511,7 +3584,7 @@ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
3511 * 3584 *
3512 * Return: The name of the LED trigger. %NULL if not configured for LEDs. 3585 * Return: The name of the LED trigger. %NULL if not configured for LEDs.
3513 */ 3586 */
3514static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw) 3587static inline const char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
3515{ 3588{
3516#ifdef CONFIG_MAC80211_LEDS 3589#ifdef CONFIG_MAC80211_LEDS
3517 return __ieee80211_get_rx_led_name(hw); 3590 return __ieee80211_get_rx_led_name(hw);
@@ -3532,7 +3605,7 @@ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
3532 * 3605 *
3533 * Return: The name of the LED trigger. %NULL if not configured for LEDs. 3606 * Return: The name of the LED trigger. %NULL if not configured for LEDs.
3534 */ 3607 */
3535static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) 3608static inline const char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
3536{ 3609{
3537#ifdef CONFIG_MAC80211_LEDS 3610#ifdef CONFIG_MAC80211_LEDS
3538 return __ieee80211_get_assoc_led_name(hw); 3611 return __ieee80211_get_assoc_led_name(hw);
@@ -3553,7 +3626,7 @@ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
3553 * 3626 *
3554 * Return: The name of the LED trigger. %NULL if not configured for LEDs. 3627 * Return: The name of the LED trigger. %NULL if not configured for LEDs.
3555 */ 3628 */
3556static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw) 3629static inline const char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
3557{ 3630{
3558#ifdef CONFIG_MAC80211_LEDS 3631#ifdef CONFIG_MAC80211_LEDS
3559 return __ieee80211_get_radio_led_name(hw); 3632 return __ieee80211_get_radio_led_name(hw);
@@ -3574,7 +3647,7 @@ static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
3574 * 3647 *
3575 * Note: This function must be called before ieee80211_register_hw(). 3648 * Note: This function must be called before ieee80211_register_hw().
3576 */ 3649 */
3577static inline char * 3650static inline const char *
3578ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags, 3651ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags,
3579 const struct ieee80211_tpt_blink *blink_table, 3652 const struct ieee80211_tpt_blink *blink_table,
3580 unsigned int blink_table_len) 3653 unsigned int blink_table_len)
@@ -4255,40 +4328,6 @@ void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
4255 u8 *k1, u8 *k2); 4328 u8 *k1, u8 *k2);
4256 4329
4257/** 4330/**
4258 * struct ieee80211_key_seq - key sequence counter
4259 *
4260 * @tkip: TKIP data, containing IV32 and IV16 in host byte order
4261 * @ccmp: PN data, most significant byte first (big endian,
4262 * reverse order than in packet)
4263 * @aes_cmac: PN data, most significant byte first (big endian,
4264 * reverse order than in packet)
4265 * @aes_gmac: PN data, most significant byte first (big endian,
4266 * reverse order than in packet)
4267 * @gcmp: PN data, most significant byte first (big endian,
4268 * reverse order than in packet)
4269 */
4270struct ieee80211_key_seq {
4271 union {
4272 struct {
4273 u32 iv32;
4274 u16 iv16;
4275 } tkip;
4276 struct {
4277 u8 pn[6];
4278 } ccmp;
4279 struct {
4280 u8 pn[6];
4281 } aes_cmac;
4282 struct {
4283 u8 pn[6];
4284 } aes_gmac;
4285 struct {
4286 u8 pn[6];
4287 } gcmp;
4288 };
4289};
4290
4291/**
4292 * ieee80211_get_key_tx_seq - get key TX sequence counter 4331 * ieee80211_get_key_tx_seq - get key TX sequence counter
4293 * 4332 *
4294 * @keyconf: the parameter passed with the set key 4333 * @keyconf: the parameter passed with the set key
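
The diff above replaces the TKIP-only get_tkip_seq callback with a cipher-agnostic get_key_seq that fills struct ieee80211_key_seq. A rough driver-side shape, assuming hypothetical register-read helpers (only the callback signature and the key_seq layout come from the header):

#include <linux/ieee80211.h>
#include <net/mac80211.h>

/* Stand-ins for real hardware register reads (hypothetical). */
static u32 example_read_iv32(struct ieee80211_hw *hw, s8 idx) { return 0; }
static u16 example_read_iv16(struct ieee80211_hw *hw, s8 idx) { return 0; }

static void example_get_key_seq(struct ieee80211_hw *hw,
				struct ieee80211_key_conf *key,
				struct ieee80211_key_seq *seq)
{
	if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		seq->tkip.iv32 = example_read_iv32(hw, key->hw_key_idx);
		seq->tkip.iv16 = example_read_iv16(hw, key->hw_key_idx);
	}
	/* CCMP/GCMP keys would fill seq->ccmp.pn / seq->gcmp.pn instead. */
}
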
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 7df28a4c23f9..f534a46911dc 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -31,99 +31,122 @@
31 */ 31 */
32#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14) 32#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14)
33 33
34/* The following flags are used to indicate changed address settings from 34/**
35 * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags
36 *
37 * The following flags are used to indicate changed address settings from
35 * the stack to the hardware. 38 * the stack to the hardware.
39 *
40 * @IEEE802154_AFILT_SADDR_CHANGED: Indicates that the short address will be
41 * changed.
42 *
43 * @IEEE802154_AFILT_IEEEADDR_CHANGED: Indicates that the extended address
44 * will be changed.
45 *
46 * @IEEE802154_AFILT_PANID_CHANGED: Indicates that the pan id will be changed.
47 *
48 * @IEEE802154_AFILT_PANC_CHANGED: Indicates that the address filter will
49 * do frame address filtering as a pan coordinator.
36 */ 50 */
51enum ieee802154_hw_addr_filt_flags {
52 IEEE802154_AFILT_SADDR_CHANGED = BIT(0),
53 IEEE802154_AFILT_IEEEADDR_CHANGED = BIT(1),
54 IEEE802154_AFILT_PANID_CHANGED = BIT(2),
55 IEEE802154_AFILT_PANC_CHANGED = BIT(3),
56};
37 57
38/* indicates that the Short Address changed */ 58/**
39#define IEEE802154_AFILT_SADDR_CHANGED 0x00000001 59 * struct ieee802154_hw_addr_filt - hardware address filtering settings
40/* indicates that the IEEE Address changed */ 60 *
41#define IEEE802154_AFILT_IEEEADDR_CHANGED 0x00000002 61 * @pan_id: pan_id which should be set to the hardware address filter.
42/* indicates that the PAN ID changed */ 62 *
43#define IEEE802154_AFILT_PANID_CHANGED 0x00000004 63 * @short_addr: short_addr which should be set to the hardware address filter.
44/* indicates that PAN Coordinator status changed */ 64 *
45#define IEEE802154_AFILT_PANC_CHANGED 0x00000008 65 * @ieee_addr: extended address which should be set to the hardware address
46 66 * filter.
67 *
68 * @pan_coord: boolean if hardware filtering should operate as a coordinator.
69 */
47struct ieee802154_hw_addr_filt { 70struct ieee802154_hw_addr_filt {
48 __le16 pan_id; /* Each independent PAN selects a unique 71 __le16 pan_id;
49 * identifier. This PAN id allows communication
50 * between devices within a network using short
51 * addresses and enables transmissions between
52 * devices across independent networks.
53 */
54 __le16 short_addr; 72 __le16 short_addr;
55 __le64 ieee_addr; 73 __le64 ieee_addr;
56 u8 pan_coord; 74 bool pan_coord;
57};
58
59struct ieee802154_vif {
60 int type;
61
62 /* must be last */
63 u8 drv_priv[0] __aligned(sizeof(void *));
64}; 75};
65 76
77/**
78 * struct ieee802154_hw - ieee802154 hardware
79 *
80 * @extra_tx_headroom: headroom to reserve in each transmit skb for use by the
81 * driver (e.g. for transmit headers.)
82 *
83 * @flags: hardware flags, see &enum ieee802154_hw_flags
84 *
85 * @parent: parent device of the hardware.
86 *
87 * @priv: pointer to private area that was allocated for driver use along with
88 * this structure.
89 *
90 * @phy: This points to the &struct wpan_phy allocated for this 802.15.4 PHY.
91 */
66struct ieee802154_hw { 92struct ieee802154_hw {
67 /* filled by the driver */ 93 /* filled by the driver */
68 int extra_tx_headroom; 94 int extra_tx_headroom;
69 u32 flags; 95 u32 flags;
70 struct device *parent; 96 struct device *parent;
97 void *priv;
71 98
72 /* filled by mac802154 core */ 99 /* filled by mac802154 core */
73 struct ieee802154_hw_addr_filt hw_filt;
74 void *priv;
75 struct wpan_phy *phy; 100 struct wpan_phy *phy;
76 size_t vif_data_size;
77}; 101};
78 102
79/* Checksum is in hardware and is omitted from a packet 103/**
104 * enum ieee802154_hw_flags - hardware flags
80 * 105 *
81 * These following flags are used to indicate hardware capabilities to 106 * These flags are used to indicate hardware capabilities to
82 * the stack. Generally, flags here should have their meaning 107 * the stack. Generally, flags here should have their meaning
83 * done in a way that the simplest hardware doesn't need setting 108 * done in a way that the simplest hardware doesn't need setting
84 * any particular flags. There are some exceptions to this rule, 109 * any particular flags. There are some exceptions to this rule,
85 * however, so you are advised to review these flags carefully. 110 * however, so you are advised to review these flags carefully.
111 *
112 * @IEEE802154_HW_TX_OMIT_CKSUM: Indicates that xmitter will add FCS on its
113 * own.
114 *
115 * @IEEE802154_HW_LBT: Indicates that transceiver will support listen before
116 * transmit.
117 *
118 * @IEEE802154_HW_CSMA_PARAMS: Indicates that transceiver will support csma
119 * parameters (max_be, min_be, backoff exponents).
120 *
121 * @IEEE802154_HW_FRAME_RETRIES: Indicates that transceiver will support ARET
122 * frame retries setting.
123 *
124 * @IEEE802154_HW_AFILT: Indicates that transceiver will support hardware
125 * address filter setting.
126 *
127 * @IEEE802154_HW_PROMISCUOUS: Indicates that transceiver will support
128 * promiscuous mode setting.
129 *
130 * @IEEE802154_HW_RX_OMIT_CKSUM: Indicates that receiver omits FCS.
131 *
132 * @IEEE802154_HW_RX_DROP_BAD_CKSUM: Indicates that receiver will not filter
133 * frames with bad checksum.
86 */ 134 */
87 135enum ieee802154_hw_flags {
88/* Indicates that xmitter will add FCS on it's own. */ 136 IEEE802154_HW_TX_OMIT_CKSUM = BIT(0),
89#define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001 137 IEEE802154_HW_LBT = BIT(1),
90/* Indicates that receiver will autorespond with ACK frames. */ 138 IEEE802154_HW_CSMA_PARAMS = BIT(2),
91#define IEEE802154_HW_AACK 0x00000002 139 IEEE802154_HW_FRAME_RETRIES = BIT(3),
92/* Indicates that transceiver will support transmit power setting. */ 140 IEEE802154_HW_AFILT = BIT(4),
93#define IEEE802154_HW_TXPOWER 0x00000004 141 IEEE802154_HW_PROMISCUOUS = BIT(5),
94/* Indicates that transceiver will support listen before transmit. */ 142 IEEE802154_HW_RX_OMIT_CKSUM = BIT(6),
95#define IEEE802154_HW_LBT 0x00000008 143 IEEE802154_HW_RX_DROP_BAD_CKSUM = BIT(7),
96/* Indicates that transceiver will support cca mode setting. */ 144};
97#define IEEE802154_HW_CCA_MODE 0x00000010
98/* Indicates that transceiver will support cca ed level setting. */
99#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020
100/* Indicates that transceiver will support csma (max_be, min_be, csma retries)
101 * settings. */
102#define IEEE802154_HW_CSMA_PARAMS 0x00000040
103/* Indicates that transceiver will support ARET frame retries setting. */
104#define IEEE802154_HW_FRAME_RETRIES 0x00000080
105/* Indicates that transceiver will support hardware address filter setting. */
106#define IEEE802154_HW_AFILT 0x00000100
107/* Indicates that transceiver will support promiscuous mode setting. */
108#define IEEE802154_HW_PROMISCUOUS 0x00000200
109/* Indicates that receiver omits FCS. */
110#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400
111/* Indicates that receiver will not filter frames with bad checksum. */
112#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800
113 145
114/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */ 146/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */
115#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \ 147#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \
116 IEEE802154_HW_RX_OMIT_CKSUM) 148 IEEE802154_HW_RX_OMIT_CKSUM)
117 149
118/* This groups the most common CSMA support fields into one. */
119#define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \
120 IEEE802154_HW_CCA_ED_LEVEL | \
121 IEEE802154_HW_CSMA_PARAMS)
122
123/* This groups the most common ARET support fields into one. */
124#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \
125 IEEE802154_HW_FRAME_RETRIES)
126
127/* struct ieee802154_ops - callbacks from mac802154 to the driver 150/* struct ieee802154_ops - callbacks from mac802154 to the driver
128 * 151 *
129 * This structure contains various callbacks that the driver may 152 * This structure contains various callbacks that the driver may
@@ -171,7 +194,7 @@ struct ieee802154_hw {
171 * Returns either zero, or negative errno. 194 * Returns either zero, or negative errno.
172 * 195 *
173 * set_txpower: 196 * set_txpower:
174 * Set radio transmit power in dB. Called with pib_lock held. 197 * Set radio transmit power in mBm. Called with pib_lock held.
175 * Returns either zero, or negative errno. 198 * Returns either zero, or negative errno.
176 * 199 *
177 * set_lbt 200 * set_lbt
@@ -184,7 +207,7 @@ struct ieee802154_hw {
184 * Returns either zero, or negative errno. 207 * Returns either zero, or negative errno.
185 * 208 *
186 * set_cca_ed_level 209 * set_cca_ed_level
187 * Sets the CCA energy detection threshold in dBm. Called with pib_lock 210 * Sets the CCA energy detection threshold in mBm. Called with pib_lock
188 * held. 211 * held.
189 * Returns either zero, or negative errno. 212 * Returns either zero, or negative errno.
190 * 213 *
@@ -213,12 +236,11 @@ struct ieee802154_ops {
213 int (*set_hw_addr_filt)(struct ieee802154_hw *hw, 236 int (*set_hw_addr_filt)(struct ieee802154_hw *hw,
214 struct ieee802154_hw_addr_filt *filt, 237 struct ieee802154_hw_addr_filt *filt,
215 unsigned long changed); 238 unsigned long changed);
216 int (*set_txpower)(struct ieee802154_hw *hw, s8 dbm); 239 int (*set_txpower)(struct ieee802154_hw *hw, s32 mbm);
217 int (*set_lbt)(struct ieee802154_hw *hw, bool on); 240 int (*set_lbt)(struct ieee802154_hw *hw, bool on);
218 int (*set_cca_mode)(struct ieee802154_hw *hw, 241 int (*set_cca_mode)(struct ieee802154_hw *hw,
219 const struct wpan_phy_cca *cca); 242 const struct wpan_phy_cca *cca);
220 int (*set_cca_ed_level)(struct ieee802154_hw *hw, 243 int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm);
221 s32 level);
222 int (*set_csma_params)(struct ieee802154_hw *hw, 244 int (*set_csma_params)(struct ieee802154_hw *hw,
223 u8 min_be, u8 max_be, u8 retries); 245 u8 min_be, u8 max_be, u8 retries);
224 int (*set_frame_retries)(struct ieee802154_hw *hw, 246 int (*set_frame_retries)(struct ieee802154_hw *hw,
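
set_txpower() and set_cca_ed_level() above now take mBm (1/100 dBm) rather than raw dBm or an unspecified level, presumably to match the units used elsewhere in the 802.15.4 configuration path. A driver-side sketch of the conversion (the register-write helper is hypothetical):

#include <net/mac802154.h>

/* Stand-in for a real radio register write. */
static int example_write_txpower_reg(void *priv, s8 dbm) { return 0; }

static int example_set_txpower(struct ieee802154_hw *hw, s32 mbm)
{
	s8 dbm = mbm / 100;	/* 1 mBm == 1/100 dBm; truncates toward zero */

	return example_write_txpower_reg(hw->priv, dbm);
}
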
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index f733656404de..e951453e0a23 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -28,6 +28,8 @@
28#include <net/netns/xfrm.h> 28#include <net/netns/xfrm.h>
29#include <net/netns/mpls.h> 29#include <net/netns/mpls.h>
30#include <linux/ns_common.h> 30#include <linux/ns_common.h>
31#include <linux/idr.h>
32#include <linux/skbuff.h>
31 33
32struct user_namespace; 34struct user_namespace;
33struct proc_dir_entry; 35struct proc_dir_entry;
@@ -58,6 +60,7 @@ struct net {
58 struct list_head exit_list; /* Use only net_mutex */ 60 struct list_head exit_list; /* Use only net_mutex */
59 61
60 struct user_namespace *user_ns; /* Owning user namespace */ 62 struct user_namespace *user_ns; /* Owning user namespace */
63 spinlock_t nsid_lock;
61 struct idr netns_ids; 64 struct idr netns_ids;
62 65
63 struct ns_common ns; 66 struct ns_common ns;
@@ -271,7 +274,9 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
271#define __net_initconst __initconst 274#define __net_initconst __initconst
272#endif 275#endif
273 276
277int peernet2id_alloc(struct net *net, struct net *peer);
274int peernet2id(struct net *net, struct net *peer); 278int peernet2id(struct net *net, struct net *peer);
279bool peernet_has_id(struct net *net, struct net *peer);
275struct net *get_net_ns_by_id(struct net *net, int id); 280struct net *get_net_ns_by_id(struct net *net, int id);
276 281
277struct pernet_operations { 282struct pernet_operations {
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 2aa6048a55c1..bab824bde92c 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -1,6 +1,66 @@
1#ifndef _BR_NETFILTER_H_ 1#ifndef _BR_NETFILTER_H_
2#define _BR_NETFILTER_H_ 2#define _BR_NETFILTER_H_
3 3
4#include "../../../net/bridge/br_private.h"
5
6static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
7{
8 skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
9
10 if (likely(skb->nf_bridge))
11 atomic_set(&(skb->nf_bridge->use), 1);
12
13 return skb->nf_bridge;
14}
15
16void nf_bridge_update_protocol(struct sk_buff *skb);
17
18static inline struct nf_bridge_info *
19nf_bridge_info_get(const struct sk_buff *skb)
20{
21 return skb->nf_bridge;
22}
23
24unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
25
26static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
27{
28 unsigned int len = nf_bridge_encap_header_len(skb);
29
30 skb_push(skb, len);
31 skb->network_header -= len;
32}
33
34int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb);
35
36static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
37{
38 struct net_bridge_port *port;
39
40 port = br_port_get_rcu(dev);
41 return port ? &port->br->fake_rtable : NULL;
42}
43
44struct net_device *setup_pre_routing(struct sk_buff *skb);
4void br_netfilter_enable(void); 45void br_netfilter_enable(void);
5 46
47#if IS_ENABLED(CONFIG_IPV6)
48int br_validate_ipv6(struct sk_buff *skb);
49unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
50 struct sk_buff *skb,
51 const struct nf_hook_state *state);
52#else
53static inline int br_validate_ipv6(struct sk_buff *skb)
54{
55 return -1;
56}
57
58static inline unsigned int
59br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
60 const struct nf_hook_state *state)
61{
62 return NF_DROP;
63}
64#endif
65
6#endif /* _BR_NETFILTER_H_ */ 66#endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 095433b8a8b0..37cd3911d5c5 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -291,7 +291,7 @@ extern unsigned int nf_conntrack_max;
291extern unsigned int nf_conntrack_hash_rnd; 291extern unsigned int nf_conntrack_hash_rnd;
292void init_nf_conntrack_hash_rnd(void); 292void init_nf_conntrack_hash_rnd(void);
293 293
294void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl); 294struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
295 295
296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) 297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index d81d584157e1..e8635854a55b 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -24,6 +24,8 @@ struct nf_queue_entry {
24struct nf_queue_handler { 24struct nf_queue_handler {
25 int (*outfn)(struct nf_queue_entry *entry, 25 int (*outfn)(struct nf_queue_entry *entry,
26 unsigned int queuenum); 26 unsigned int queuenum);
27 void (*nf_hook_drop)(struct net *net,
28 struct nf_hook_ops *ops);
27}; 29};
28 30
29void nf_register_queue_handler(const struct nf_queue_handler *qh); 31void nf_register_queue_handler(const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index e6bcf55dcf20..2a246680a6c3 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -781,6 +781,7 @@ struct nft_stats {
781}; 781};
782 782
783#define NFT_HOOK_OPS_MAX 2 783#define NFT_HOOK_OPS_MAX 2
784#define NFT_BASECHAIN_DISABLED (1 << 0)
784 785
785/** 786/**
786 * struct nft_base_chain - nf_tables base chain 787 * struct nft_base_chain - nf_tables base chain
@@ -791,14 +792,17 @@ struct nft_stats {
791 * @policy: default policy 792 * @policy: default policy
792 * @stats: per-cpu chain stats 793 * @stats: per-cpu chain stats
793 * @chain: the chain 794 * @chain: the chain
795 * @dev_name: device name that this base chain is attached to (if any)
794 */ 796 */
795struct nft_base_chain { 797struct nft_base_chain {
796 struct nf_hook_ops ops[NFT_HOOK_OPS_MAX]; 798 struct nf_hook_ops ops[NFT_HOOK_OPS_MAX];
797 possible_net_t pnet; 799 possible_net_t pnet;
798 const struct nf_chain_type *type; 800 const struct nf_chain_type *type;
799 u8 policy; 801 u8 policy;
802 u8 flags;
800 struct nft_stats __percpu *stats; 803 struct nft_stats __percpu *stats;
801 struct nft_chain chain; 804 struct nft_chain chain;
805 char dev_name[IFNAMSIZ];
802}; 806};
803 807
804static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain) 808static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
@@ -806,6 +810,11 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai
806 return container_of(chain, struct nft_base_chain, chain); 810 return container_of(chain, struct nft_base_chain, chain);
807} 811}
808 812
813int nft_register_basechain(struct nft_base_chain *basechain,
814 unsigned int hook_nops);
815void nft_unregister_basechain(struct nft_base_chain *basechain,
816 unsigned int hook_nops);
817
809unsigned int nft_do_chain(struct nft_pktinfo *pkt, 818unsigned int nft_do_chain(struct nft_pktinfo *pkt,
810 const struct nf_hook_ops *ops); 819 const struct nf_hook_ops *ops);
811 820
@@ -830,6 +839,10 @@ struct nft_table {
830 char name[NFT_TABLE_MAXNAMELEN]; 839 char name[NFT_TABLE_MAXNAMELEN];
831}; 840};
832 841
842enum nft_af_flags {
843 NFT_AF_NEEDS_DEV = (1 << 0),
844};
845
833/** 846/**
834 * struct nft_af_info - nf_tables address family info 847 * struct nft_af_info - nf_tables address family info
835 * 848 *
@@ -838,6 +851,7 @@ struct nft_table {
838 * @nhooks: number of hooks in this family 851 * @nhooks: number of hooks in this family
839 * @owner: module owner 852 * @owner: module owner
840 * @tables: used internally 853 * @tables: used internally
854 * @flags: family flags
841 * @nops: number of hook ops in this family 855 * @nops: number of hook ops in this family
842 * @hook_ops_init: initialization function for chain hook ops 856 * @hook_ops_init: initialization function for chain hook ops
843 * @hooks: hookfn overrides for packet validation 857 * @hooks: hookfn overrides for packet validation
@@ -848,6 +862,7 @@ struct nft_af_info {
848 unsigned int nhooks; 862 unsigned int nhooks;
849 struct module *owner; 863 struct module *owner;
850 struct list_head tables; 864 struct list_head tables;
865 u32 flags;
851 unsigned int nops; 866 unsigned int nops;
852 void (*hook_ops_init)(struct nf_hook_ops *, 867 void (*hook_ops_init)(struct nf_hook_ops *,
853 unsigned int); 868 unsigned int);
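
The additions above introduce per-basechain flags (NFT_BASECHAIN_DISABLED), an attached device name, explicit basechain (un)registration, and a per-family NFT_AF_NEEDS_DEV flag for families that must bind to a device. Two illustrative helpers, not part of the header, showing how the new flag fields are meant to be tested:

	static inline bool nft_basechain_disabled(const struct nft_base_chain *bc)
	{
		return bc->flags & NFT_BASECHAIN_DISABLED;
	}

	static inline bool nft_af_needs_dev(const struct nft_af_info *afi)
	{
		return afi->flags & NFT_AF_NEEDS_DEV;
	}
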
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 29d6a94db54d..723b61c82b3f 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -68,7 +68,6 @@ struct ct_pcpu {
68 spinlock_t lock; 68 spinlock_t lock;
69 struct hlist_nulls_head unconfirmed; 69 struct hlist_nulls_head unconfirmed;
70 struct hlist_nulls_head dying; 70 struct hlist_nulls_head dying;
71 struct hlist_nulls_head tmpl;
72}; 71};
73 72
74struct netns_ct { 73struct netns_ct {
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 614a49be68a9..c68926b4899c 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -19,6 +19,7 @@ struct sock;
19struct local_ports { 19struct local_ports {
20 seqlock_t lock; 20 seqlock_t lock;
21 int range[2]; 21 int range[2];
22 bool warned;
22}; 23};
23 24
24struct ping_group_range { 25struct ping_group_range {
@@ -77,6 +78,8 @@ struct netns_ipv4 {
77 struct local_ports ip_local_ports; 78 struct local_ports ip_local_ports;
78 79
79 int sysctl_tcp_ecn; 80 int sysctl_tcp_ecn;
81 int sysctl_tcp_ecn_fallback;
82
80 int sysctl_ip_no_pmtu_disc; 83 int sysctl_ip_no_pmtu_disc;
81 int sysctl_ip_fwd_use_pmtu; 84 int sysctl_ip_fwd_use_pmtu;
82 int sysctl_ip_nonlocal_bind; 85 int sysctl_ip_nonlocal_bind;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index d2527bf81142..8d93544a2d2b 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -34,6 +34,7 @@ struct netns_sysctl_ipv6 {
34 int fwmark_reflect; 34 int fwmark_reflect;
35 int idgen_retries; 35 int idgen_retries;
36 int idgen_delay; 36 int idgen_delay;
37 int flowlabel_state_ranges;
37}; 38};
38 39
39struct netns_ipv6 { 40struct netns_ipv6 {
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 88740024ccf3..532e4ba64f49 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -1,9 +1,9 @@
1#ifndef __NETNS_NETFILTER_H 1#ifndef __NETNS_NETFILTER_H
2#define __NETNS_NETFILTER_H 2#define __NETNS_NETFILTER_H
3 3
4#include <linux/proc_fs.h> 4#include <linux/netfilter_defs.h>
5#include <linux/netfilter.h>
6 5
6struct proc_dir_entry;
7struct nf_logger; 7struct nf_logger;
8 8
9struct netns_nf { 9struct netns_nf {
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index eee608b12cc9..c80781146019 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,6 +13,7 @@ struct netns_nftables {
13 struct nft_af_info *inet; 13 struct nft_af_info *inet;
14 struct nft_af_info *arp; 14 struct nft_af_info *arp;
15 struct nft_af_info *bridge; 15 struct nft_af_info *bridge;
16 struct nft_af_info *netdev;
16 unsigned int base_seq; 17 unsigned int base_seq;
17 u8 gencursor; 18 u8 gencursor;
18}; 19};
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index 3573a81815ad..8ba379f9e467 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -31,6 +31,7 @@ struct netns_sctp {
31 struct list_head addr_waitq; 31 struct list_head addr_waitq;
32 struct timer_list addr_wq_timer; 32 struct timer_list addr_wq_timer;
33 struct list_head auto_asconf_splist; 33 struct list_head auto_asconf_splist;
34 /* Lock that protects both addr_waitq and auto_asconf_splist */
34 spinlock_t addr_wq_lock; 35 spinlock_t addr_wq_lock;
35 36
36 /* Lock that protects the local_addr_list writers */ 37 /* Lock that protects the local_addr_list writers */
diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h
index 4d6597ad6067..c8a7681efa6a 100644
--- a/include/net/netns/x_tables.h
+++ b/include/net/netns/x_tables.h
@@ -2,7 +2,7 @@
2#define __NETNS_X_TABLES_H 2#define __NETNS_X_TABLES_H
3 3
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/netfilter.h> 5#include <linux/netfilter_defs.h>
6 6
7struct ebt_table; 7struct ebt_table;
8 8
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 020a814bc8ed..316694dafa5b 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -179,6 +179,13 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev);
179void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata); 179void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
180void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev); 180void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
181 181
182static inline int nfc_hci_set_vendor_cmds(struct nfc_hci_dev *hdev,
183 struct nfc_vendor_cmd *cmds,
184 int n_cmds)
185{
186 return nfc_set_vendor_cmds(hdev->ndev, cmds, n_cmds);
187}
188
182void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err); 189void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
183 190
184int nfc_hci_result_to_errno(u8 result); 191int nfc_hci_result_to_errno(u8 result);
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index a2f2f3d3196d..75d2e1880059 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -35,6 +35,7 @@
35#define NCI_MAX_NUM_RF_CONFIGS 10 35#define NCI_MAX_NUM_RF_CONFIGS 10
36#define NCI_MAX_NUM_CONN 10 36#define NCI_MAX_NUM_CONN 10
37#define NCI_MAX_PARAM_LEN 251 37#define NCI_MAX_PARAM_LEN 251
38#define NCI_MAX_PACKET_SIZE 258
38 39
39/* NCI Status Codes */ 40/* NCI Status Codes */
40#define NCI_STATUS_OK 0x00 41#define NCI_STATUS_OK 0x00
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index d4dcc7199fd7..01fc8c531115 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -31,6 +31,7 @@
31 31
32#include <linux/interrupt.h> 32#include <linux/interrupt.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/tty.h>
34 35
35#include <net/nfc/nfc.h> 36#include <net/nfc/nfc.h>
36#include <net/nfc/nci.h> 37#include <net/nfc/nci.h>
@@ -66,7 +67,14 @@ enum nci_state {
66 67
67struct nci_dev; 68struct nci_dev;
68 69
70struct nci_prop_ops {
71 __u16 opcode;
72 int (*rsp)(struct nci_dev *dev, struct sk_buff *skb);
73 int (*ntf)(struct nci_dev *dev, struct sk_buff *skb);
74};
75
69struct nci_ops { 76struct nci_ops {
77 int (*init)(struct nci_dev *ndev);
70 int (*open)(struct nci_dev *ndev); 78 int (*open)(struct nci_dev *ndev);
71 int (*close)(struct nci_dev *ndev); 79 int (*close)(struct nci_dev *ndev);
72 int (*send)(struct nci_dev *ndev, struct sk_buff *skb); 80 int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
@@ -84,12 +92,16 @@ struct nci_ops {
84 struct sk_buff *skb); 92 struct sk_buff *skb);
85 void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd, 93 void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
86 struct sk_buff *skb); 94 struct sk_buff *skb);
95
96 struct nci_prop_ops *prop_ops;
97 size_t n_prop_ops;
87}; 98};
88 99
89#define NCI_MAX_SUPPORTED_RF_INTERFACES 4 100#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
90#define NCI_MAX_DISCOVERED_TARGETS 10 101#define NCI_MAX_DISCOVERED_TARGETS 10
91#define NCI_MAX_NUM_NFCEE 255 102#define NCI_MAX_NUM_NFCEE 255
92#define NCI_MAX_CONN_ID 7 103#define NCI_MAX_CONN_ID 7
104#define NCI_MAX_PROPRIETARY_CMD 64
93 105
94struct nci_conn_info { 106struct nci_conn_info {
95 struct list_head list; 107 struct list_head list;
@@ -264,6 +276,8 @@ int nci_request(struct nci_dev *ndev,
264 void (*req)(struct nci_dev *ndev, 276 void (*req)(struct nci_dev *ndev,
265 unsigned long opt), 277 unsigned long opt),
266 unsigned long opt, __u32 timeout); 278 unsigned long opt, __u32 timeout);
279int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
280
267int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); 281int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
268int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val); 282int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
269 283
@@ -318,8 +332,19 @@ static inline void *nci_get_drvdata(struct nci_dev *ndev)
318 return ndev->driver_data; 332 return ndev->driver_data;
319} 333}
320 334
335static inline int nci_set_vendor_cmds(struct nci_dev *ndev,
336 struct nfc_vendor_cmd *cmds,
337 int n_cmds)
338{
339 return nfc_set_vendor_cmds(ndev->nfc_dev, cmds, n_cmds);
340}
341
321void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb); 342void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb);
322void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb); 343void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb);
344int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode,
345 struct sk_buff *skb);
346int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode,
347 struct sk_buff *skb);
323void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb); 348void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
324int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload); 349int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
325int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb); 350int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
@@ -367,4 +392,50 @@ int nci_spi_send(struct nci_spi *nspi,
367 struct sk_buff *skb); 392 struct sk_buff *skb);
368struct sk_buff *nci_spi_read(struct nci_spi *nspi); 393struct sk_buff *nci_spi_read(struct nci_spi *nspi);
369 394
395/* ----- NCI UART ---- */
396
397/* Ioctl */
398#define NCIUARTSETDRIVER _IOW('U', 0, char *)
399
400enum nci_uart_driver {
401 NCI_UART_DRIVER_MARVELL = 0,
402 NCI_UART_DRIVER_MAX
403};
404
405struct nci_uart;
406
407struct nci_uart_ops {
408 int (*open)(struct nci_uart *nci_uart);
409 void (*close)(struct nci_uart *nci_uart);
410 int (*recv)(struct nci_uart *nci_uart, struct sk_buff *skb);
411 int (*recv_buf)(struct nci_uart *nci_uart, const u8 *data, char *flags,
412 int count);
413 int (*send)(struct nci_uart *nci_uart, struct sk_buff *skb);
414 void (*tx_start)(struct nci_uart *nci_uart);
415 void (*tx_done)(struct nci_uart *nci_uart);
416};
417
418struct nci_uart {
419 struct module *owner;
420 struct nci_uart_ops ops;
421 const char *name;
422 enum nci_uart_driver driver;
423
424 /* Dynamic data */
425 struct nci_dev *ndev;
426 spinlock_t rx_lock;
427 struct work_struct write_work;
428 struct tty_struct *tty;
429 unsigned long tx_state;
430 struct sk_buff_head tx_q;
431 struct sk_buff *tx_skb;
432 struct sk_buff *rx_skb;
433 int rx_packet_len;
434 void *drv_data;
435};
436
437int nci_uart_register(struct nci_uart *nu);
438void nci_uart_unregister(struct nci_uart *nu);
439void nci_uart_set_config(struct nci_uart *nu, int baudrate, int flow_ctrl);
440
370#endif /* __NCI_CORE_H */ 441#endif /* __NCI_CORE_H */
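
struct nci_prop_ops lets an NCI driver route proprietary response/notification opcodes to its own handlers, and the new init hook plus the NCI UART glue follow the same driver-supplied-ops pattern. A hedged sketch of the proprietary-ops wiring; the opcode value and handler are made up for illustration, and the mandatory open/close/send callbacks are elided:

	/* Hypothetical proprietary opcode table for a vendor NCI driver. */
	static int my_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
	{
		return 0;			/* parse the vendor response here */
	}

	static struct nci_prop_ops my_prop_ops[] = {
		{
			.opcode	= 0x2f02,	/* made-up proprietary opcode */
			.rsp	= my_prop_rsp,
		},
	};

	static struct nci_ops my_nci_ops = {
		/* .open/.close/.send as before ... */
		.prop_ops	= my_prop_ops,
		.n_prop_ops	= ARRAY_SIZE(my_prop_ops),
	};
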
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 7ac029c07546..f9e58ae45f9c 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -165,6 +165,12 @@ struct nfc_genl_data {
165 struct mutex genl_data_mutex; 165 struct mutex genl_data_mutex;
166}; 166};
167 167
168struct nfc_vendor_cmd {
169 __u32 vendor_id;
170 __u32 subcmd;
171 int (*doit)(struct nfc_dev *dev, void *data, size_t data_len);
172};
173
168struct nfc_dev { 174struct nfc_dev {
169 int idx; 175 int idx;
170 u32 target_next_idx; 176 u32 target_next_idx;
@@ -193,6 +199,9 @@ struct nfc_dev {
193 199
194 struct rfkill *rfkill; 200 struct rfkill *rfkill;
195 201
202 struct nfc_vendor_cmd *vendor_cmds;
203 int n_vendor_cmds;
204
196 struct nfc_ops *ops; 205 struct nfc_ops *ops;
197}; 206};
198#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev) 207#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
@@ -296,4 +305,17 @@ struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
296void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb, 305void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
297 u8 payload_type, u8 direction); 306 u8 payload_type, u8 direction);
298 307
308static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
309 struct nfc_vendor_cmd *cmds,
310 int n_cmds)
311{
312 if (dev->vendor_cmds || dev->n_vendor_cmds)
313 return -EINVAL;
314
315 dev->vendor_cmds = cmds;
316 dev->n_vendor_cmds = n_cmds;
317
318 return 0;
319}
320
299#endif /* __NET_NFC_H */ 321#endif /* __NET_NFC_H */
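
struct nfc_vendor_cmd and nfc_set_vendor_cmds() give drivers a table-driven way to expose vendor-specific commands; the HCI and NCI wrappers earlier in this series simply forward to this helper. A minimal, hypothetical table (vendor ID, subcommand and handler are illustrative only):

	static int my_vendor_doit(struct nfc_dev *dev, void *data, size_t data_len)
	{
		return 0;			/* handle the vendor payload */
	}

	static struct nfc_vendor_cmd my_vendor_cmds[] = {
		{
			.vendor_id	= 0x001122,	/* made-up OUI */
			.subcmd		= 1,
			.doit		= my_vendor_doit,
		},
	};

	/* in the driver's setup path, once the nfc_dev exists:
	 *	err = nfc_set_vendor_cmds(nfc_dev, my_vendor_cmds,
	 *				  ARRAY_SIZE(my_vendor_cmds));
	 */
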
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index f8b5bc997959..b0ab530d28cd 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -100,6 +100,10 @@ enum nl802154_attrs {
100 100
101 NL802154_ATTR_EXTENDED_ADDR, 101 NL802154_ATTR_EXTENDED_ADDR,
102 102
103 NL802154_ATTR_WPAN_PHY_CAPS,
104
105 NL802154_ATTR_SUPPORTED_COMMANDS,
106
103 /* add attributes here, update the policy in nl802154.c */ 107 /* add attributes here, update the policy in nl802154.c */
104 108
105 __NL802154_ATTR_AFTER_LAST, 109 __NL802154_ATTR_AFTER_LAST,
@@ -120,6 +124,61 @@ enum nl802154_iftype {
120}; 124};
121 125
122/** 126/**
127 * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes
128 *
129 * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved
130 * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr
131 * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
132 * nl802154_wpan_phy_tx_power
133 * @NL802154_CAP_ATTR_MIN_CCA_ED_LEVEL: minimum value for cca_ed_level
134 * @NL802154_CAP_ATTR_MAX_CCA_ED_LEVEL: maximum value for cca_ed_level
135 * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
136 * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
137 * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
138 * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value
139 * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value
140 * @NL802154_CAP_ATTR_MAX_MINBE: maximum of maxbe value
141 * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value
142 * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value
143 * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value
144 * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value
145 * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags
146 * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags
147 * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined
148 * @__NL802154_CAP_ATTR_AFTER_LAST: internal use
149 */
150enum nl802154_wpan_phy_capability_attr {
151 __NL802154_CAP_ATTR_INVALID,
152
153 NL802154_CAP_ATTR_IFTYPES,
154
155 NL802154_CAP_ATTR_CHANNELS,
156 NL802154_CAP_ATTR_TX_POWERS,
157
158 NL802154_CAP_ATTR_CCA_ED_LEVELS,
159 NL802154_CAP_ATTR_CCA_MODES,
160 NL802154_CAP_ATTR_CCA_OPTS,
161
162 NL802154_CAP_ATTR_MIN_MINBE,
163 NL802154_CAP_ATTR_MAX_MINBE,
164
165 NL802154_CAP_ATTR_MIN_MAXBE,
166 NL802154_CAP_ATTR_MAX_MAXBE,
167
168 NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
169 NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
170
171 NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
172 NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
173
174 NL802154_CAP_ATTR_LBT,
175
176 /* keep last */
177 __NL802154_CAP_ATTR_AFTER_LAST,
178 NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1
179};
180
181/**
123 * enum nl802154_cca_modes - cca modes 182 * enum nl802154_cca_modes - cca modes
124 * 183 *
125 * @__NL802154_CCA_INVALID: cca mode number 0 is reserved 184 * @__NL802154_CCA_INVALID: cca mode number 0 is reserved
@@ -128,7 +187,7 @@ enum nl802154_iftype {
128 * @NL802154_CCA_ENERGY_CARRIER: Carrier sense with energy above threshold 187 * @NL802154_CCA_ENERGY_CARRIER: Carrier sense with energy above threshold
129 * @NL802154_CCA_ALOHA: CCA shall always report an idle medium 188 * @NL802154_CCA_ALOHA: CCA shall always report an idle medium
130 * @NL802154_CCA_UWB_SHR: UWB preamble sense based on the SHR of a frame 189 * @NL802154_CCA_UWB_SHR: UWB preamble sense based on the SHR of a frame
131 * @NL802154_CCA_UWB_MULTIPEXED: UWB preamble sense based on the packet with 190 * @NL802154_CCA_UWB_MULTIPLEXED: UWB preamble sense based on the packet with
132 * the multiplexed preamble 191 * the multiplexed preamble
133 * @__NL802154_CCA_ATTR_AFTER_LAST: Internal 192 * @__NL802154_CCA_ATTR_AFTER_LAST: Internal
134 * @NL802154_CCA_ATTR_MAX: Maximum CCA attribute number 193 * @NL802154_CCA_ATTR_MAX: Maximum CCA attribute number
@@ -140,7 +199,7 @@ enum nl802154_cca_modes {
140 NL802154_CCA_ENERGY_CARRIER, 199 NL802154_CCA_ENERGY_CARRIER,
141 NL802154_CCA_ALOHA, 200 NL802154_CCA_ALOHA,
142 NL802154_CCA_UWB_SHR, 201 NL802154_CCA_UWB_SHR,
143 NL802154_CCA_UWB_MULTIPEXED, 202 NL802154_CCA_UWB_MULTIPLEXED,
144 203
145 /* keep last */ 204 /* keep last */
146 __NL802154_CCA_ATTR_AFTER_LAST, 205 __NL802154_CCA_ATTR_AFTER_LAST,
@@ -162,4 +221,26 @@ enum nl802154_cca_opts {
162 NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1 221 NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
163}; 222};
164 223
224/**
225 * enum nl802154_supported_bool_states - bool states for bool capability entry
226 *
227 * @NL802154_SUPPORTED_BOOL_FALSE: indicates to set false
228 * @NL802154_SUPPORTED_BOOL_TRUE: indicates to set true
229 * @__NL802154_SUPPORTED_BOOL_INVALD: reserved
230 * @NL802154_SUPPORTED_BOOL_BOTH: indicates to set true and false
231 * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal
232 * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states
233 */
234enum nl802154_supported_bool_states {
235 NL802154_SUPPORTED_BOOL_FALSE,
236 NL802154_SUPPORTED_BOOL_TRUE,
237 /* to handle them in a mask */
238 __NL802154_SUPPORTED_BOOL_INVALD,
239 NL802154_SUPPORTED_BOOL_BOTH,
240
241 /* keep last */
242 __NL802154_SUPPORTED_BOOL_AFTER_LAST,
243 NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
244};
245
165#endif /* __NL802154_H */ 246#endif /* __NL802154_H */
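
The capability attributes above are nested under NL802154_ATTR_WPAN_PHY_CAPS, and the bool-state enum is intended to be used as a bitmask (see the "handle them in a mask" comment). A hedged sketch of how a boolean capability such as LBT might be advertised, assuming a netlink message skb called msg and the standard nla_put helpers:

	/* Hypothetical dump-helper fragment. */
	u32 lbt_caps = BIT(NL802154_SUPPORTED_BOOL_TRUE) |
		       BIT(NL802154_SUPPORTED_BOOL_FALSE);

	if (nla_put_u32(msg, NL802154_CAP_ATTR_LBT, lbt_caps))
		goto nla_put_failure;
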
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 9f4265ce8892..87935cad2f7b 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -64,6 +64,7 @@ struct request_sock {
64 struct timer_list rsk_timer; 64 struct timer_list rsk_timer;
65 const struct request_sock_ops *rsk_ops; 65 const struct request_sock_ops *rsk_ops;
66 struct sock *sk; 66 struct sock *sk;
67 u32 *saved_syn;
67 u32 secid; 68 u32 secid;
68 u32 peer_secid; 69 u32 peer_secid;
69}; 70};
@@ -77,7 +78,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
77 req->rsk_ops = ops; 78 req->rsk_ops = ops;
78 sock_hold(sk_listener); 79 sock_hold(sk_listener);
79 req->rsk_listener = sk_listener; 80 req->rsk_listener = sk_listener;
80 81 req->saved_syn = NULL;
81 /* Following is temporary. It is coupled with debugging 82 /* Following is temporary. It is coupled with debugging
82 * helpers in reqsk_put() & reqsk_free() 83 * helpers in reqsk_put() & reqsk_free()
83 */ 84 */
@@ -104,6 +105,7 @@ static inline void reqsk_free(struct request_sock *req)
104 req->rsk_ops->destructor(req); 105 req->rsk_ops->destructor(req);
105 if (req->rsk_listener) 106 if (req->rsk_listener)
106 sock_put(req->rsk_listener); 107 sock_put(req->rsk_listener);
108 kfree(req->saved_syn);
107 kmem_cache_free(req->rsk_ops->slab, req); 109 kmem_cache_free(req->rsk_ops->slab, req);
108} 110}
109 111
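
reqsk_alloc() now clears saved_syn and reqsk_free() kfree()s it, so whoever populates the pointer must hand over a kmalloc'd buffer. A hedged sketch of saving a SYN, loosely modelled on what the TCP code does (a length word followed by a copy of the headers); the function name and layout here are illustrative, not the kernel's implementation:

	static int save_syn_headers(const struct sk_buff *skb,
				    struct request_sock *req)
	{
		u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
		u32 *copy;

		copy = kmalloc(len + sizeof(u32), GFP_ATOMIC);
		if (!copy)
			return -ENOMEM;

		copy[0] = len;			/* first word: total header length */
		memcpy(&copy[1], skb_network_header(skb), len);
		req->saved_syn = copy;		/* released by reqsk_free() */
		return 0;
	}
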
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6d778efcfdfd..2738f6f87908 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -501,12 +501,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
501 return sch->enqueue(skb, sch); 501 return sch->enqueue(skb, sch);
502} 502}
503 503
504static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
505{
506 qdisc_skb_cb(skb)->pkt_len = skb->len;
507 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
508}
509
510static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) 504static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
511{ 505{
512 return q->flags & TCQ_F_CPUSTATS; 506 return q->flags & TCQ_F_CPUSTATS;
@@ -745,23 +739,6 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
745 return rtab->data[slot]; 739 return rtab->data[slot];
746} 740}
747 741
748#ifdef CONFIG_NET_CLS_ACT
749static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
750 int action)
751{
752 struct sk_buff *n;
753
754 n = skb_clone(skb, gfp_mask);
755
756 if (n) {
757 n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
758 n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
759 n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
760 }
761 return n;
762}
763#endif
764
765struct psched_ratecfg { 742struct psched_ratecfg {
766 u64 rate_bytes_ps; /* bytes per second */ 743 u64 rate_bytes_ps; /* bytes per second */
767 u32 mult; 744 u32 mult;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2bb2fcf5b11f..495c87e367b3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -223,6 +223,10 @@ struct sctp_sock {
223 atomic_t pd_mode; 223 atomic_t pd_mode;
224 /* Receive to here while partial delivery is in effect. */ 224 /* Receive to here while partial delivery is in effect. */
225 struct sk_buff_head pd_lobby; 225 struct sk_buff_head pd_lobby;
226
227 /* These must be the last fields, as they will be skipped on copies,
228 * like on accept and peeloff operations
229 */
226 struct list_head auto_asconf_list; 230 struct list_head auto_asconf_list;
227 int do_auto_asconf; 231 int do_auto_asconf;
228}; 232};
diff --git a/include/net/sock.h b/include/net/sock.h
index 3a4898ec8c67..f21f0708ec59 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -184,6 +184,7 @@ struct sock_common {
184 unsigned char skc_reuse:4; 184 unsigned char skc_reuse:4;
185 unsigned char skc_reuseport:1; 185 unsigned char skc_reuseport:1;
186 unsigned char skc_ipv6only:1; 186 unsigned char skc_ipv6only:1;
187 unsigned char skc_net_refcnt:1;
187 int skc_bound_dev_if; 188 int skc_bound_dev_if;
188 union { 189 union {
189 struct hlist_node skc_bind_node; 190 struct hlist_node skc_bind_node;
@@ -276,7 +277,6 @@ struct cg_proto;
276 * @sk_incoming_cpu: record cpu processing incoming packets 277 * @sk_incoming_cpu: record cpu processing incoming packets
277 * @sk_txhash: computed flow hash for use on transmit 278 * @sk_txhash: computed flow hash for use on transmit
278 * @sk_filter: socket filtering instructions 279 * @sk_filter: socket filtering instructions
279 * @sk_protinfo: private area, net family specific, when not using slab
280 * @sk_timer: sock cleanup timer 280 * @sk_timer: sock cleanup timer
281 * @sk_stamp: time stamp of last packet received 281 * @sk_stamp: time stamp of last packet received
282 * @sk_tsflags: SO_TIMESTAMPING socket options 282 * @sk_tsflags: SO_TIMESTAMPING socket options
@@ -323,6 +323,7 @@ struct sock {
323#define sk_reuse __sk_common.skc_reuse 323#define sk_reuse __sk_common.skc_reuse
324#define sk_reuseport __sk_common.skc_reuseport 324#define sk_reuseport __sk_common.skc_reuseport
325#define sk_ipv6only __sk_common.skc_ipv6only 325#define sk_ipv6only __sk_common.skc_ipv6only
326#define sk_net_refcnt __sk_common.skc_net_refcnt
326#define sk_bound_dev_if __sk_common.skc_bound_dev_if 327#define sk_bound_dev_if __sk_common.skc_bound_dev_if
327#define sk_bind_node __sk_common.skc_bind_node 328#define sk_bind_node __sk_common.skc_bind_node
328#define sk_prot __sk_common.skc_prot 329#define sk_prot __sk_common.skc_prot
@@ -414,7 +415,6 @@ struct sock {
414 const struct cred *sk_peer_cred; 415 const struct cred *sk_peer_cred;
415 long sk_rcvtimeo; 416 long sk_rcvtimeo;
416 long sk_sndtimeo; 417 long sk_sndtimeo;
417 void *sk_protinfo;
418 struct timer_list sk_timer; 418 struct timer_list sk_timer;
419 ktime_t sk_stamp; 419 ktime_t sk_stamp;
420 u16 sk_tsflags; 420 u16 sk_tsflags;
@@ -902,7 +902,7 @@ void sk_stream_kill_queues(struct sock *sk);
902void sk_set_memalloc(struct sock *sk); 902void sk_set_memalloc(struct sock *sk);
903void sk_clear_memalloc(struct sock *sk); 903void sk_clear_memalloc(struct sock *sk);
904 904
905int sk_wait_data(struct sock *sk, long *timeo); 905int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
906 906
907struct request_sock_ops; 907struct request_sock_ops;
908struct timewait_sock_ops; 908struct timewait_sock_ops;
@@ -924,7 +924,6 @@ static inline void sk_prot_clear_nulls(struct sock *sk, int size)
924 924
925/* Networking protocol blocks we attach to sockets. 925/* Networking protocol blocks we attach to sockets.
926 * socket layer -> transport layer interface 926 * socket layer -> transport layer interface
927 * transport -> network interface is defined by struct inet_proto
928 */ 927 */
929struct proto { 928struct proto {
930 void (*close)(struct sock *sk, 929 void (*close)(struct sock *sk,
@@ -1366,7 +1365,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
1366 * Functions for memory accounting 1365 * Functions for memory accounting
1367 */ 1366 */
1368int __sk_mem_schedule(struct sock *sk, int size, int kind); 1367int __sk_mem_schedule(struct sock *sk, int size, int kind);
1369void __sk_mem_reclaim(struct sock *sk); 1368void __sk_mem_reclaim(struct sock *sk, int amount);
1370 1369
1371#define SK_MEM_QUANTUM ((int)PAGE_SIZE) 1370#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
1372#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) 1371#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1407,7 +1406,7 @@ static inline void sk_mem_reclaim(struct sock *sk)
1407 if (!sk_has_account(sk)) 1406 if (!sk_has_account(sk))
1408 return; 1407 return;
1409 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) 1408 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
1410 __sk_mem_reclaim(sk); 1409 __sk_mem_reclaim(sk, sk->sk_forward_alloc);
1411} 1410}
1412 1411
1413static inline void sk_mem_reclaim_partial(struct sock *sk) 1412static inline void sk_mem_reclaim_partial(struct sock *sk)
@@ -1415,7 +1414,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk)
1415 if (!sk_has_account(sk)) 1414 if (!sk_has_account(sk))
1416 return; 1415 return;
1417 if (sk->sk_forward_alloc > SK_MEM_QUANTUM) 1416 if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
1418 __sk_mem_reclaim(sk); 1417 __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
1419} 1418}
1420 1419
1421static inline void sk_mem_charge(struct sock *sk, int size) 1420static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1514,9 +1513,9 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
1514 1513
1515 1514
1516struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 1515struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1517 struct proto *prot); 1516 struct proto *prot, int kern);
1518void sk_free(struct sock *sk); 1517void sk_free(struct sock *sk);
1519void sk_release_kernel(struct sock *sk); 1518void sk_destruct(struct sock *sk);
1520struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); 1519struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1521 1520
1522struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 1521struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
@@ -2024,7 +2023,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
2024 } 2023 }
2025} 2024}
2026 2025
2027struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); 2026struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
2027 bool force_schedule);
2028 2028
2029/** 2029/**
2030 * sk_page_frag - return an appropriate page_frag 2030 * sk_page_frag - return an appropriate page_frag
@@ -2192,22 +2192,6 @@ void sock_net_set(struct sock *sk, struct net *net)
2192 write_pnet(&sk->sk_net, net); 2192 write_pnet(&sk->sk_net, net);
2193} 2193}
2194 2194
2195/*
2196 * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
2197 * They should not hold a reference to a namespace in order to allow
2198 * to stop it.
2199 * Sockets after sk_change_net should be released using sk_release_kernel
2200 */
2201static inline void sk_change_net(struct sock *sk, struct net *net)
2202{
2203 struct net *current_net = sock_net(sk);
2204
2205 if (!net_eq(current_net, net)) {
2206 put_net(current_net);
2207 sock_net_set(sk, net);
2208 }
2209}
2210
2211static inline struct sock *skb_steal_sock(struct sk_buff *skb) 2195static inline struct sock *skb_steal_sock(struct sk_buff *skb)
2212{ 2196{
2213 if (skb->sk) { 2197 if (skb->sk) {
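
sk_alloc() grows a kern argument and sk_net_refcnt replaces the old sk_change_net()/sk_release_kernel() dance: kernel-internal sockets pass kern = 1 and therefore do not pin their namespace. A minimal, hypothetical call-site fragment (my_proto stands in for a real struct proto, and this would sit inside a kernel-socket creation path):

	struct sock *sk;

	/* kern = 1: per-netns kernel socket, does not hold a netns reference */
	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, 1);
	if (!sk)
		return -ENOMEM;
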
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d2e69ee3019a..d5671f118bfc 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -14,154 +14,261 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/notifier.h> 15#include <linux/notifier.h>
16 16
17#define SWITCHDEV_F_NO_RECURSE BIT(0)
18
19enum switchdev_trans {
20 SWITCHDEV_TRANS_NONE,
21 SWITCHDEV_TRANS_PREPARE,
22 SWITCHDEV_TRANS_ABORT,
23 SWITCHDEV_TRANS_COMMIT,
24};
25
26enum switchdev_attr_id {
27 SWITCHDEV_ATTR_UNDEFINED,
28 SWITCHDEV_ATTR_PORT_PARENT_ID,
29 SWITCHDEV_ATTR_PORT_STP_STATE,
30 SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
31};
32
33struct switchdev_attr {
34 enum switchdev_attr_id id;
35 enum switchdev_trans trans;
36 u32 flags;
37 union {
38 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
39 u8 stp_state; /* PORT_STP_STATE */
40 unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
41 } u;
42};
43
17struct fib_info; 44struct fib_info;
18 45
46enum switchdev_obj_id {
47 SWITCHDEV_OBJ_UNDEFINED,
48 SWITCHDEV_OBJ_PORT_VLAN,
49 SWITCHDEV_OBJ_IPV4_FIB,
50 SWITCHDEV_OBJ_PORT_FDB,
51};
52
53struct switchdev_obj {
54 enum switchdev_obj_id id;
55 enum switchdev_trans trans;
56 int (*cb)(struct net_device *dev, struct switchdev_obj *obj);
57 union {
58 struct switchdev_obj_vlan { /* PORT_VLAN */
59 u16 flags;
60 u16 vid_begin;
61 u16 vid_end;
62 } vlan;
63 struct switchdev_obj_ipv4_fib { /* IPV4_FIB */
64 u32 dst;
65 int dst_len;
66 struct fib_info *fi;
67 u8 tos;
68 u8 type;
69 u32 nlflags;
70 u32 tb_id;
71 } ipv4_fib;
72 struct switchdev_obj_fdb { /* PORT_FDB */
73 const unsigned char *addr;
74 u16 vid;
75 } fdb;
76 } u;
77};
78
19/** 79/**
20 * struct switchdev_ops - switchdev operations 80 * struct switchdev_ops - switchdev operations
21 * 81 *
22 * @swdev_parent_id_get: Called to get an ID of the switch chip this port 82 * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
23 * is part of. If driver implements this, it indicates that it 83 *
24 * represents a port of a switch chip. 84 * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
25 * 85 *
26 * @swdev_port_stp_update: Called to notify switch device port of bridge 86 * @switchdev_port_obj_add: Add an object to port (see switchdev_obj).
27 * port STP state change.
28 * 87 *
29 * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device. 88 * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj).
30 * 89 *
31 * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device. 90 * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj).
32 */ 91 */
33struct swdev_ops { 92struct switchdev_ops {
34 int (*swdev_parent_id_get)(struct net_device *dev, 93 int (*switchdev_port_attr_get)(struct net_device *dev,
35 struct netdev_phys_item_id *psid); 94 struct switchdev_attr *attr);
36 int (*swdev_port_stp_update)(struct net_device *dev, u8 state); 95 int (*switchdev_port_attr_set)(struct net_device *dev,
37 int (*swdev_fib_ipv4_add)(struct net_device *dev, __be32 dst, 96 struct switchdev_attr *attr);
38 int dst_len, struct fib_info *fi, 97 int (*switchdev_port_obj_add)(struct net_device *dev,
39 u8 tos, u8 type, u32 nlflags, 98 struct switchdev_obj *obj);
40 u32 tb_id); 99 int (*switchdev_port_obj_del)(struct net_device *dev,
41 int (*swdev_fib_ipv4_del)(struct net_device *dev, __be32 dst, 100 struct switchdev_obj *obj);
42 int dst_len, struct fib_info *fi, 101 int (*switchdev_port_obj_dump)(struct net_device *dev,
43 u8 tos, u8 type, u32 tb_id); 102 struct switchdev_obj *obj);
44}; 103};
45 104
46enum netdev_switch_notifier_type { 105enum switchdev_notifier_type {
47 NETDEV_SWITCH_FDB_ADD = 1, 106 SWITCHDEV_FDB_ADD = 1,
48 NETDEV_SWITCH_FDB_DEL, 107 SWITCHDEV_FDB_DEL,
49}; 108};
50 109
51struct netdev_switch_notifier_info { 110struct switchdev_notifier_info {
52 struct net_device *dev; 111 struct net_device *dev;
53}; 112};
54 113
55struct netdev_switch_notifier_fdb_info { 114struct switchdev_notifier_fdb_info {
56 struct netdev_switch_notifier_info info; /* must be first */ 115 struct switchdev_notifier_info info; /* must be first */
57 const unsigned char *addr; 116 const unsigned char *addr;
58 u16 vid; 117 u16 vid;
59}; 118};
60 119
61static inline struct net_device * 120static inline struct net_device *
62netdev_switch_notifier_info_to_dev(const struct netdev_switch_notifier_info *info) 121switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
63{ 122{
64 return info->dev; 123 return info->dev;
65} 124}
66 125
67#ifdef CONFIG_NET_SWITCHDEV 126#ifdef CONFIG_NET_SWITCHDEV
68 127
69int netdev_switch_parent_id_get(struct net_device *dev, 128int switchdev_port_attr_get(struct net_device *dev,
70 struct netdev_phys_item_id *psid); 129 struct switchdev_attr *attr);
71int netdev_switch_port_stp_update(struct net_device *dev, u8 state); 130int switchdev_port_attr_set(struct net_device *dev,
72int register_netdev_switch_notifier(struct notifier_block *nb); 131 struct switchdev_attr *attr);
73int unregister_netdev_switch_notifier(struct notifier_block *nb); 132int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj);
74int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev, 133int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj);
75 struct netdev_switch_notifier_info *info); 134int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj);
76int netdev_switch_port_bridge_setlink(struct net_device *dev, 135int register_switchdev_notifier(struct notifier_block *nb);
77 struct nlmsghdr *nlh, u16 flags); 136int unregister_switchdev_notifier(struct notifier_block *nb);
78int netdev_switch_port_bridge_dellink(struct net_device *dev, 137int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
79 struct nlmsghdr *nlh, u16 flags); 138 struct switchdev_notifier_info *info);
80int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev, 139int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
81 struct nlmsghdr *nlh, u16 flags); 140 struct net_device *dev, u32 filter_mask,
82int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev, 141 int nlflags);
83 struct nlmsghdr *nlh, u16 flags); 142int switchdev_port_bridge_setlink(struct net_device *dev,
84int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, 143 struct nlmsghdr *nlh, u16 flags);
85 u8 tos, u8 type, u32 nlflags, u32 tb_id); 144int switchdev_port_bridge_dellink(struct net_device *dev,
86int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, 145 struct nlmsghdr *nlh, u16 flags);
87 u8 tos, u8 type, u32 tb_id); 146int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
88void netdev_switch_fib_ipv4_abort(struct fib_info *fi); 147 u8 tos, u8 type, u32 nlflags, u32 tb_id);
148int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
149 u8 tos, u8 type, u32 tb_id);
150void switchdev_fib_ipv4_abort(struct fib_info *fi);
151int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
152 struct net_device *dev, const unsigned char *addr,
153 u16 vid, u16 nlm_flags);
154int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
155 struct net_device *dev, const unsigned char *addr,
156 u16 vid);
157int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
158 struct net_device *dev,
159 struct net_device *filter_dev, int idx);
89 160
90#else 161#else
91 162
92static inline int netdev_switch_parent_id_get(struct net_device *dev, 163static inline int switchdev_port_attr_get(struct net_device *dev,
93 struct netdev_phys_item_id *psid) 164 struct switchdev_attr *attr)
165{
166 return -EOPNOTSUPP;
167}
168
169static inline int switchdev_port_attr_set(struct net_device *dev,
170 struct switchdev_attr *attr)
171{
172 return -EOPNOTSUPP;
173}
174
175static inline int switchdev_port_obj_add(struct net_device *dev,
176 struct switchdev_obj *obj)
94{ 177{
95 return -EOPNOTSUPP; 178 return -EOPNOTSUPP;
96} 179}
97 180
98static inline int netdev_switch_port_stp_update(struct net_device *dev, 181static inline int switchdev_port_obj_del(struct net_device *dev,
99 u8 state) 182 struct switchdev_obj *obj)
100{ 183{
101 return -EOPNOTSUPP; 184 return -EOPNOTSUPP;
102} 185}
103 186
104static inline int register_netdev_switch_notifier(struct notifier_block *nb) 187static inline int switchdev_port_obj_dump(struct net_device *dev,
188 struct switchdev_obj *obj)
189{
190 return -EOPNOTSUPP;
191}
192
193static inline int register_switchdev_notifier(struct notifier_block *nb)
105{ 194{
106 return 0; 195 return 0;
107} 196}
108 197
109static inline int unregister_netdev_switch_notifier(struct notifier_block *nb) 198static inline int unregister_switchdev_notifier(struct notifier_block *nb)
110{ 199{
111 return 0; 200 return 0;
112} 201}
113 202
114static inline int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev, 203static inline int call_switchdev_notifiers(unsigned long val,
115 struct netdev_switch_notifier_info *info) 204 struct net_device *dev,
205 struct switchdev_notifier_info *info)
116{ 206{
117 return NOTIFY_DONE; 207 return NOTIFY_DONE;
118} 208}
119 209
120static inline int netdev_switch_port_bridge_setlink(struct net_device *dev, 210static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid,
121 struct nlmsghdr *nlh, 211 u32 seq, struct net_device *dev,
122 u16 flags) 212 u32 filter_mask, int nlflags)
123{ 213{
124 return -EOPNOTSUPP; 214 return -EOPNOTSUPP;
125} 215}
126 216
127static inline int netdev_switch_port_bridge_dellink(struct net_device *dev, 217static inline int switchdev_port_bridge_setlink(struct net_device *dev,
128 struct nlmsghdr *nlh, 218 struct nlmsghdr *nlh,
129 u16 flags) 219 u16 flags)
130{ 220{
131 return -EOPNOTSUPP; 221 return -EOPNOTSUPP;
132} 222}
133 223
134static inline int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev, 224static inline int switchdev_port_bridge_dellink(struct net_device *dev,
135 struct nlmsghdr *nlh, 225 struct nlmsghdr *nlh,
136 u16 flags) 226 u16 flags)
137{ 227{
138 return 0; 228 return -EOPNOTSUPP;
139} 229}
140 230
141static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev, 231static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
142 struct nlmsghdr *nlh, 232 struct fib_info *fi,
143 u16 flags) 233 u8 tos, u8 type,
234 u32 nlflags, u32 tb_id)
144{ 235{
145 return 0; 236 return 0;
146} 237}
147 238
148static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, 239static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
149 struct fib_info *fi, 240 struct fib_info *fi,
150 u8 tos, u8 type, 241 u8 tos, u8 type, u32 tb_id)
151 u32 nlflags, u32 tb_id)
152{ 242{
153 return 0; 243 return 0;
154} 244}
155 245
156static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, 246static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
157 struct fib_info *fi,
158 u8 tos, u8 type, u32 tb_id)
159{ 247{
160 return 0;
161} 248}
162 249
163static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi) 250static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
251 struct net_device *dev,
252 const unsigned char *addr,
253 u16 vid, u16 nlm_flags)
164{ 254{
255 return -EOPNOTSUPP;
256}
257
258static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
259 struct net_device *dev,
260 const unsigned char *addr, u16 vid)
261{
262 return -EOPNOTSUPP;
263}
264
265static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
266 struct netlink_callback *cb,
267 struct net_device *dev,
268 struct net_device *filter_dev,
269 int idx)
270{
271 return -EOPNOTSUPP;
165} 272}
166 273
167#endif 274#endif
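
The rename from swdev_*/netdev_switch_* to switchdev_* also replaces the ad-hoc callbacks with a generic attr/obj model. A hedged sketch of a driver implementing just the parent-ID attribute under the new names; driver state access is elided and the wiring comment is only indicative:

	static int my_port_attr_get(struct net_device *dev,
				    struct switchdev_attr *attr)
	{
		switch (attr->id) {
		case SWITCHDEV_ATTR_PORT_PARENT_ID:
			attr->u.ppid.id_len = ETH_ALEN;
			/* copy the switch ID into attr->u.ppid.id here */
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}

	static const struct switchdev_ops my_switchdev_ops = {
		.switchdev_port_attr_get = my_port_attr_get,
	};

	/* assigned to the port net_device's switchdev ops pointer in the driver */
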
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6d204f3f9df8..950cfecaad3c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -286,6 +286,14 @@ extern atomic_long_t tcp_memory_allocated;
286extern struct percpu_counter tcp_sockets_allocated; 286extern struct percpu_counter tcp_sockets_allocated;
287extern int tcp_memory_pressure; 287extern int tcp_memory_pressure;
288 288
289/* optimized version of sk_under_memory_pressure() for TCP sockets */
290static inline bool tcp_under_memory_pressure(const struct sock *sk)
291{
292 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
293 return !!sk->sk_cgrp->memory_pressure;
294
295 return tcp_memory_pressure;
296}
289/* 297/*
290 * The next routines deal with comparing 32 bit unsigned ints 298 * The next routines deal with comparing 32 bit unsigned ints
291 * and worry about wraparound (automatic with unsigned arithmetic). 299 * and worry about wraparound (automatic with unsigned arithmetic).
@@ -311,6 +319,8 @@ static inline bool tcp_out_of_memory(struct sock *sk)
311 return false; 319 return false;
312} 320}
313 321
322void sk_forced_mem_schedule(struct sock *sk, int size);
323
314static inline bool tcp_too_many_orphans(struct sock *sk, int shift) 324static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
315{ 325{
316 struct percpu_counter *ocp = sk->sk_prot->orphan_count; 326 struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -326,18 +336,6 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
326 336
327bool tcp_check_oom(struct sock *sk, int shift); 337bool tcp_check_oom(struct sock *sk, int shift);
328 338
329/* syncookies: remember time of last synqueue overflow */
330static inline void tcp_synq_overflow(struct sock *sk)
331{
332 tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
333}
334
335/* syncookies: no recent synqueue overflow on this listening socket? */
336static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
337{
338 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
339 return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
340}
341 339
342extern struct proto tcp_prot; 340extern struct proto tcp_prot;
343 341
@@ -471,6 +469,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
471void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); 469void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
472 470
473/* From syncookies.c */ 471/* From syncookies.c */
472struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
473 struct request_sock *req,
474 struct dst_entry *dst);
474int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, 475int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
475 u32 cookie); 476 u32 cookie);
476struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); 477struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -483,13 +484,35 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
483 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if 484 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
484 * the counter advances immediately after a cookie is generated). 485 * the counter advances immediately after a cookie is generated).
485 */ 486 */
486#define MAX_SYNCOOKIE_AGE 2 487#define MAX_SYNCOOKIE_AGE 2
488#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
489#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
490
491/* syncookies: remember time of last synqueue overflow
492 * But do not dirty this field too often (once per second is enough)
493 */
494static inline void tcp_synq_overflow(struct sock *sk)
495{
496 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
497 unsigned long now = jiffies;
498
499 if (time_after(now, last_overflow + HZ))
500 tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
501}
502
503/* syncookies: no recent synqueue overflow on this listening socket? */
504static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
505{
506 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
507
508 return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
509}
487 510
488static inline u32 tcp_cookie_time(void) 511static inline u32 tcp_cookie_time(void)
489{ 512{
490 u64 val = get_jiffies_64(); 513 u64 val = get_jiffies_64();
491 514
492 do_div(val, 60 * HZ); 515 do_div(val, TCP_SYNCOOKIE_PERIOD);
493 return val; 516 return val;
494} 517}
495 518
@@ -527,7 +550,7 @@ int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
527 550
528void tcp_send_probe0(struct sock *); 551void tcp_send_probe0(struct sock *);
529void tcp_send_partial(struct sock *); 552void tcp_send_partial(struct sock *);
530int tcp_write_wakeup(struct sock *); 553int tcp_write_wakeup(struct sock *, int mib);
531void tcp_send_fin(struct sock *sk); 554void tcp_send_fin(struct sock *sk);
532void tcp_send_active_reset(struct sock *sk, gfp_t priority); 555void tcp_send_active_reset(struct sock *sk, gfp_t priority);
533int tcp_send_synack(struct sock *); 556int tcp_send_synack(struct sock *);
@@ -692,6 +715,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
692#define TCPHDR_ECE 0x40 715#define TCPHDR_ECE 0x40
693#define TCPHDR_CWR 0x80 716#define TCPHDR_CWR 0x80
694 717
718#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
719
695/* This is what the send packet queuing engine uses to pass 720/* This is what the send packet queuing engine uses to pass
696 * TCP per-packet control information to the transmission code. 721 * TCP per-packet control information to the transmission code.
697 * We also store the host-order sequence numbers in here too. 722 * We also store the host-order sequence numbers in here too.
@@ -705,11 +730,14 @@ struct tcp_skb_cb {
705 /* Note : tcp_tw_isn is used in input path only 730 /* Note : tcp_tw_isn is used in input path only
706 * (isn chosen by tcp_timewait_state_process()) 731 * (isn chosen by tcp_timewait_state_process())
707 * 732 *
708 * tcp_gso_segs is used in write queue only, 733 * tcp_gso_segs/size are used in write queue only,
709 * cf tcp_skb_pcount() 734 * cf tcp_skb_pcount()/tcp_skb_mss()
710 */ 735 */
711 __u32 tcp_tw_isn; 736 __u32 tcp_tw_isn;
712 __u32 tcp_gso_segs; 737 struct {
738 u16 tcp_gso_segs;
739 u16 tcp_gso_size;
740 };
713 }; 741 };
714 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ 742 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
715 743
@@ -765,10 +793,10 @@ static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
765 TCP_SKB_CB(skb)->tcp_gso_segs += segs; 793 TCP_SKB_CB(skb)->tcp_gso_segs += segs;
766} 794}
767 795
768/* This is valid iff tcp_skb_pcount() > 1. */ 796/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
769static inline int tcp_skb_mss(const struct sk_buff *skb) 797static inline int tcp_skb_mss(const struct sk_buff *skb)
770{ 798{
771 return skb_shinfo(skb)->gso_size; 799 return TCP_SKB_CB(skb)->tcp_gso_size;
772} 800}
773 801
774/* Events passed to congestion control interface */ 802/* Events passed to congestion control interface */
@@ -1043,14 +1071,31 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1043 return tp->is_cwnd_limited; 1071 return tp->is_cwnd_limited;
1044} 1072}
1045 1073
1046static inline void tcp_check_probe_timer(struct sock *sk) 1074/* Something is really bad, we could not queue an additional packet,
1075 * because qdisc is full or receiver sent a 0 window.
1076 * We do not want to add fuel to the fire, or abort too early,
1077 * so make sure the timer we arm now is at least 200ms in the future,
1078 * regardless of current icsk_rto value (as it could be ~2ms)
1079 */
1080static inline unsigned long tcp_probe0_base(const struct sock *sk)
1047{ 1081{
1048 const struct tcp_sock *tp = tcp_sk(sk); 1082 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1049 const struct inet_connection_sock *icsk = inet_csk(sk); 1083}
1084
1085/* Variant of inet_csk_rto_backoff() used for zero window probes */
1086static inline unsigned long tcp_probe0_when(const struct sock *sk,
1087 unsigned long max_when)
1088{
1089 u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
1050 1090
1051 if (!tp->packets_out && !icsk->icsk_pending) 1091 return (unsigned long)min_t(u64, when, max_when);
1092}
1093
1094static inline void tcp_check_probe_timer(struct sock *sk)
1095{
1096 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1052 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 1097 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1053 icsk->icsk_rto, TCP_RTO_MAX); 1098 tcp_probe0_base(sk), TCP_RTO_MAX);
1054} 1099}
1055 1100
1056static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) 1101static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
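
Worked example of the new probe0 clamping (not in the header): TCP_RTO_MIN is 200 ms, so even when icsk_rto has collapsed to ~2 ms, tcp_probe0_base() returns 200 ms; with icsk_backoff == 3, tcp_probe0_when(sk, TCP_RTO_MAX) evaluates to min(200 ms << 3, TCP_RTO_MAX) = 1.6 s. The reworked tcp_synq_overflow() likewise dirties ts_recent_stamp at most once per second (HZ) instead of on every overflow, and cookie validity is now expressed via TCP_SYNCOOKIE_VALID rather than the old TCP_TIMEOUT_FALLBACK.
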
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 79abb9c71772..1443d79e4fe6 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -11,6 +11,7 @@
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/aer.h> 12#include <linux/aer.h>
13#include <linux/cper.h> 13#include <linux/cper.h>
14#include <linux/mm.h>
14 15
15/* 16/*
16 * MCE Extended Error Log trace event 17 * MCE Extended Error Log trace event
@@ -232,6 +233,90 @@ TRACE_EVENT(aer_event,
232 __print_flags(__entry->status, "|", aer_uncorrectable_errors)) 233 __print_flags(__entry->status, "|", aer_uncorrectable_errors))
233); 234);
234 235
236/*
237 * memory-failure recovery action result event
238 *
239 * unsigned long pfn - Page Frame Number of the corrupted page
240 * int type - Page types of the corrupted page
241 * int result - Result of recovery action
242 */
243
244#ifdef CONFIG_MEMORY_FAILURE
245#define MF_ACTION_RESULT \
246 EM ( MF_IGNORED, "Ignored" ) \
247 EM ( MF_FAILED, "Failed" ) \
248 EM ( MF_DELAYED, "Delayed" ) \
249 EMe ( MF_RECOVERED, "Recovered" )
250
251#define MF_PAGE_TYPE \
252 EM ( MF_MSG_KERNEL, "reserved kernel page" ) \
253 EM ( MF_MSG_KERNEL_HIGH_ORDER, "high-order kernel page" ) \
254 EM ( MF_MSG_SLAB, "kernel slab page" ) \
255 EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \
256 EM ( MF_MSG_POISONED_HUGE, "huge page already hardware poisoned" ) \
257 EM ( MF_MSG_HUGE, "huge page" ) \
258 EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
259 EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \
260 EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \
261 EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \
262 EM ( MF_MSG_DIRTY_MLOCKED_LRU, "dirty mlocked LRU page" ) \
263 EM ( MF_MSG_CLEAN_MLOCKED_LRU, "clean mlocked LRU page" ) \
264 EM ( MF_MSG_DIRTY_UNEVICTABLE_LRU, "dirty unevictable LRU page" ) \
265 EM ( MF_MSG_CLEAN_UNEVICTABLE_LRU, "clean unevictable LRU page" ) \
266 EM ( MF_MSG_DIRTY_LRU, "dirty LRU page" ) \
267 EM ( MF_MSG_CLEAN_LRU, "clean LRU page" ) \
268 EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \
269 EM ( MF_MSG_BUDDY, "free buddy page" ) \
270 EM ( MF_MSG_BUDDY_2ND, "free buddy page (2nd try)" ) \
271 EMe ( MF_MSG_UNKNOWN, "unknown page" )
272
273/*
274 * First define the enums in MM_ACTION_RESULT to be exported to userspace
275 * via TRACE_DEFINE_ENUM().
276 */
277#undef EM
278#undef EMe
279#define EM(a, b) TRACE_DEFINE_ENUM(a);
280#define EMe(a, b) TRACE_DEFINE_ENUM(a);
281
282MF_ACTION_RESULT
283MF_PAGE_TYPE
284
285/*
286 * Now redefine the EM() and EMe() macros to map the enums to the strings
287 * that will be printed in the output.
288 */
289#undef EM
290#undef EMe
291#define EM(a, b) { a, b },
292#define EMe(a, b) { a, b }
293
294TRACE_EVENT(memory_failure_event,
295 TP_PROTO(unsigned long pfn,
296 int type,
297 int result),
298
299 TP_ARGS(pfn, type, result),
300
301 TP_STRUCT__entry(
302 __field(unsigned long, pfn)
303 __field(int, type)
304 __field(int, result)
305 ),
306
307 TP_fast_assign(
308 __entry->pfn = pfn;
309 __entry->type = type;
310 __entry->result = result;
311 ),
312
313 TP_printk("pfn %#lx: recovery action for %s: %s",
314 __entry->pfn,
315 __print_symbolic(__entry->type, MF_PAGE_TYPE),
316 __print_symbolic(__entry->result, MF_ACTION_RESULT)
317 )
318);
319#endif /* CONFIG_MEMORY_FAILURE */
235#endif /* _TRACE_HW_EVENT_MC_H */ 320#endif /* _TRACE_HW_EVENT_MC_H */
236 321
237/* This part must be outside protection */ 322/* This part must be outside protection */
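
TRACE_EVENT(memory_failure_event, ...) above generates a trace_memory_failure_event() call for the memory-failure code to emit, and the EM()/EMe() tables map the enum values to the strings printed in the trace output. A hedged call-site sketch, using MF_* values referenced by the macros above:

	/* Hypothetical call site in the recovery path. */
	trace_memory_failure_event(pfn, MF_MSG_DIRTY_LRU, MF_RECOVERED);
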
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ac54c27a2bfd..fde33ac6b58a 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -111,8 +111,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
111int rdma_addr_size(struct sockaddr *addr); 111int rdma_addr_size(struct sockaddr *addr);
112 112
113int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id); 113int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
114int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac, 114int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
115 u16 *vlan_id); 115 u8 *smac, u16 *vlan_id);
116 116
117static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr) 117static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
118{ 118{
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
160} 160}
161 161
162/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */ 162/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
163static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) 163static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
164{ 164{
165 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) { 165 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
166 struct sockaddr_in *out_in = (struct sockaddr_in *)out; 166 struct sockaddr_in *out_in = (struct sockaddr_in *)out;
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index ad9a3c280944..bd92130f4ac5 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -64,10 +64,10 @@ int ib_get_cached_gid(struct ib_device *device,
64 * ib_find_cached_gid() searches for the specified GID value in 64 * ib_find_cached_gid() searches for the specified GID value in
65 * the local software cache. 65 * the local software cache.
66 */ 66 */
67int ib_find_cached_gid(struct ib_device *device, 67int ib_find_cached_gid(struct ib_device *device,
68 union ib_gid *gid, 68 const union ib_gid *gid,
69 u8 *port_num, 69 u8 *port_num,
70 u16 *index); 70 u16 *index);
71 71
72/** 72/**
73 * ib_get_cached_pkey - Returns a cached PKey table entry 73 * ib_get_cached_pkey - Returns a cached PKey table entry
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 9bb99e983f58..c8422d5a5a91 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -42,8 +42,11 @@
42#include <rdma/ib_verbs.h> 42#include <rdma/ib_verbs.h>
43#include <uapi/rdma/ib_user_mad.h> 43#include <uapi/rdma/ib_user_mad.h>
44 44
45/* Management base version */ 45/* Management base versions */
46#define IB_MGMT_BASE_VERSION 1 46#define IB_MGMT_BASE_VERSION 1
47#define OPA_MGMT_BASE_VERSION 0x80
48
49#define OPA_SMP_CLASS_VERSION 0x80
47 50
48/* Management classes */ 51/* Management classes */
49#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01 52#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
@@ -135,6 +138,10 @@ enum {
135 IB_MGMT_SA_DATA = 200, 138 IB_MGMT_SA_DATA = 200,
136 IB_MGMT_DEVICE_HDR = 64, 139 IB_MGMT_DEVICE_HDR = 64,
137 IB_MGMT_DEVICE_DATA = 192, 140 IB_MGMT_DEVICE_DATA = 192,
141 IB_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + IB_MGMT_MAD_DATA,
142 OPA_MGMT_MAD_DATA = 2024,
143 OPA_MGMT_RMPP_DATA = 2012,
144 OPA_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + OPA_MGMT_MAD_DATA,
138}; 145};
139 146
140struct ib_mad_hdr { 147struct ib_mad_hdr {
@@ -181,12 +188,23 @@ struct ib_mad {
181 u8 data[IB_MGMT_MAD_DATA]; 188 u8 data[IB_MGMT_MAD_DATA];
182}; 189};
183 190
191struct opa_mad {
192 struct ib_mad_hdr mad_hdr;
193 u8 data[OPA_MGMT_MAD_DATA];
194};
195
184struct ib_rmpp_mad { 196struct ib_rmpp_mad {
185 struct ib_mad_hdr mad_hdr; 197 struct ib_mad_hdr mad_hdr;
186 struct ib_rmpp_hdr rmpp_hdr; 198 struct ib_rmpp_hdr rmpp_hdr;
187 u8 data[IB_MGMT_RMPP_DATA]; 199 u8 data[IB_MGMT_RMPP_DATA];
188}; 200};
189 201
202struct opa_rmpp_mad {
203 struct ib_mad_hdr mad_hdr;
204 struct ib_rmpp_hdr rmpp_hdr;
205 u8 data[OPA_MGMT_RMPP_DATA];
206};
207
190struct ib_sa_mad { 208struct ib_sa_mad {
191 struct ib_mad_hdr mad_hdr; 209 struct ib_mad_hdr mad_hdr;
192 struct ib_rmpp_hdr rmpp_hdr; 210 struct ib_rmpp_hdr rmpp_hdr;
@@ -235,7 +253,10 @@ struct ib_class_port_info {
235 * includes the common MAD, RMPP, and class specific headers. 253 * includes the common MAD, RMPP, and class specific headers.
236 * @data_len: Indicates the total size of user-transferred data. 254 * @data_len: Indicates the total size of user-transferred data.
237 * @seg_count: The number of RMPP segments allocated for this send. 255 * @seg_count: The number of RMPP segments allocated for this send.
238 * @seg_size: Size of each RMPP segment. 256 * @seg_size: Size of the data in each RMPP segment. This does not include
257 * class specific headers.
258 * @seg_rmpp_size: Size of each RMPP segment including the class specific
259 * headers.
239 * @timeout_ms: Time to wait for a response. 260 * @timeout_ms: Time to wait for a response.
240 * @retries: Number of times to retry a request for a response. For MADs 261 * @retries: Number of times to retry a request for a response. For MADs
241 * using RMPP, this applies per window. On completion, returns the number 262 * using RMPP, this applies per window. On completion, returns the number
@@ -255,6 +276,7 @@ struct ib_mad_send_buf {
255 int data_len; 276 int data_len;
256 int seg_count; 277 int seg_count;
257 int seg_size; 278 int seg_size;
279 int seg_rmpp_size;
258 int timeout_ms; 280 int timeout_ms;
259 int retries; 281 int retries;
260}; 282};
@@ -263,7 +285,7 @@ struct ib_mad_send_buf {
263 * ib_response_mad - Returns if the specified MAD has been generated in 285 * ib_response_mad - Returns if the specified MAD has been generated in
264 * response to a sent request or trap. 286 * response to a sent request or trap.
265 */ 287 */
266int ib_response_mad(struct ib_mad *mad); 288int ib_response_mad(const struct ib_mad_hdr *hdr);
267 289
268/** 290/**
269 * ib_get_rmpp_resptime - Returns the RMPP response time. 291 * ib_get_rmpp_resptime - Returns the RMPP response time.
@@ -401,7 +423,10 @@ struct ib_mad_send_wc {
401struct ib_mad_recv_buf { 423struct ib_mad_recv_buf {
402 struct list_head list; 424 struct list_head list;
403 struct ib_grh *grh; 425 struct ib_grh *grh;
404 struct ib_mad *mad; 426 union {
427 struct ib_mad *mad;
428 struct opa_mad *opa_mad;
429 };
405}; 430};
406 431
407/** 432/**
@@ -410,6 +435,7 @@ struct ib_mad_recv_buf {
410 * @recv_buf: Specifies the location of the received data buffer(s). 435 * @recv_buf: Specifies the location of the received data buffer(s).
411 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers. 436 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
412 * @mad_len: The length of the received MAD, without duplicated headers. 437 * @mad_len: The length of the received MAD, without duplicated headers.
438 * @mad_seg_size: The size of individual MAD segments
413 * 439 *
414 * For received response, the wr_id contains a pointer to the ib_mad_send_buf 440 * For received response, the wr_id contains a pointer to the ib_mad_send_buf
415 * for the corresponding send request. 441 * for the corresponding send request.
@@ -419,6 +445,7 @@ struct ib_mad_recv_wc {
419 struct ib_mad_recv_buf recv_buf; 445 struct ib_mad_recv_buf recv_buf;
420 struct list_head rmpp_list; 446 struct list_head rmpp_list;
421 int mad_len; 447 int mad_len;
448 size_t mad_seg_size;
422}; 449};
423 450
424/** 451/**
@@ -618,6 +645,7 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
618 * automatically adjust the allocated buffer size to account for any 645 * automatically adjust the allocated buffer size to account for any
619 * additional padding that may be necessary. 646 * additional padding that may be necessary.
620 * @gfp_mask: GFP mask used for the memory allocation. 647 * @gfp_mask: GFP mask used for the memory allocation.
648 * @base_version: Base Version of this MAD
621 * 649 *
622 * This routine allocates a MAD for sending. The returned MAD send buffer 650 * This routine allocates a MAD for sending. The returned MAD send buffer
623 * will reference a data buffer usable for sending a MAD, along 651 * will reference a data buffer usable for sending a MAD, along
@@ -633,7 +661,8 @@ struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
633 u32 remote_qpn, u16 pkey_index, 661 u32 remote_qpn, u16 pkey_index,
634 int rmpp_active, 662 int rmpp_active,
635 int hdr_len, int data_len, 663 int hdr_len, int data_len,
636 gfp_t gfp_mask); 664 gfp_t gfp_mask,
665 u8 base_version);
637 666
638/** 667/**
639 * ib_is_mad_class_rmpp - returns whether given management class 668 * ib_is_mad_class_rmpp - returns whether given management class
@@ -675,6 +704,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
675 * @agent: the agent in question 704 * @agent: the agent in question
676 * @return: true if agent is performing rmpp, false otherwise. 705 * @return: true if agent is performing rmpp, false otherwise.
677 */ 706 */
678int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent); 707int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);
679 708
680#endif /* IB_MAD_H */ 709#endif /* IB_MAD_H */
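
A sketch of how a consumer might use the new base_version argument to ib_create_send_mad() together with the OPA constants added above. The helper name, the rdma_cap_opa_mad() check (added to ib_verbs.h later in this same series) and the use of IB_MGMT_MAD_HDR as the header length are assumptions for illustration, not taken from the patch:

static struct ib_mad_send_buf *
example_alloc_send_mad(struct ib_mad_agent *agent, struct ib_device *dev,
                       u8 port, u32 remote_qpn, u16 pkey_index)
{
        bool opa = rdma_cap_opa_mad(dev, port);
        int data_len = opa ? OPA_MGMT_MAD_DATA : IB_MGMT_MAD_DATA;
        u8 base_version = opa ? OPA_MGMT_BASE_VERSION : IB_MGMT_BASE_VERSION;

        return ib_create_send_mad(agent, remote_qpn, pkey_index,
                                  0 /* no RMPP */, IB_MGMT_MAD_HDR,
                                  data_len, GFP_KERNEL, base_version);
}
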
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 65994a19e840..b0f898e3b2e7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -81,6 +81,13 @@ enum rdma_transport_type {
81 RDMA_TRANSPORT_USNIC_UDP 81 RDMA_TRANSPORT_USNIC_UDP
82}; 82};
83 83
84enum rdma_protocol_type {
85 RDMA_PROTOCOL_IB,
86 RDMA_PROTOCOL_IBOE,
87 RDMA_PROTOCOL_IWARP,
88 RDMA_PROTOCOL_USNIC_UDP
89};
90
84__attribute_const__ enum rdma_transport_type 91__attribute_const__ enum rdma_transport_type
85rdma_node_get_transport(enum rdma_node_type node_type); 92rdma_node_get_transport(enum rdma_node_type node_type);
86 93
@@ -166,6 +173,16 @@ struct ib_odp_caps {
166 } per_transport_caps; 173 } per_transport_caps;
167}; 174};
168 175
176enum ib_cq_creation_flags {
177 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
178};
179
180struct ib_cq_init_attr {
181 unsigned int cqe;
182 int comp_vector;
183 u32 flags;
184};
185
169struct ib_device_attr { 186struct ib_device_attr {
170 u64 fw_ver; 187 u64 fw_ver;
171 __be64 sys_image_guid; 188 __be64 sys_image_guid;
@@ -210,6 +227,8 @@ struct ib_device_attr {
210 int sig_prot_cap; 227 int sig_prot_cap;
211 int sig_guard_cap; 228 int sig_guard_cap;
212 struct ib_odp_caps odp_caps; 229 struct ib_odp_caps odp_caps;
230 uint64_t timestamp_mask;
231 uint64_t hca_core_clock; /* in KHZ */
213}; 232};
214 233
215enum ib_mtu { 234enum ib_mtu {
@@ -346,6 +365,42 @@ union rdma_protocol_stats {
346 struct iw_protocol_stats iw; 365 struct iw_protocol_stats iw;
347}; 366};
348 367
368/* Define bits for the various functionality this port needs to be supported by
369 * the core.
370 */
371/* Management 0x00000FFF */
372#define RDMA_CORE_CAP_IB_MAD 0x00000001
373#define RDMA_CORE_CAP_IB_SMI 0x00000002
374#define RDMA_CORE_CAP_IB_CM 0x00000004
375#define RDMA_CORE_CAP_IW_CM 0x00000008
376#define RDMA_CORE_CAP_IB_SA 0x00000010
377#define RDMA_CORE_CAP_OPA_MAD 0x00000020
378
379/* Address format 0x000FF000 */
380#define RDMA_CORE_CAP_AF_IB 0x00001000
381#define RDMA_CORE_CAP_ETH_AH 0x00002000
382
383/* Protocol 0xFFF00000 */
384#define RDMA_CORE_CAP_PROT_IB 0x00100000
385#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
386#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
387
388#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
389 | RDMA_CORE_CAP_IB_MAD \
390 | RDMA_CORE_CAP_IB_SMI \
391 | RDMA_CORE_CAP_IB_CM \
392 | RDMA_CORE_CAP_IB_SA \
393 | RDMA_CORE_CAP_AF_IB)
394#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
395 | RDMA_CORE_CAP_IB_MAD \
396 | RDMA_CORE_CAP_IB_CM \
397 | RDMA_CORE_CAP_AF_IB \
398 | RDMA_CORE_CAP_ETH_AH)
399#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
400 | RDMA_CORE_CAP_IW_CM)
401#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
402 | RDMA_CORE_CAP_OPA_MAD)
403
349struct ib_port_attr { 404struct ib_port_attr {
350 enum ib_port_state state; 405 enum ib_port_state state;
351 enum ib_mtu max_mtu; 406 enum ib_mtu max_mtu;
@@ -412,6 +467,8 @@ enum ib_event_type {
412 IB_EVENT_GID_CHANGE, 467 IB_EVENT_GID_CHANGE,
413}; 468};
414 469
470__attribute_const__ const char *ib_event_msg(enum ib_event_type event);
471
415struct ib_event { 472struct ib_event {
416 struct ib_device *device; 473 struct ib_device *device;
417 union { 474 union {
@@ -663,6 +720,8 @@ enum ib_wc_status {
663 IB_WC_GENERAL_ERR 720 IB_WC_GENERAL_ERR
664}; 721};
665 722
723__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
724
666enum ib_wc_opcode { 725enum ib_wc_opcode {
667 IB_WC_SEND, 726 IB_WC_SEND,
668 IB_WC_RDMA_WRITE, 727 IB_WC_RDMA_WRITE,
@@ -1407,7 +1466,7 @@ struct ib_flow {
1407 struct ib_uobject *uobject; 1466 struct ib_uobject *uobject;
1408}; 1467};
1409 1468
1410struct ib_mad; 1469struct ib_mad_hdr;
1411struct ib_grh; 1470struct ib_grh;
1412 1471
1413enum ib_process_mad_flags { 1472enum ib_process_mad_flags {
@@ -1474,6 +1533,13 @@ struct ib_dma_mapping_ops {
1474 1533
1475struct iw_cm_verbs; 1534struct iw_cm_verbs;
1476 1535
1536struct ib_port_immutable {
1537 int pkey_tbl_len;
1538 int gid_tbl_len;
1539 u32 core_cap_flags;
1540 u32 max_mad_size;
1541};
1542
1477struct ib_device { 1543struct ib_device {
1478 struct device *dma_device; 1544 struct device *dma_device;
1479 1545
@@ -1487,8 +1553,10 @@ struct ib_device {
1487 struct list_head client_data_list; 1553 struct list_head client_data_list;
1488 1554
1489 struct ib_cache cache; 1555 struct ib_cache cache;
1490 int *pkey_tbl_len; 1556 /**
1491 int *gid_tbl_len; 1557 * port_immutable is indexed by port number
1558 */
1559 struct ib_port_immutable *port_immutable;
1492 1560
1493 int num_comp_vectors; 1561 int num_comp_vectors;
1494 1562
@@ -1497,7 +1565,8 @@ struct ib_device {
1497 int (*get_protocol_stats)(struct ib_device *device, 1565 int (*get_protocol_stats)(struct ib_device *device,
1498 union rdma_protocol_stats *stats); 1566 union rdma_protocol_stats *stats);
1499 int (*query_device)(struct ib_device *device, 1567 int (*query_device)(struct ib_device *device,
1500 struct ib_device_attr *device_attr); 1568 struct ib_device_attr *device_attr,
1569 struct ib_udata *udata);
1501 int (*query_port)(struct ib_device *device, 1570 int (*query_port)(struct ib_device *device,
1502 u8 port_num, 1571 u8 port_num,
1503 struct ib_port_attr *port_attr); 1572 struct ib_port_attr *port_attr);
@@ -1561,8 +1630,8 @@ struct ib_device {
1561 int (*post_recv)(struct ib_qp *qp, 1630 int (*post_recv)(struct ib_qp *qp,
1562 struct ib_recv_wr *recv_wr, 1631 struct ib_recv_wr *recv_wr,
1563 struct ib_recv_wr **bad_recv_wr); 1632 struct ib_recv_wr **bad_recv_wr);
1564 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe, 1633 struct ib_cq * (*create_cq)(struct ib_device *device,
1565 int comp_vector, 1634 const struct ib_cq_init_attr *attr,
1566 struct ib_ucontext *context, 1635 struct ib_ucontext *context,
1567 struct ib_udata *udata); 1636 struct ib_udata *udata);
1568 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 1637 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
@@ -1637,10 +1706,13 @@ struct ib_device {
1637 int (*process_mad)(struct ib_device *device, 1706 int (*process_mad)(struct ib_device *device,
1638 int process_mad_flags, 1707 int process_mad_flags,
1639 u8 port_num, 1708 u8 port_num,
1640 struct ib_wc *in_wc, 1709 const struct ib_wc *in_wc,
1641 struct ib_grh *in_grh, 1710 const struct ib_grh *in_grh,
1642 struct ib_mad *in_mad, 1711 const struct ib_mad_hdr *in_mad,
1643 struct ib_mad *out_mad); 1712 size_t in_mad_size,
1713 struct ib_mad_hdr *out_mad,
1714 size_t *out_mad_size,
1715 u16 *out_mad_pkey_index);
1644 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 1716 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
1645 struct ib_ucontext *ucontext, 1717 struct ib_ucontext *ucontext,
1646 struct ib_udata *udata); 1718 struct ib_udata *udata);
@@ -1673,8 +1745,17 @@ struct ib_device {
1673 char node_desc[64]; 1745 char node_desc[64];
1674 __be64 node_guid; 1746 __be64 node_guid;
1675 u32 local_dma_lkey; 1747 u32 local_dma_lkey;
1748 u16 is_switch:1;
1676 u8 node_type; 1749 u8 node_type;
1677 u8 phys_port_cnt; 1750 u8 phys_port_cnt;
1751
1752 /**
1753 * The following mandatory functions are used only at device
1754 * registration. Keep functions such as these at the end of this
1755 * structure to avoid cache line misses when accessing struct ib_device
1756 * in fast paths.
1757 */
1758 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
1678}; 1759};
1679 1760
1680struct ib_client { 1761struct ib_client {
@@ -1743,6 +1824,297 @@ int ib_query_port(struct ib_device *device,
1743enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 1824enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1744 u8 port_num); 1825 u8 port_num);
1745 1826
1827/**
1828 * rdma_cap_ib_switch - Check if the device is IB switch
1829 * @device: Device to check
1830 *
1831 * Device driver is responsible for setting is_switch bit on
1832 * in ib_device structure at init time.
1833 *
1834 * Return: true if the device is IB switch.
1835 */
1836static inline bool rdma_cap_ib_switch(const struct ib_device *device)
1837{
1838 return device->is_switch;
1839}
1840
1841/**
1842 * rdma_start_port - Return the first valid port number for the device
1843 * specified
1844 *
1845 * @device: Device to be checked
1846 *
1847 * Return start port number
1848 */
1849static inline u8 rdma_start_port(const struct ib_device *device)
1850{
1851 return rdma_cap_ib_switch(device) ? 0 : 1;
1852}
1853
1854/**
1855 * rdma_end_port - Return the last valid port number for the device
1856 * specified
1857 *
1858 * @device: Device to be checked
1859 *
1860 * Return last port number
1861 */
1862static inline u8 rdma_end_port(const struct ib_device *device)
1863{
1864 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
1865}
1866
1867static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
1868{
1869 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
1870}
1871
1872static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
1873{
1874 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
1875}
1876
1877static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
1878{
1879 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
1880}
1881
1882static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
1883{
1884 return device->port_immutable[port_num].core_cap_flags &
1885 (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
1886}
1887
1888/**
1889 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
1890 * Management Datagrams.
1891 * @device: Device to check
1892 * @port_num: Port number to check
1893 *
1894 * Management Datagrams (MAD) are a required part of the InfiniBand
1895 * specification and are supported on all InfiniBand devices. A slightly
 1896 * extended version is also supported on OPA interfaces.
1897 *
1898 * Return: true if the port supports sending/receiving of MAD packets.
1899 */
1900static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
1901{
1902 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
1903}
1904
1905/**
 1906 * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
1907 * Management Datagrams.
1908 * @device: Device to check
1909 * @port_num: Port number to check
1910 *
1911 * Intel OmniPath devices extend and/or replace the InfiniBand Management
1912 * datagrams with their own versions. These OPA MADs share many but not all of
1913 * the characteristics of InfiniBand MADs.
1914 *
1915 * OPA MADs differ in the following ways:
1916 *
1917 * 1) MADs are variable size up to 2K
1918 * IBTA defined MADs remain fixed at 256 bytes
1919 * 2) OPA SMPs must carry valid PKeys
1920 * 3) OPA SMP packets are a different format
1921 *
1922 * Return: true if the port supports OPA MAD packet formats.
1923 */
1924static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
1925{
1926 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
1927 == RDMA_CORE_CAP_OPA_MAD;
1928}
1929
1930/**
 1931 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
1932 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
1933 * @device: Device to check
1934 * @port_num: Port number to check
1935 *
1936 * Each InfiniBand node is required to provide a Subnet Management Agent
1937 * that the subnet manager can access. Prior to the fabric being fully
1938 * configured by the subnet manager, the SMA is accessed via a well known
1939 * interface called the Subnet Management Interface (SMI). This interface
1940 * uses directed route packets to communicate with the SM to get around the
1941 * chicken and egg problem of the SM needing to know what's on the fabric
1942 * in order to configure the fabric, and needing to configure the fabric in
1943 * order to send packets to the devices on the fabric. These directed
1944 * route packets do not need the fabric fully configured in order to reach
1945 * their destination. The SMI is the only method allowed to send
1946 * directed route packets on an InfiniBand fabric.
1947 *
1948 * Return: true if the port provides an SMI.
1949 */
1950static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
1951{
1952 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
1953}
1954
1955/**
 1956 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
1957 * Communication Manager.
1958 * @device: Device to check
1959 * @port_num: Port number to check
1960 *
1961 * The InfiniBand Communication Manager is one of many pre-defined General
1962 * Service Agents (GSA) that are accessed via the General Service
 1963 * Interface (GSI). Its role is to facilitate the establishment of connections
1964 * between nodes as well as other management related tasks for established
1965 * connections.
1966 *
1967 * Return: true if the port supports an IB CM (this does not guarantee that
1968 * a CM is actually running however).
1969 */
1970static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
1971{
1972 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
1973}
1974
1975/**
 1976 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
1977 * Communication Manager.
1978 * @device: Device to check
1979 * @port_num: Port number to check
1980 *
1981 * Similar to above, but specific to iWARP connections which have a different
 1982 * management protocol than InfiniBand.
1983 *
1984 * Return: true if the port supports an iWARP CM (this does not guarantee that
1985 * a CM is actually running however).
1986 */
1987static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
1988{
1989 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
1990}
1991
1992/**
 1993 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
1994 * Subnet Administration.
1995 * @device: Device to check
1996 * @port_num: Port number to check
1997 *
1998 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
1999 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
2000 * fabrics, devices should resolve routes to other hosts by contacting the
2001 * SA to query the proper route.
2002 *
2003 * Return: true if the port should act as a client to the fabric Subnet
2004 * Administration interface. This does not imply that the SA service is
2005 * running locally.
2006 */
2007static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2008{
2009 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2010}
2011
2012/**
 2013 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
2014 * Multicast.
2015 * @device: Device to check
2016 * @port_num: Port number to check
2017 *
2018 * InfiniBand multicast registration is more complex than normal IPv4 or
2019 * IPv6 multicast registration. Each Host Channel Adapter must register
2020 * with the Subnet Manager when it wishes to join a multicast group. It
2021 * should do so only once regardless of how many queue pairs it subscribes
2022 * to this group. And it should leave the group only after all queue pairs
2023 * attached to the group have been detached.
2024 *
 2025 * Return: true if the port must undertake the additional administrative
2026 * overhead of registering/unregistering with the SM and tracking of the
2027 * total number of queue pairs attached to the multicast group.
2028 */
2029static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2030{
2031 return rdma_cap_ib_sa(device, port_num);
2032}
2033
2034/**
 2035 * rdma_cap_af_ib - Check if the port of a device supports
 2036 * native InfiniBand addressing.
2037 * @device: Device to check
2038 * @port_num: Port number to check
2039 *
2040 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2041 * GID. RoCE uses a different mechanism, but still generates a GID via
2042 * a prescribed mechanism and port specific data.
2043 *
2044 * Return: true if the port uses a GID address to identify devices on the
2045 * network.
2046 */
2047static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2048{
2049 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2050}
2051
2052/**
 2053 * rdma_cap_eth_ah - Check if the port of a device supports
 2054 * Ethernet Address Handles.
2055 * @device: Device to check
2056 * @port_num: Port number to check
2057 *
2058 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2059 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2060 * port. Normally, packet headers are generated by the sending host
2061 * adapter, but when sending connectionless datagrams, we must manually
2062 * inject the proper headers for the fabric we are communicating over.
2063 *
2064 * Return: true if we are running as a RoCE port and must force the
2065 * addition of a Global Route Header built from our Ethernet Address
2066 * Handle into our header list for connectionless packets.
2067 */
2068static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2069{
2070 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2071}
2072
2073/**
 2074 * rdma_cap_read_multi_sge - Check if the port of a device supports
 2075 * RDMA READ requests with multiple Scatter-Gather Entries.
2076 * @device: Device to check
2077 * @port_num: Port number to check
2078 *
2079 * iWARP has a restriction that RDMA READ requests may only have a single
2080 * Scatter/Gather Entry (SGE) in the work request.
2081 *
2082 * NOTE: although the linux kernel currently assumes all devices are either
2083 * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and
2084 * WRITEs, according to Tom Talpey, this is not accurate. There are some
2085 * devices out there that support more than a single SGE on RDMA READ
2086 * requests, but do not support the same number of SGEs as they do on
 2087 * RDMA WRITE requests. The Linux kernel would need rearchitecting to
 2088 * support such imbalanced READ/WRITE SGE limits. So, for now, we assume
 2089 * that a device either supports the same number of READ and WRITE SGEs,
 2090 * or only gets one READ SGE.
2091 *
2092 * Return: true for any device that allows more than one SGE in RDMA READ
2093 * requests.
2094 */
2095static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
2096 u8 port_num)
2097{
2098 return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
2099}
2100
2101/**
2102 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2103 *
2104 * @device: Device
2105 * @port_num: Port number
2106 *
2107 * This MAD size includes the MAD headers and MAD payload. No other headers
2108 * are included.
2109 *
 2110 * Return the max MAD size required by the port. Returns 0 if the port
 2111 * does not support MADs.
2112 */
2113static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2114{
2115 return device->port_immutable[port_num].max_mad_size;
2116}
2117
1746int ib_query_gid(struct ib_device *device, 2118int ib_query_gid(struct ib_device *device,
1747 u8 port_num, int index, union ib_gid *gid); 2119 u8 port_num, int index, union ib_gid *gid);
1748 2120
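
A short sketch (not from the patch; the function name is illustrative) of how core code can combine the per-port capability helpers above with rdma_max_mad_size() when setting up MAD support:

static void example_setup_mad_ports(struct ib_device *device)
{
        size_t recv_size;
        u8 port;

        for (port = rdma_start_port(device); port <= rdma_end_port(device); port++) {
                if (!rdma_cap_ib_mad(device, port))
                        continue;       /* this port does not do MADs */

                recv_size = rdma_max_mad_size(device, port);
                /* allocate per-port receive buffers of recv_size bytes ... */
        }
}
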
@@ -1799,8 +2171,9 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1799 * @ah_attr: Returned attributes that can be used when creating an address 2171 * @ah_attr: Returned attributes that can be used when creating an address
1800 * handle for replying to the message. 2172 * handle for replying to the message.
1801 */ 2173 */
1802int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, 2174int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
1803 struct ib_grh *grh, struct ib_ah_attr *ah_attr); 2175 const struct ib_wc *wc, const struct ib_grh *grh,
2176 struct ib_ah_attr *ah_attr);
1804 2177
1805/** 2178/**
1806 * ib_create_ah_from_wc - Creates an address handle associated with the 2179 * ib_create_ah_from_wc - Creates an address handle associated with the
@@ -1814,8 +2187,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1814 * The address handle is used to reference a local or global destination 2187 * The address handle is used to reference a local or global destination
1815 * in all UD QP post sends. 2188 * in all UD QP post sends.
1816 */ 2189 */
1817struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, 2190struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
1818 struct ib_grh *grh, u8 port_num); 2191 const struct ib_grh *grh, u8 port_num);
1819 2192
1820/** 2193/**
1821 * ib_modify_ah - Modifies the address vector associated with an address 2194 * ib_modify_ah - Modifies the address vector associated with an address
@@ -2011,16 +2384,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
2011 * asynchronous event not associated with a completion occurs on the CQ. 2384 * asynchronous event not associated with a completion occurs on the CQ.
2012 * @cq_context: Context associated with the CQ returned to the user via 2385 * @cq_context: Context associated with the CQ returned to the user via
2013 * the associated completion and event handlers. 2386 * the associated completion and event handlers.
2014 * @cqe: The minimum size of the CQ. 2387 * @cq_attr: The attributes the CQ should be created upon.
2015 * @comp_vector - Completion vector used to signal completion events.
2016 * Must be >= 0 and < context->num_comp_vectors.
2017 * 2388 *
2018 * Users can examine the cq structure to determine the actual CQ size. 2389 * Users can examine the cq structure to determine the actual CQ size.
2019 */ 2390 */
2020struct ib_cq *ib_create_cq(struct ib_device *device, 2391struct ib_cq *ib_create_cq(struct ib_device *device,
2021 ib_comp_handler comp_handler, 2392 ib_comp_handler comp_handler,
2022 void (*event_handler)(struct ib_event *, void *), 2393 void (*event_handler)(struct ib_event *, void *),
2023 void *cq_context, int cqe, int comp_vector); 2394 void *cq_context,
2395 const struct ib_cq_init_attr *cq_attr);
2024 2396
2025/** 2397/**
2026 * ib_resize_cq - Modifies the capacity of the CQ. 2398 * ib_resize_cq - Modifies the capacity of the CQ.
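
The last hunk above changes ib_create_cq() to take a struct ib_cq_init_attr instead of separate cqe/comp_vector arguments. A minimal caller under the new convention; the CQ depth and handler wiring are illustrative only:

static struct ib_cq *example_create_cq(struct ib_device *device,
                                       ib_comp_handler comp_handler,
                                       void *cq_context)
{
        struct ib_cq_init_attr cq_attr = {
                .cqe         = 256,     /* minimum number of CQ entries */
                .comp_vector = 0,
                .flags       = 0,       /* or IB_CQ_FLAGS_TIMESTAMP_COMPLETION */
        };

        return ib_create_cq(device, comp_handler, NULL /* event handler */,
                            cq_context, &cq_attr);
}
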
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 1017e0bdf8ba..036bd2772662 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -91,6 +91,7 @@ struct iw_cm_id {
91 /* Used by provider to add and remove refs on IW cm_id */ 91 /* Used by provider to add and remove refs on IW cm_id */
92 void (*add_ref)(struct iw_cm_id *); 92 void (*add_ref)(struct iw_cm_id *);
93 void (*rem_ref)(struct iw_cm_id *); 93 void (*rem_ref)(struct iw_cm_id *);
94 u8 tos;
94}; 95};
95 96
96struct iw_cm_conn_param { 97struct iw_cm_conn_param {
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
new file mode 100644
index 000000000000..29063e84c253
--- /dev/null
+++ b/include/rdma/opa_smi.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright (c) 2014 Intel Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#if !defined(OPA_SMI_H)
34#define OPA_SMI_H
35
36#include <rdma/ib_mad.h>
37#include <rdma/ib_smi.h>
38
39#define OPA_SMP_LID_DATA_SIZE 2016
40#define OPA_SMP_DR_DATA_SIZE 1872
41#define OPA_SMP_MAX_PATH_HOPS 64
42
43#define OPA_SMI_CLASS_VERSION 0x80
44
45#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
46
47struct opa_smp {
48 u8 base_version;
49 u8 mgmt_class;
50 u8 class_version;
51 u8 method;
52 __be16 status;
53 u8 hop_ptr;
54 u8 hop_cnt;
55 __be64 tid;
56 __be16 attr_id;
57 __be16 resv;
58 __be32 attr_mod;
59 __be64 mkey;
60 union {
61 struct {
62 uint8_t data[OPA_SMP_LID_DATA_SIZE];
63 } lid;
64 struct {
65 __be32 dr_slid;
66 __be32 dr_dlid;
67 u8 initial_path[OPA_SMP_MAX_PATH_HOPS];
68 u8 return_path[OPA_SMP_MAX_PATH_HOPS];
69 u8 reserved[8];
70 u8 data[OPA_SMP_DR_DATA_SIZE];
71 } dr;
72 } route;
73} __packed;
74
75
76static inline u8
77opa_get_smp_direction(struct opa_smp *smp)
78{
79 return ib_get_smp_direction((struct ib_smp *)smp);
80}
81
82static inline u8 *opa_get_smp_data(struct opa_smp *smp)
83{
84 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
85 return smp->route.dr.data;
86
87 return smp->route.lid.data;
88}
89
90static inline size_t opa_get_smp_data_size(struct opa_smp *smp)
91{
92 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
93 return sizeof(smp->route.dr.data);
94
95 return sizeof(smp->route.lid.data);
96}
97
98static inline size_t opa_get_smp_header_size(struct opa_smp *smp)
99{
100 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
101 return sizeof(*smp) - sizeof(smp->route.dr.data);
102
103 return sizeof(*smp) - sizeof(smp->route.lid.data);
104}
105
106#endif /* OPA_SMI_H */
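
An illustrative helper (not part of the file above) showing why opa_get_smp_data() and opa_get_smp_data_size() exist: they hide the LID-routed vs. directed-route payload layout when copying attribute data into an OPA SMP:

static void example_copy_smp_attr(struct opa_smp *smp, const void *attr,
                                  size_t len)
{
        u8 *data = opa_get_smp_data(smp);
        size_t room = opa_get_smp_data_size(smp);

        memcpy(data, attr, min(len, room));
}
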
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 1ed2088dc9f5..c92522c192d2 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -62,6 +62,8 @@ enum rdma_cm_event_type {
62 RDMA_CM_EVENT_TIMEWAIT_EXIT 62 RDMA_CM_EVENT_TIMEWAIT_EXIT
63}; 63};
64 64
65__attribute_const__ const char *rdma_event_msg(enum rdma_cm_event_type event);
66
65enum rdma_port_space { 67enum rdma_port_space {
66 RDMA_PS_SDP = 0x0001, 68 RDMA_PS_SDP = 0x0001,
67 RDMA_PS_IPOIB = 0x0002, 69 RDMA_PS_IPOIB = 0x0002,
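
A sketch of the intended use of the new rdma_event_msg() helper for logging; the callback shape follows the usual rdma_cm event-handler signature, which is assumed here rather than shown in this diff:

static int example_cma_event_handler(struct rdma_cm_id *id,
                                     struct rdma_cm_event *event)
{
        pr_info("rdma_cm event: %s (status %d)\n",
                rdma_event_msg(event->event), event->status);
        return 0;
}
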
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index d0a66aa1868d..e0a3398b1547 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -1,9 +1,6 @@
1/* 1/*
2 * This header file contains public constants and structures used by 2 * This header file contains public constants and structures used by
3 * the scsi code for linux. 3 * the SCSI initiator code.
4 *
5 * For documentation on the OPCODES, MESSAGES, and SENSE values,
6 * please consult the SCSI standard.
7 */ 4 */
8#ifndef _SCSI_SCSI_H 5#ifndef _SCSI_SCSI_H
9#define _SCSI_SCSI_H 6#define _SCSI_SCSI_H
@@ -11,6 +8,8 @@
11#include <linux/types.h> 8#include <linux/types.h>
12#include <linux/scatterlist.h> 9#include <linux/scatterlist.h>
13#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <scsi/scsi_common.h>
12#include <scsi/scsi_proto.h>
14 13
15struct scsi_cmnd; 14struct scsi_cmnd;
16 15
@@ -49,187 +48,6 @@ enum scsi_timeouts {
49 */ 48 */
50#define SCAN_WILD_CARD ~0 49#define SCAN_WILD_CARD ~0
51 50
52/*
53 * SCSI opcodes
54 */
55
56#define TEST_UNIT_READY 0x00
57#define REZERO_UNIT 0x01
58#define REQUEST_SENSE 0x03
59#define FORMAT_UNIT 0x04
60#define READ_BLOCK_LIMITS 0x05
61#define REASSIGN_BLOCKS 0x07
62#define INITIALIZE_ELEMENT_STATUS 0x07
63#define READ_6 0x08
64#define WRITE_6 0x0a
65#define SEEK_6 0x0b
66#define READ_REVERSE 0x0f
67#define WRITE_FILEMARKS 0x10
68#define SPACE 0x11
69#define INQUIRY 0x12
70#define RECOVER_BUFFERED_DATA 0x14
71#define MODE_SELECT 0x15
72#define RESERVE 0x16
73#define RELEASE 0x17
74#define COPY 0x18
75#define ERASE 0x19
76#define MODE_SENSE 0x1a
77#define START_STOP 0x1b
78#define RECEIVE_DIAGNOSTIC 0x1c
79#define SEND_DIAGNOSTIC 0x1d
80#define ALLOW_MEDIUM_REMOVAL 0x1e
81
82#define READ_FORMAT_CAPACITIES 0x23
83#define SET_WINDOW 0x24
84#define READ_CAPACITY 0x25
85#define READ_10 0x28
86#define WRITE_10 0x2a
87#define SEEK_10 0x2b
88#define POSITION_TO_ELEMENT 0x2b
89#define WRITE_VERIFY 0x2e
90#define VERIFY 0x2f
91#define SEARCH_HIGH 0x30
92#define SEARCH_EQUAL 0x31
93#define SEARCH_LOW 0x32
94#define SET_LIMITS 0x33
95#define PRE_FETCH 0x34
96#define READ_POSITION 0x34
97#define SYNCHRONIZE_CACHE 0x35
98#define LOCK_UNLOCK_CACHE 0x36
99#define READ_DEFECT_DATA 0x37
100#define MEDIUM_SCAN 0x38
101#define COMPARE 0x39
102#define COPY_VERIFY 0x3a
103#define WRITE_BUFFER 0x3b
104#define READ_BUFFER 0x3c
105#define UPDATE_BLOCK 0x3d
106#define READ_LONG 0x3e
107#define WRITE_LONG 0x3f
108#define CHANGE_DEFINITION 0x40
109#define WRITE_SAME 0x41
110#define UNMAP 0x42
111#define READ_TOC 0x43
112#define READ_HEADER 0x44
113#define GET_EVENT_STATUS_NOTIFICATION 0x4a
114#define LOG_SELECT 0x4c
115#define LOG_SENSE 0x4d
116#define XDWRITEREAD_10 0x53
117#define MODE_SELECT_10 0x55
118#define RESERVE_10 0x56
119#define RELEASE_10 0x57
120#define MODE_SENSE_10 0x5a
121#define PERSISTENT_RESERVE_IN 0x5e
122#define PERSISTENT_RESERVE_OUT 0x5f
123#define VARIABLE_LENGTH_CMD 0x7f
124#define REPORT_LUNS 0xa0
125#define SECURITY_PROTOCOL_IN 0xa2
126#define MAINTENANCE_IN 0xa3
127#define MAINTENANCE_OUT 0xa4
128#define MOVE_MEDIUM 0xa5
129#define EXCHANGE_MEDIUM 0xa6
130#define READ_12 0xa8
131#define SERVICE_ACTION_OUT_12 0xa9
132#define WRITE_12 0xaa
133#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */
134#define SERVICE_ACTION_IN_12 0xab
135#define WRITE_VERIFY_12 0xae
136#define VERIFY_12 0xaf
137#define SEARCH_HIGH_12 0xb0
138#define SEARCH_EQUAL_12 0xb1
139#define SEARCH_LOW_12 0xb2
140#define SECURITY_PROTOCOL_OUT 0xb5
141#define READ_ELEMENT_STATUS 0xb8
142#define SEND_VOLUME_TAG 0xb6
143#define WRITE_LONG_2 0xea
144#define EXTENDED_COPY 0x83
145#define RECEIVE_COPY_RESULTS 0x84
146#define ACCESS_CONTROL_IN 0x86
147#define ACCESS_CONTROL_OUT 0x87
148#define READ_16 0x88
149#define COMPARE_AND_WRITE 0x89
150#define WRITE_16 0x8a
151#define READ_ATTRIBUTE 0x8c
152#define WRITE_ATTRIBUTE 0x8d
153#define VERIFY_16 0x8f
154#define SYNCHRONIZE_CACHE_16 0x91
155#define WRITE_SAME_16 0x93
156#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
157#define SERVICE_ACTION_IN_16 0x9e
158#define SERVICE_ACTION_OUT_16 0x9f
159/* values for service action in */
160#define SAI_READ_CAPACITY_16 0x10
161#define SAI_GET_LBA_STATUS 0x12
162#define SAI_REPORT_REFERRALS 0x13
163/* values for VARIABLE_LENGTH_CMD service action codes
164 * see spc4r17 Section D.3.5, table D.7 and D.8 */
165#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
166/* values for maintenance in */
167#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
168#define MI_REPORT_TARGET_PGS 0x0a
169#define MI_REPORT_ALIASES 0x0b
170#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
171#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
172#define MI_REPORT_PRIORITY 0x0e
173#define MI_REPORT_TIMESTAMP 0x0f
174#define MI_MANAGEMENT_PROTOCOL_IN 0x10
175/* value for MI_REPORT_TARGET_PGS ext header */
176#define MI_EXT_HDR_PARAM_FMT 0x20
177/* values for maintenance out */
178#define MO_SET_IDENTIFYING_INFORMATION 0x06
179#define MO_SET_TARGET_PGS 0x0a
180#define MO_CHANGE_ALIASES 0x0b
181#define MO_SET_PRIORITY 0x0e
182#define MO_SET_TIMESTAMP 0x0f
183#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
184/* values for variable length command */
185#define XDREAD_32 0x03
186#define XDWRITE_32 0x04
187#define XPWRITE_32 0x06
188#define XDWRITEREAD_32 0x07
189#define READ_32 0x09
190#define VERIFY_32 0x0a
191#define WRITE_32 0x0b
192#define WRITE_SAME_32 0x0d
193
194/* Values for T10/04-262r7 */
195#define ATA_16 0x85 /* 16-byte pass-thru */
196#define ATA_12 0xa1 /* 12-byte pass-thru */
197
198/* Vendor specific CDBs start here */
199#define VENDOR_SPECIFIC_CDB 0xc0
200
201/*
202 * SCSI command lengths
203 */
204
205#define SCSI_MAX_VARLEN_CDB_SIZE 260
206
207/* defined in T10 SCSI Primary Commands-2 (SPC2) */
208struct scsi_varlen_cdb_hdr {
209 __u8 opcode; /* opcode always == VARIABLE_LENGTH_CMD */
210 __u8 control;
211 __u8 misc[5];
212 __u8 additional_cdb_length; /* total cdb length - 8 */
213 __be16 service_action;
214 /* service specific data follows */
215};
216
217static inline unsigned
218scsi_varlen_cdb_length(const void *hdr)
219{
220 return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
221}
222
223extern const unsigned char scsi_command_size_tbl[8];
224#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
225
226static inline unsigned
227scsi_command_size(const unsigned char *cmnd)
228{
229 return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
230 scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
231}
232
233#ifdef CONFIG_ACPI 51#ifdef CONFIG_ACPI
234struct acpi_bus_type; 52struct acpi_bus_type;
235 53
@@ -240,22 +58,6 @@ extern void
240scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus); 58scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus);
241#endif 59#endif
242 60
243/*
244 * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
245 * T10/1561-D Revision 4 Draft dated 7th November 2002.
246 */
247#define SAM_STAT_GOOD 0x00
248#define SAM_STAT_CHECK_CONDITION 0x02
249#define SAM_STAT_CONDITION_MET 0x04
250#define SAM_STAT_BUSY 0x08
251#define SAM_STAT_INTERMEDIATE 0x10
252#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
253#define SAM_STAT_RESERVATION_CONFLICT 0x18
254#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
255#define SAM_STAT_TASK_SET_FULL 0x28
256#define SAM_STAT_ACA_ACTIVE 0x30
257#define SAM_STAT_TASK_ABORTED 0x40
258
259/** scsi_status_is_good - check the status return. 61/** scsi_status_is_good - check the status return.
260 * 62 *
261 * @status: the status passed up from the driver (including host and 63 * @status: the status passed up from the driver (including host and
@@ -279,86 +81,6 @@ static inline int scsi_status_is_good(int status)
279 (status == SAM_STAT_COMMAND_TERMINATED)); 81 (status == SAM_STAT_COMMAND_TERMINATED));
280} 82}
281 83
282/*
283 * Status codes. These are deprecated as they are shifted 1 bit right
284 * from those found in the SCSI standards. This causes confusion for
285 * applications that are ported to several OSes. Prefer SAM Status codes
286 * above.
287 */
288
289#define GOOD 0x00
290#define CHECK_CONDITION 0x01
291#define CONDITION_GOOD 0x02
292#define BUSY 0x04
293#define INTERMEDIATE_GOOD 0x08
294#define INTERMEDIATE_C_GOOD 0x0a
295#define RESERVATION_CONFLICT 0x0c
296#define COMMAND_TERMINATED 0x11
297#define QUEUE_FULL 0x14
298#define ACA_ACTIVE 0x18
299#define TASK_ABORTED 0x20
300
301#define STATUS_MASK 0xfe
302
303/*
304 * SENSE KEYS
305 */
306
307#define NO_SENSE 0x00
308#define RECOVERED_ERROR 0x01
309#define NOT_READY 0x02
310#define MEDIUM_ERROR 0x03
311#define HARDWARE_ERROR 0x04
312#define ILLEGAL_REQUEST 0x05
313#define UNIT_ATTENTION 0x06
314#define DATA_PROTECT 0x07
315#define BLANK_CHECK 0x08
316#define COPY_ABORTED 0x0a
317#define ABORTED_COMMAND 0x0b
318#define VOLUME_OVERFLOW 0x0d
319#define MISCOMPARE 0x0e
320
321
322/*
323 * DEVICE TYPES
324 * Please keep them in 0x%02x format for $MODALIAS to work
325 */
326
327#define TYPE_DISK 0x00
328#define TYPE_TAPE 0x01
329#define TYPE_PRINTER 0x02
330#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
331#define TYPE_WORM 0x04 /* Treated as ROM by our system */
332#define TYPE_ROM 0x05
333#define TYPE_SCANNER 0x06
334#define TYPE_MOD 0x07 /* Magneto-optical disk -
335 * - treated as TYPE_DISK */
336#define TYPE_MEDIUM_CHANGER 0x08
337#define TYPE_COMM 0x09 /* Communications device */
338#define TYPE_RAID 0x0c
339#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
340#define TYPE_RBC 0x0e
341#define TYPE_OSD 0x11
342#define TYPE_ZBC 0x14
343#define TYPE_WLUN 0x1e /* well-known logical unit */
344#define TYPE_NO_LUN 0x7f
345
346/* SCSI protocols; these are taken from SPC-3 section 7.5 */
347enum scsi_protocol {
348 SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
349 SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
350 SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
351 SCSI_PROTOCOL_SBP = 3, /* firewire */
352 SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
353 SCSI_PROTOCOL_ISCSI = 5,
354 SCSI_PROTOCOL_SAS = 6,
355 SCSI_PROTOCOL_ADT = 7, /* Media Changers */
356 SCSI_PROTOCOL_ATA = 8,
357 SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
358};
359
360/* Returns a human-readable name for the device */
361extern const char * scsi_device_type(unsigned type);
362 84
363/* 85/*
364 * standard mode-select header prepended to all mode-select commands 86 * standard mode-select header prepended to all mode-select commands
@@ -380,13 +102,6 @@ struct ccs_modesel_head {
380}; 102};
381 103
382/* 104/*
383 * ScsiLun: 8 byte LUN.
384 */
385struct scsi_lun {
386 __u8 scsi_lun[8];
387};
388
389/*
390 * The Well Known LUNS (SAM-3) in our int representation of a LUN 105 * The Well Known LUNS (SAM-3) in our int representation of a LUN
391 */ 106 */
392#define SCSI_W_LUN_BASE 0xc100 107#define SCSI_W_LUN_BASE 0xc100
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
new file mode 100644
index 000000000000..676b03b78e57
--- /dev/null
+++ b/include/scsi/scsi_common.h
@@ -0,0 +1,64 @@
1/*
2 * Functions used by both the SCSI initiator code and the SCSI target code.
3 */
4
5#ifndef _SCSI_COMMON_H_
6#define _SCSI_COMMON_H_
7
8#include <linux/types.h>
9#include <scsi/scsi_proto.h>
10
11static inline unsigned
12scsi_varlen_cdb_length(const void *hdr)
13{
14 return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
15}
16
17extern const unsigned char scsi_command_size_tbl[8];
18#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
19
20static inline unsigned
21scsi_command_size(const unsigned char *cmnd)
22{
23 return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
24 scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
25}
26
27/* Returns a human-readable name for the device */
28extern const char *scsi_device_type(unsigned type);
29
30extern void int_to_scsilun(u64, struct scsi_lun *);
31extern u64 scsilun_to_int(struct scsi_lun *);
32
33/*
34 * This is a slightly modified SCSI sense "descriptor" format header.
35 * The addition is to allow the 0x70 and 0x71 response codes. The idea
36 * is to place the salient data from either "fixed" or "descriptor" sense
37 * format into one structure to ease application processing.
38 *
39 * The original sense buffer should be kept around for those cases
40 * in which more information is required (e.g. the LBA of a MEDIUM ERROR).
41 */
42struct scsi_sense_hdr { /* See SPC-3 section 4.5 */
43 u8 response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
44 u8 sense_key;
45 u8 asc;
46 u8 ascq;
47 u8 byte4;
48 u8 byte5;
49 u8 byte6;
50 u8 additional_length; /* always 0 for fixed sense format */
51};
52
53static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
54{
55 if (!sshdr)
56 return false;
57
58 return (sshdr->response_code & 0x70) == 0x70;
59}
60
61extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
62 struct scsi_sense_hdr *sshdr);
63
64#endif /* _SCSI_COMMON_H_ */
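
A small sketch (function name illustrative) of the sense-handling helpers now shared through scsi_common.h:

static void example_report_sense(const u8 *sense_buffer, int sb_len)
{
        struct scsi_sense_hdr sshdr;

        if (scsi_normalize_sense(sense_buffer, sb_len, &sshdr) &&
            scsi_sense_valid(&sshdr))
                pr_info("sense key %#x asc %#x ascq %#x\n",
                        sshdr.sense_key, sshdr.asc, sshdr.ascq);
}
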
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index a4c9336811d1..ae84b2214d40 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -413,8 +413,6 @@ extern void scsi_target_reap(struct scsi_target *);
413extern void scsi_target_block(struct device *); 413extern void scsi_target_block(struct device *);
414extern void scsi_target_unblock(struct device *, enum scsi_device_state); 414extern void scsi_target_unblock(struct device *, enum scsi_device_state);
415extern void scsi_remove_target(struct device *); 415extern void scsi_remove_target(struct device *);
416extern void int_to_scsilun(u64, struct scsi_lun *);
417extern u64 scsilun_to_int(struct scsi_lun *);
418extern const char *scsi_device_state_name(enum scsi_device_state); 416extern const char *scsi_device_state_name(enum scsi_device_state);
419extern int scsi_is_sdev_device(const struct device *); 417extern int scsi_is_sdev_device(const struct device *);
420extern int scsi_is_target_device(const struct device *); 418extern int scsi_is_target_device(const struct device *);
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 5a4bb5bb66b3..4942710ef720 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -7,43 +7,12 @@
7struct scsi_device; 7struct scsi_device;
8struct Scsi_Host; 8struct Scsi_Host;
9 9
10/*
11 * This is a slightly modified SCSI sense "descriptor" format header.
12 * The addition is to allow the 0x70 and 0x71 response codes. The idea
13 * is to place the salient data from either "fixed" or "descriptor" sense
14 * format into one structure to ease application processing.
15 *
16 * The original sense buffer should be kept around for those cases
17 * in which more information is required (e.g. the LBA of a MEDIUM ERROR).
18 */
19struct scsi_sense_hdr { /* See SPC-3 section 4.5 */
20 u8 response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
21 u8 sense_key;
22 u8 asc;
23 u8 ascq;
24 u8 byte4;
25 u8 byte5;
26 u8 byte6;
27 u8 additional_length; /* always 0 for fixed sense format */
28};
29
30static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
31{
32 if (!sshdr)
33 return false;
34
35 return (sshdr->response_code & 0x70) == 0x70;
36}
37
38
39extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 10extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
40 struct list_head *done_q); 11 struct list_head *done_q);
41extern void scsi_eh_flush_done_q(struct list_head *done_q); 12extern void scsi_eh_flush_done_q(struct list_head *done_q);
42extern void scsi_report_bus_reset(struct Scsi_Host *, int); 13extern void scsi_report_bus_reset(struct Scsi_Host *, int);
43extern void scsi_report_device_reset(struct Scsi_Host *, int, int); 14extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
44extern int scsi_block_when_processing_errors(struct scsi_device *); 15extern int scsi_block_when_processing_errors(struct scsi_device *);
45extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
46 struct scsi_sense_hdr *sshdr);
47extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd, 16extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
48 struct scsi_sense_hdr *sshdr); 17 struct scsi_sense_hdr *sshdr);
49 18
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
new file mode 100644
index 000000000000..a9fbf1b38e71
--- /dev/null
+++ b/include/scsi/scsi_proto.h
@@ -0,0 +1,281 @@
1/*
2 * This header file contains public constants and structures used by
3 * both the SCSI initiator and the SCSI target code.
4 *
5 * For documentation on the OPCODES, MESSAGES, and SENSE values,
6 * please consult the SCSI standard.
7 */
8
9#ifndef _SCSI_PROTO_H_
10#define _SCSI_PROTO_H_
11
12#include <linux/types.h>
13
14/*
15 * SCSI opcodes
16 */
17
18#define TEST_UNIT_READY 0x00
19#define REZERO_UNIT 0x01
20#define REQUEST_SENSE 0x03
21#define FORMAT_UNIT 0x04
22#define READ_BLOCK_LIMITS 0x05
23#define REASSIGN_BLOCKS 0x07
24#define INITIALIZE_ELEMENT_STATUS 0x07
25#define READ_6 0x08
26#define WRITE_6 0x0a
27#define SEEK_6 0x0b
28#define READ_REVERSE 0x0f
29#define WRITE_FILEMARKS 0x10
30#define SPACE 0x11
31#define INQUIRY 0x12
32#define RECOVER_BUFFERED_DATA 0x14
33#define MODE_SELECT 0x15
34#define RESERVE 0x16
35#define RELEASE 0x17
36#define COPY 0x18
37#define ERASE 0x19
38#define MODE_SENSE 0x1a
39#define START_STOP 0x1b
40#define RECEIVE_DIAGNOSTIC 0x1c
41#define SEND_DIAGNOSTIC 0x1d
42#define ALLOW_MEDIUM_REMOVAL 0x1e
43
44#define READ_FORMAT_CAPACITIES 0x23
45#define SET_WINDOW 0x24
46#define READ_CAPACITY 0x25
47#define READ_10 0x28
48#define WRITE_10 0x2a
49#define SEEK_10 0x2b
50#define POSITION_TO_ELEMENT 0x2b
51#define WRITE_VERIFY 0x2e
52#define VERIFY 0x2f
53#define SEARCH_HIGH 0x30
54#define SEARCH_EQUAL 0x31
55#define SEARCH_LOW 0x32
56#define SET_LIMITS 0x33
57#define PRE_FETCH 0x34
58#define READ_POSITION 0x34
59#define SYNCHRONIZE_CACHE 0x35
60#define LOCK_UNLOCK_CACHE 0x36
61#define READ_DEFECT_DATA 0x37
62#define MEDIUM_SCAN 0x38
63#define COMPARE 0x39
64#define COPY_VERIFY 0x3a
65#define WRITE_BUFFER 0x3b
66#define READ_BUFFER 0x3c
67#define UPDATE_BLOCK 0x3d
68#define READ_LONG 0x3e
69#define WRITE_LONG 0x3f
70#define CHANGE_DEFINITION 0x40
71#define WRITE_SAME 0x41
72#define UNMAP 0x42
73#define READ_TOC 0x43
74#define READ_HEADER 0x44
75#define GET_EVENT_STATUS_NOTIFICATION 0x4a
76#define LOG_SELECT 0x4c
77#define LOG_SENSE 0x4d
78#define XDWRITEREAD_10 0x53
79#define MODE_SELECT_10 0x55
80#define RESERVE_10 0x56
81#define RELEASE_10 0x57
82#define MODE_SENSE_10 0x5a
83#define PERSISTENT_RESERVE_IN 0x5e
84#define PERSISTENT_RESERVE_OUT 0x5f
85#define VARIABLE_LENGTH_CMD 0x7f
86#define REPORT_LUNS 0xa0
87#define SECURITY_PROTOCOL_IN 0xa2
88#define MAINTENANCE_IN 0xa3
89#define MAINTENANCE_OUT 0xa4
90#define MOVE_MEDIUM 0xa5
91#define EXCHANGE_MEDIUM 0xa6
92#define READ_12 0xa8
93#define SERVICE_ACTION_OUT_12 0xa9
94#define WRITE_12 0xaa
95#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */
96#define SERVICE_ACTION_IN_12 0xab
97#define WRITE_VERIFY_12 0xae
98#define VERIFY_12 0xaf
99#define SEARCH_HIGH_12 0xb0
100#define SEARCH_EQUAL_12 0xb1
101#define SEARCH_LOW_12 0xb2
102#define SECURITY_PROTOCOL_OUT 0xb5
103#define READ_ELEMENT_STATUS 0xb8
104#define SEND_VOLUME_TAG 0xb6
105#define WRITE_LONG_2 0xea
106#define EXTENDED_COPY 0x83
107#define RECEIVE_COPY_RESULTS 0x84
108#define ACCESS_CONTROL_IN 0x86
109#define ACCESS_CONTROL_OUT 0x87
110#define READ_16 0x88
111#define COMPARE_AND_WRITE 0x89
112#define WRITE_16 0x8a
113#define READ_ATTRIBUTE 0x8c
114#define WRITE_ATTRIBUTE 0x8d
115#define VERIFY_16 0x8f
116#define SYNCHRONIZE_CACHE_16 0x91
117#define WRITE_SAME_16 0x93
118#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
119#define SERVICE_ACTION_IN_16 0x9e
120#define SERVICE_ACTION_OUT_16 0x9f
121/* values for service action in */
122#define SAI_READ_CAPACITY_16 0x10
123#define SAI_GET_LBA_STATUS 0x12
124#define SAI_REPORT_REFERRALS 0x13
125/* values for VARIABLE_LENGTH_CMD service action codes
126 * see spc4r17 Section D.3.5, table D.7 and D.8 */
127#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
128/* values for maintenance in */
129#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
130#define MI_REPORT_TARGET_PGS 0x0a
131#define MI_REPORT_ALIASES 0x0b
132#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
133#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
134#define MI_REPORT_PRIORITY 0x0e
135#define MI_REPORT_TIMESTAMP 0x0f
136#define MI_MANAGEMENT_PROTOCOL_IN 0x10
137/* value for MI_REPORT_TARGET_PGS ext header */
138#define MI_EXT_HDR_PARAM_FMT 0x20
139/* values for maintenance out */
140#define MO_SET_IDENTIFYING_INFORMATION 0x06
141#define MO_SET_TARGET_PGS 0x0a
142#define MO_CHANGE_ALIASES 0x0b
143#define MO_SET_PRIORITY 0x0e
144#define MO_SET_TIMESTAMP 0x0f
145#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
146/* values for variable length command */
147#define XDREAD_32 0x03
148#define XDWRITE_32 0x04
149#define XPWRITE_32 0x06
150#define XDWRITEREAD_32 0x07
151#define READ_32 0x09
152#define VERIFY_32 0x0a
153#define WRITE_32 0x0b
154#define WRITE_SAME_32 0x0d
155
156/* Values for T10/04-262r7 */
157#define ATA_16 0x85 /* 16-byte pass-thru */
158#define ATA_12 0xa1 /* 12-byte pass-thru */
159
160/* Vendor specific CDBs start here */
161#define VENDOR_SPECIFIC_CDB 0xc0
162
163/*
164 * SCSI command lengths
165 */
166
167#define SCSI_MAX_VARLEN_CDB_SIZE 260
168
169/* defined in T10 SCSI Primary Commands-2 (SPC2) */
170struct scsi_varlen_cdb_hdr {
171 __u8 opcode; /* opcode always == VARIABLE_LENGTH_CMD */
172 __u8 control;
173 __u8 misc[5];
174 __u8 additional_cdb_length; /* total cdb length - 8 */
175 __be16 service_action;
176 /* service specific data follows */
177};
178
179/*
180 * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
181 * T10/1561-D Revision 4 Draft dated 7th November 2002.
182 */
183#define SAM_STAT_GOOD 0x00
184#define SAM_STAT_CHECK_CONDITION 0x02
185#define SAM_STAT_CONDITION_MET 0x04
186#define SAM_STAT_BUSY 0x08
187#define SAM_STAT_INTERMEDIATE 0x10
188#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
189#define SAM_STAT_RESERVATION_CONFLICT 0x18
190#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
191#define SAM_STAT_TASK_SET_FULL 0x28
192#define SAM_STAT_ACA_ACTIVE 0x30
193#define SAM_STAT_TASK_ABORTED 0x40
194
195/*
196 * Status codes. These are deprecated as they are shifted 1 bit right
197 * from those found in the SCSI standards. This causes confusion for
198 * applications that are ported to several OSes. Prefer SAM Status codes
199 * above.
200 */
201
202#define GOOD 0x00
203#define CHECK_CONDITION 0x01
204#define CONDITION_GOOD 0x02
205#define BUSY 0x04
206#define INTERMEDIATE_GOOD 0x08
207#define INTERMEDIATE_C_GOOD 0x0a
208#define RESERVATION_CONFLICT 0x0c
209#define COMMAND_TERMINATED 0x11
210#define QUEUE_FULL 0x14
211#define ACA_ACTIVE 0x18
212#define TASK_ABORTED 0x20
213
214#define STATUS_MASK 0xfe
215
216/*
217 * SENSE KEYS
218 */
219
220#define NO_SENSE 0x00
221#define RECOVERED_ERROR 0x01
222#define NOT_READY 0x02
223#define MEDIUM_ERROR 0x03
224#define HARDWARE_ERROR 0x04
225#define ILLEGAL_REQUEST 0x05
226#define UNIT_ATTENTION 0x06
227#define DATA_PROTECT 0x07
228#define BLANK_CHECK 0x08
229#define COPY_ABORTED 0x0a
230#define ABORTED_COMMAND 0x0b
231#define VOLUME_OVERFLOW 0x0d
232#define MISCOMPARE 0x0e
233
234
235/*
236 * DEVICE TYPES
237 * Please keep them in 0x%02x format for $MODALIAS to work
238 */
239
240#define TYPE_DISK 0x00
241#define TYPE_TAPE 0x01
242#define TYPE_PRINTER 0x02
243#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
244#define TYPE_WORM 0x04 /* Treated as ROM by our system */
245#define TYPE_ROM 0x05
246#define TYPE_SCANNER 0x06
247#define TYPE_MOD 0x07 /* Magneto-optical disk -
248 * - treated as TYPE_DISK */
249#define TYPE_MEDIUM_CHANGER 0x08
250#define TYPE_COMM 0x09 /* Communications device */
251#define TYPE_RAID 0x0c
252#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
253#define TYPE_RBC 0x0e
254#define TYPE_OSD 0x11
255#define TYPE_ZBC 0x14
256#define TYPE_WLUN 0x1e /* well-known logical unit */
257#define TYPE_NO_LUN 0x7f
258
259/* SCSI protocols; these are taken from SPC-3 section 7.5 */
260enum scsi_protocol {
261 SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
262 SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
263 SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
264 SCSI_PROTOCOL_SBP = 3, /* firewire */
265 SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
266 SCSI_PROTOCOL_ISCSI = 5,
267 SCSI_PROTOCOL_SAS = 6,
268 SCSI_PROTOCOL_ADT = 7, /* Media Changers */
269 SCSI_PROTOCOL_ATA = 8,
270 SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
271};
272
273/*
274 * ScsiLun: 8 byte LUN.
275 */
276struct scsi_lun {
277 __u8 scsi_lun[8];
278};
279
280
281#endif /* _SCSI_PROTO_H_ */
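As a quick illustration of the scsi_varlen_cdb_hdr layout above, a 32-byte variable-length command such as READ_32 would be assembled roughly as in the sketch below; this is not taken from the patch, the on-stack buffer is illustrative, and cpu_to_be16() is the usual kernel byte-order helper:

	unsigned char cdb[32] = { 0 };
	struct scsi_varlen_cdb_hdr *hdr = (struct scsi_varlen_cdb_hdr *)cdb;

	hdr->opcode = VARIABLE_LENGTH_CMD;		/* always 0x7f */
	hdr->additional_cdb_length = sizeof(cdb) - 8;	/* total CDB length - 8 */
	hdr->service_action = cpu_to_be16(READ_32);	/* 0x09 from the list above */
	/* command-specific fields (LBA, transfer length, ...) follow the header */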
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index cdb05dd1d440..d40d3ef25707 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -119,6 +119,7 @@ extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
119extern void srp_rport_del(struct srp_rport *); 119extern void srp_rport_del(struct srp_rport *);
120extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, 120extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
121 int dev_loss_tmo); 121 int dev_loss_tmo);
122int srp_parse_tmo(int *tmo, const char *buf);
122extern int srp_reconnect_rport(struct srp_rport *rport); 123extern int srp_reconnect_rport(struct srp_rport *rport);
123extern void srp_start_tl_fail_timers(struct srp_rport *rport); 124extern void srp_start_tl_fail_timers(struct srp_rport *rport);
124extern void srp_remove_host(struct Scsi_Host *); 125extern void srp_remove_host(struct Scsi_Host *);
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 1ae84db4c9fb..5be834de491a 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -42,6 +42,7 @@
42 */ 42 */
43 43
44#include <linux/types.h> 44#include <linux/types.h>
45#include <scsi/scsi.h>
45 46
46enum { 47enum {
47 SRP_LOGIN_REQ = 0x00, 48 SRP_LOGIN_REQ = 0x00,
@@ -179,7 +180,7 @@ struct srp_tsk_mgmt {
179 u8 reserved1[6]; 180 u8 reserved1[6];
180 u64 tag; 181 u64 tag;
181 u8 reserved2[4]; 182 u8 reserved2[4];
182 __be64 lun __attribute__((packed)); 183 struct scsi_lun lun;
183 u8 reserved3[2]; 184 u8 reserved3[2];
184 u8 tsk_mgmt_func; 185 u8 tsk_mgmt_func;
185 u8 reserved4; 186 u8 reserved4;
@@ -200,7 +201,7 @@ struct srp_cmd {
200 u8 data_in_desc_cnt; 201 u8 data_in_desc_cnt;
201 u64 tag; 202 u64 tag;
202 u8 reserved2[4]; 203 u8 reserved2[4];
203 __be64 lun __attribute__((packed)); 204 struct scsi_lun lun;
204 u8 reserved3; 205 u8 reserved3;
205 u8 task_attr; 206 u8 task_attr;
206 u8 reserved4; 207 u8 reserved4;
@@ -265,7 +266,7 @@ struct srp_aer_req {
265 __be32 req_lim_delta; 266 __be32 req_lim_delta;
266 u64 tag; 267 u64 tag;
267 u32 reserved2; 268 u32 reserved2;
268 __be64 lun; 269 struct scsi_lun lun;
269 __be32 sense_data_len; 270 __be32 sense_data_len;
270 u32 reserved3; 271 u32 reserved3;
271 u8 sense_data[0]; 272 u8 sense_data[0];
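With the LUN fields switched from a packed __be64 to struct scsi_lun, SRP initiators are expected to encode the LUN through the SCSI core instead of open-coding the byte order; a minimal sketch, assuming the int_to_scsi_lun() helper and that scmnd is the struct scsi_cmnd being mapped onto the request IU:

	static void srp_fill_cmd_lun(struct srp_cmd *cmd, struct scsi_cmnd *scmnd)
	{
		/* encode the 64-bit LUN into the 8-byte SAM-format scsi_lun */
		int_to_scsi_lun(scmnd->device->lun, &cmd->lun);
	}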
diff --git a/include/soc/at91/at91rm9200_sdramc.h b/include/soc/at91/at91rm9200_sdramc.h
deleted file mode 100644
index aa047f458f1b..000000000000
--- a/include/soc/at91/at91rm9200_sdramc.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h
3 *
4 * Copyright (C) 2005 Ivan Kokshaysky
5 * Copyright (C) SAN People
6 *
7 * Memory Controllers (SDRAMC only) - System peripherals registers.
8 * Based on AT91RM9200 datasheet revision E.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef AT91RM9200_SDRAMC_H
17#define AT91RM9200_SDRAMC_H
18
19/* SDRAM Controller registers */
20#define AT91RM9200_SDRAMC_MR 0x90 /* Mode Register */
21#define AT91RM9200_SDRAMC_MODE (0xf << 0) /* Command Mode */
22#define AT91RM9200_SDRAMC_MODE_NORMAL (0 << 0)
23#define AT91RM9200_SDRAMC_MODE_NOP (1 << 0)
24#define AT91RM9200_SDRAMC_MODE_PRECHARGE (2 << 0)
25#define AT91RM9200_SDRAMC_MODE_LMR (3 << 0)
26#define AT91RM9200_SDRAMC_MODE_REFRESH (4 << 0)
27#define AT91RM9200_SDRAMC_DBW (1 << 4) /* Data Bus Width */
28#define AT91RM9200_SDRAMC_DBW_32 (0 << 4)
29#define AT91RM9200_SDRAMC_DBW_16 (1 << 4)
30
31#define AT91RM9200_SDRAMC_TR 0x94 /* Refresh Timer Register */
32#define AT91RM9200_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Count */
33
34#define AT91RM9200_SDRAMC_CR 0x98 /* Configuration Register */
35#define AT91RM9200_SDRAMC_NC (3 << 0) /* Number of Column Bits */
36#define AT91RM9200_SDRAMC_NC_8 (0 << 0)
37#define AT91RM9200_SDRAMC_NC_9 (1 << 0)
38#define AT91RM9200_SDRAMC_NC_10 (2 << 0)
39#define AT91RM9200_SDRAMC_NC_11 (3 << 0)
40#define AT91RM9200_SDRAMC_NR (3 << 2) /* Number of Row Bits */
41#define AT91RM9200_SDRAMC_NR_11 (0 << 2)
42#define AT91RM9200_SDRAMC_NR_12 (1 << 2)
43#define AT91RM9200_SDRAMC_NR_13 (2 << 2)
44#define AT91RM9200_SDRAMC_NB (1 << 4) /* Number of Banks */
45#define AT91RM9200_SDRAMC_NB_2 (0 << 4)
46#define AT91RM9200_SDRAMC_NB_4 (1 << 4)
47#define AT91RM9200_SDRAMC_CAS (3 << 5) /* CAS Latency */
48#define AT91RM9200_SDRAMC_CAS_2 (2 << 5)
49#define AT91RM9200_SDRAMC_TWR (0xf << 7) /* Write Recovery Delay */
50#define AT91RM9200_SDRAMC_TRC (0xf << 11) /* Row Cycle Delay */
51#define AT91RM9200_SDRAMC_TRP (0xf << 15) /* Row Precharge Delay */
52#define AT91RM9200_SDRAMC_TRCD (0xf << 19) /* Row to Column Delay */
53#define AT91RM9200_SDRAMC_TRAS (0xf << 23) /* Active to Precharge Delay */
54#define AT91RM9200_SDRAMC_TXSR (0xf << 27) /* Exit Self Refresh to Active Delay */
55
56#define AT91RM9200_SDRAMC_SRR 0x9c /* Self Refresh Register */
57#define AT91RM9200_SDRAMC_LPR 0xa0 /* Low Power Register */
58#define AT91RM9200_SDRAMC_IER 0xa4 /* Interrupt Enable Register */
59#define AT91RM9200_SDRAMC_IDR 0xa8 /* Interrupt Disable Register */
60#define AT91RM9200_SDRAMC_IMR 0xac /* Interrupt Mask Register */
61#define AT91RM9200_SDRAMC_ISR 0xb0 /* Interrupt Status Register */
62
63#endif
diff --git a/include/soc/imx/revision.h b/include/soc/imx/revision.h
new file mode 100644
index 000000000000..9ea346924c35
--- /dev/null
+++ b/include/soc/imx/revision.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright 2015 Linaro Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __SOC_IMX_REVISION_H__
10#define __SOC_IMX_REVISION_H__
11
12#define IMX_CHIP_REVISION_1_0 0x10
13#define IMX_CHIP_REVISION_1_1 0x11
14#define IMX_CHIP_REVISION_1_2 0x12
15#define IMX_CHIP_REVISION_1_3 0x13
16#define IMX_CHIP_REVISION_1_4 0x14
17#define IMX_CHIP_REVISION_1_5 0x15
18#define IMX_CHIP_REVISION_2_0 0x20
19#define IMX_CHIP_REVISION_2_1 0x21
20#define IMX_CHIP_REVISION_2_2 0x22
21#define IMX_CHIP_REVISION_2_3 0x23
22#define IMX_CHIP_REVISION_3_0 0x30
23#define IMX_CHIP_REVISION_3_1 0x31
24#define IMX_CHIP_REVISION_3_2 0x32
25#define IMX_CHIP_REVISION_3_3 0x33
26#define IMX_CHIP_REVISION_UNKNOWN 0xff
27
28int mx27_revision(void);
29int mx31_revision(void);
30int mx35_revision(void);
31int mx51_revision(void);
32int mx53_revision(void);
33
34unsigned int imx_get_soc_revision(void);
35void imx_print_silicon_rev(const char *cpu, int srev);
36
37#endif /* __SOC_IMX_REVISION_H__ */
diff --git a/include/soc/imx/timer.h b/include/soc/imx/timer.h
new file mode 100644
index 000000000000..bbbafd65f464
--- /dev/null
+++ b/include/soc/imx/timer.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright 2015 Linaro Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __SOC_IMX_TIMER_H__
10#define __SOC_IMX_TIMER_H__
11
12enum imx_gpt_type {
13 GPT_TYPE_IMX1, /* i.MX1 */
14 GPT_TYPE_IMX21, /* i.MX21/27 */
15 GPT_TYPE_IMX31, /* i.MX31/35/25/37/51/6Q */
16 GPT_TYPE_IMX6DL, /* i.MX6DL/SX/SL */
17};
18
19/*
20 * This is a stop-gap solution for clock drivers like imx1/imx21 which call
21 * mxc_timer_init() to initialize the timer for non-DT boot. It can be removed
22 * once this legacy non-DT support is converted or dropped.
23 */
24void mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type);
25
26#endif /* __SOC_IMX_TIMER_H__ */
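A hypothetical non-DT call site in an i.MX21-class clock driver would then look like the line below; the base address and IRQ number are placeholders for this sketch, not values defined by this header:

	/* register GPT1 as the system timer during legacy (non-DT) clock init */
	mxc_timer_init(0x10003000, 26, GPT_TYPE_IMX21);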
diff --git a/include/soc/sa1100/pwer.h b/include/soc/sa1100/pwer.h
new file mode 100644
index 000000000000..15a545b5a1f6
--- /dev/null
+++ b/include/soc/sa1100/pwer.h
@@ -0,0 +1,15 @@
1#ifndef SOC_SA1100_PWER_H
2#define SOC_SA1100_PWER_H
3
4/*
5 * Copyright (C) 2015, Dmitry Eremin-Solenikov
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12int sa11x0_gpio_set_wake(unsigned int gpio, unsigned int on);
13int sa11x0_sc_set_wake(unsigned int irq, unsigned int on);
14
15#endif
diff --git a/include/soc/tegra/emc.h b/include/soc/tegra/emc.h
new file mode 100644
index 000000000000..f6db33b579ec
--- /dev/null
+++ b/include/soc/tegra/emc.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright (c) 2014 NVIDIA Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __SOC_TEGRA_EMC_H__
10#define __SOC_TEGRA_EMC_H__
11
12struct tegra_emc;
13
14int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
15 unsigned long rate);
16void tegra_emc_complete_timing_change(struct tegra_emc *emc,
17 unsigned long rate);
18
19#endif /* __SOC_TEGRA_EMC_H__ */
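The two hooks are meant to bracket the actual EMC clock rate switch; a hedged sketch of the intended pairing follows (the wrapper name, the clk handle and the use of clk_set_rate() are assumptions for illustration, not how the in-tree driver is necessarily wired):

	static int emc_set_rate(struct tegra_emc *emc, struct clk *emc_clk,
				unsigned long rate)
	{
		int err;

		err = tegra_emc_prepare_timing_change(emc, rate);
		if (err)
			return err;

		err = clk_set_rate(emc_clk, rate);	/* the actual frequency switch */
		if (err)
			return err;

		tegra_emc_complete_timing_change(emc, rate);
		return 0;
	}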
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index b5f7b5f8d008..b019e3465f11 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -56,6 +56,7 @@ struct tegra_sku_info {
56}; 56};
57 57
58u32 tegra_read_straps(void); 58u32 tegra_read_straps(void);
59u32 tegra_read_ram_code(void);
59u32 tegra_read_chipid(void); 60u32 tegra_read_chipid(void);
60int tegra_fuse_readl(unsigned long offset, u32 *value); 61int tegra_fuse_readl(unsigned long offset, u32 *value);
61 62
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 63deb8d9f82a..1ab2813273cd 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -20,6 +20,12 @@ struct tegra_smmu_enable {
20 unsigned int bit; 20 unsigned int bit;
21}; 21};
22 22
23struct tegra_mc_timing {
24 unsigned long rate;
25
26 u32 *emem_data;
27};
28
23/* latency allowance */ 29/* latency allowance */
24struct tegra_mc_la { 30struct tegra_mc_la {
25 unsigned int reg; 31 unsigned int reg;
@@ -40,6 +46,7 @@ struct tegra_mc_client {
40}; 46};
41 47
42struct tegra_smmu_swgroup { 48struct tegra_smmu_swgroup {
49 const char *name;
43 unsigned int swgroup; 50 unsigned int swgroup;
44 unsigned int reg; 51 unsigned int reg;
45}; 52};
@@ -71,6 +78,7 @@ struct tegra_smmu;
71struct tegra_smmu *tegra_smmu_probe(struct device *dev, 78struct tegra_smmu *tegra_smmu_probe(struct device *dev,
72 const struct tegra_smmu_soc *soc, 79 const struct tegra_smmu_soc *soc,
73 struct tegra_mc *mc); 80 struct tegra_mc *mc);
81void tegra_smmu_remove(struct tegra_smmu *smmu);
74#else 82#else
75static inline struct tegra_smmu * 83static inline struct tegra_smmu *
76tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc, 84tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc,
@@ -78,13 +86,17 @@ tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc,
78{ 86{
79 return NULL; 87 return NULL;
80} 88}
89
90static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
91{
92}
81#endif 93#endif
82 94
83struct tegra_mc_soc { 95struct tegra_mc_soc {
84 const struct tegra_mc_client *clients; 96 const struct tegra_mc_client *clients;
85 unsigned int num_clients; 97 unsigned int num_clients;
86 98
87 const unsigned int *emem_regs; 99 const unsigned long *emem_regs;
88 unsigned int num_emem_regs; 100 unsigned int num_emem_regs;
89 101
90 unsigned int num_address_bits; 102 unsigned int num_address_bits;
@@ -102,6 +114,12 @@ struct tegra_mc {
102 114
103 const struct tegra_mc_soc *soc; 115 const struct tegra_mc_soc *soc;
104 unsigned long tick; 116 unsigned long tick;
117
118 struct tegra_mc_timing *timings;
119 unsigned int num_timings;
105}; 120};
106 121
122void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);
123unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc);
124
107#endif /* __SOC_TEGRA_MC_H__ */ 125#endif /* __SOC_TEGRA_MC_H__ */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 65a93273e72f..f5c0de43a5fa 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -26,8 +26,6 @@
26struct clk; 26struct clk;
27struct reset_control; 27struct reset_control;
28 28
29void tegra_pmc_restart(enum reboot_mode mode, const char *cmd);
30
31#ifdef CONFIG_PM_SLEEP 29#ifdef CONFIG_PM_SLEEP
32enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void); 30enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void);
33void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode); 31void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
diff --git a/include/sound/control.h b/include/sound/control.h
index 95aad6d3fd1a..21d047f229a1 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -252,7 +252,7 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
252 * Helper functions for jack-detection controls 252 * Helper functions for jack-detection controls
253 */ 253 */
254struct snd_kcontrol * 254struct snd_kcontrol *
255snd_kctl_jack_new(const char *name, int idx, void *private_data); 255snd_kctl_jack_new(const char *name, struct snd_card *card);
256void snd_kctl_jack_report(struct snd_card *card, 256void snd_kctl_jack_report(struct snd_card *card,
257 struct snd_kcontrol *kctl, bool status); 257 struct snd_kcontrol *kctl, bool status);
258 258
diff --git a/include/sound/core.h b/include/sound/core.h
index b12931f513f4..cdfecafff0f4 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -224,16 +224,13 @@ void *snd_lookup_oss_minor_data(unsigned int minor, int type);
224#endif 224#endif
225 225
226int snd_minor_info_init(void); 226int snd_minor_info_init(void);
227int snd_minor_info_done(void);
228 227
229/* sound_oss.c */ 228/* sound_oss.c */
230 229
231#ifdef CONFIG_SND_OSSEMUL 230#ifdef CONFIG_SND_OSSEMUL
232int snd_minor_info_oss_init(void); 231int snd_minor_info_oss_init(void);
233int snd_minor_info_oss_done(void);
234#else 232#else
235static inline int snd_minor_info_oss_init(void) { return 0; } 233static inline int snd_minor_info_oss_init(void) { return 0; }
236static inline int snd_minor_info_oss_done(void) { return 0; }
237#endif 234#endif
238 235
239/* memory.c */ 236/* memory.c */
@@ -262,7 +259,6 @@ int snd_card_free_when_closed(struct snd_card *card);
262void snd_card_set_id(struct snd_card *card, const char *id); 259void snd_card_set_id(struct snd_card *card, const char *id);
263int snd_card_register(struct snd_card *card); 260int snd_card_register(struct snd_card *card);
264int snd_card_info_init(void); 261int snd_card_info_init(void);
265int snd_card_info_done(void);
266int snd_card_add_dev_attr(struct snd_card *card, 262int snd_card_add_dev_attr(struct snd_card *card,
267 const struct attribute_group *group); 263 const struct attribute_group *group);
268int snd_component_add(struct snd_card *card, const char *component); 264int snd_component_add(struct snd_card *card, const char *component);
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index eb73a3a39ec2..f86ef5ea9b01 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -91,11 +91,6 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
91 */ 91 */
92#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1) 92#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
93/* 93/*
94 * The platforms dmaengine driver does not support reporting the amount of
95 * bytes that are still left to transfer.
96 */
97#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(2)
98/*
99 * The PCM is half duplex and the DMA channel is shared between capture and 94 * The PCM is half duplex and the DMA channel is shared between capture and
100 * playback. 95 * playback.
101 */ 96 */
diff --git a/include/sound/emux_synth.h b/include/sound/emux_synth.h
index fb81f3722b6a..a0a40b74bf13 100644
--- a/include/sound/emux_synth.h
+++ b/include/sound/emux_synth.h
@@ -125,7 +125,7 @@ struct snd_emux {
125 125
126 struct snd_util_memhdr *memhdr; /* memory chunk information */ 126 struct snd_util_memhdr *memhdr; /* memory chunk information */
127 127
128#ifdef CONFIG_PROC_FS 128#ifdef CONFIG_SND_PROC_FS
129 struct snd_info_entry *proc; 129 struct snd_info_entry *proc;
130#endif 130#endif
131 131
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
new file mode 100644
index 000000000000..adb5ba5cbd9d
--- /dev/null
+++ b/include/sound/hda_i915.h
@@ -0,0 +1,36 @@
1/*
2 * HD-Audio helpers to sync with i915 driver
3 */
4#ifndef __SOUND_HDA_I915_H
5#define __SOUND_HDA_I915_H
6
7#ifdef CONFIG_SND_HDA_I915
8int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
9int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
10int snd_hdac_get_display_clk(struct hdac_bus *bus);
11int snd_hdac_i915_init(struct hdac_bus *bus);
12int snd_hdac_i915_exit(struct hdac_bus *bus);
13#else
14static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
15{
16 return 0;
17}
18static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
19{
20 return 0;
21}
22static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
23{
24 return 0;
25}
26static inline int snd_hdac_i915_init(struct hdac_bus *bus)
27{
28 return -ENODEV;
29}
30static inline int snd_hdac_i915_exit(struct hdac_bus *bus)
31{
32 return 0;
33}
34#endif
35
36#endif /* __SOUND_HDA_I915_H */
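A typical consumer binds the i915 audio component once and then brackets codec access with display power requests; a rough sketch (the function name is illustrative and error handling is trimmed):

	static int my_hdmi_codec_probe(struct hdac_bus *bus)
	{
		int err;

		err = snd_hdac_i915_init(bus);		/* bind the i915 component */
		if (err < 0)
			return err;			/* -ENODEV without CONFIG_SND_HDA_I915 */

		snd_hdac_display_power(bus, true);	/* keep the display HDA link powered */
		/* ... probe the HDMI/DP codec here ... */
		snd_hdac_display_power(bus, false);
		return 0;
	}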
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
new file mode 100644
index 000000000000..ae995e523ff8
--- /dev/null
+++ b/include/sound/hda_register.h
@@ -0,0 +1,244 @@
1/*
2 * HD-audio controller (Azalia) registers and helpers
3 *
4 * For traditional reasons, we still use azx_ prefix here
5 */
6
7#ifndef __SOUND_HDA_REGISTER_H
8#define __SOUND_HDA_REGISTER_H
9
10#include <linux/io.h>
11#include <sound/hdaudio.h>
12
13#define AZX_REG_GCAP 0x00
14#define AZX_GCAP_64OK (1 << 0) /* 64bit address support */
15#define AZX_GCAP_NSDO (3 << 1) /* # of serial data out signals */
16#define AZX_GCAP_BSS (31 << 3) /* # of bidirectional streams */
17#define AZX_GCAP_ISS (15 << 8) /* # of input streams */
18#define AZX_GCAP_OSS (15 << 12) /* # of output streams */
19#define AZX_REG_VMIN 0x02
20#define AZX_REG_VMAJ 0x03
21#define AZX_REG_OUTPAY 0x04
22#define AZX_REG_INPAY 0x06
23#define AZX_REG_GCTL 0x08
24#define AZX_GCTL_RESET (1 << 0) /* controller reset */
25#define AZX_GCTL_FCNTRL (1 << 1) /* flush control */
26#define AZX_GCTL_UNSOL (1 << 8) /* accept unsol. response enable */
27#define AZX_REG_WAKEEN 0x0c
28#define AZX_REG_STATESTS 0x0e
29#define AZX_REG_GSTS 0x10
30#define AZX_GSTS_FSTS (1 << 1) /* flush status */
31#define AZX_REG_GCAP2 0x12
32#define AZX_REG_LLCH 0x14
33#define AZX_REG_OUTSTRMPAY 0x18
34#define AZX_REG_INSTRMPAY 0x1A
35#define AZX_REG_INTCTL 0x20
36#define AZX_REG_INTSTS 0x24
37#define AZX_REG_WALLCLK 0x30 /* 24 MHz source */
38#define AZX_REG_OLD_SSYNC 0x34 /* SSYNC for old ICH */
39#define AZX_REG_SSYNC 0x38
40#define AZX_REG_CORBLBASE 0x40
41#define AZX_REG_CORBUBASE 0x44
42#define AZX_REG_CORBWP 0x48
43#define AZX_REG_CORBRP 0x4a
44#define AZX_CORBRP_RST (1 << 15) /* read pointer reset */
45#define AZX_REG_CORBCTL 0x4c
46#define AZX_CORBCTL_RUN (1 << 1) /* enable DMA */
47#define AZX_CORBCTL_CMEIE (1 << 0) /* enable memory error irq */
48#define AZX_REG_CORBSTS 0x4d
49#define AZX_CORBSTS_CMEI (1 << 0) /* memory error indication */
50#define AZX_REG_CORBSIZE 0x4e
51
52#define AZX_REG_RIRBLBASE 0x50
53#define AZX_REG_RIRBUBASE 0x54
54#define AZX_REG_RIRBWP 0x58
55#define AZX_RIRBWP_RST (1 << 15) /* write pointer reset */
56#define AZX_REG_RINTCNT 0x5a
57#define AZX_REG_RIRBCTL 0x5c
58#define AZX_RBCTL_IRQ_EN (1 << 0) /* enable IRQ */
59#define AZX_RBCTL_DMA_EN (1 << 1) /* enable DMA */
60#define AZX_RBCTL_OVERRUN_EN (1 << 2) /* enable overrun irq */
61#define AZX_REG_RIRBSTS 0x5d
62#define AZX_RBSTS_IRQ (1 << 0) /* response irq */
63#define AZX_RBSTS_OVERRUN (1 << 2) /* overrun irq */
64#define AZX_REG_RIRBSIZE 0x5e
65
66#define AZX_REG_IC 0x60
67#define AZX_REG_IR 0x64
68#define AZX_REG_IRS 0x68
69#define AZX_IRS_VALID (1<<1)
70#define AZX_IRS_BUSY (1<<0)
71
72#define AZX_REG_DPLBASE 0x70
73#define AZX_REG_DPUBASE 0x74
74#define AZX_DPLBASE_ENABLE 0x1 /* Enable position buffer */
75
76/* SD offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
77enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
78
79/* stream register offsets from stream base */
80#define AZX_REG_SD_CTL 0x00
81#define AZX_REG_SD_STS 0x03
82#define AZX_REG_SD_LPIB 0x04
83#define AZX_REG_SD_CBL 0x08
84#define AZX_REG_SD_LVI 0x0c
85#define AZX_REG_SD_FIFOW 0x0e
86#define AZX_REG_SD_FIFOSIZE 0x10
87#define AZX_REG_SD_FORMAT 0x12
88#define AZX_REG_SD_FIFOL 0x14
89#define AZX_REG_SD_BDLPL 0x18
90#define AZX_REG_SD_BDLPU 0x1c
91
92/* Haswell/Broadwell display HD-A controller Extended Mode registers */
93#define AZX_REG_HSW_EM4 0x100c
94#define AZX_REG_HSW_EM5 0x1010
95
96/* PCI space */
97#define AZX_PCIREG_TCSEL 0x44
98
99/*
100 * other constants
101 */
102
103/* max number of fragments - we may use more if allocating more pages for BDL */
104#define BDL_SIZE 4096
105#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16)
106#define AZX_MAX_FRAG 32
107/* max buffer size - no h/w limit, you can increase as you like */
108#define AZX_MAX_BUF_SIZE (1024*1024*1024)
109
110/* RIRB int mask: overrun[2], response[0] */
111#define RIRB_INT_RESPONSE 0x01
112#define RIRB_INT_OVERRUN 0x04
113#define RIRB_INT_MASK 0x05
114
115/* STATESTS int mask: S3,SD2,SD1,SD0 */
116#define STATESTS_INT_MASK ((1 << HDA_MAX_CODECS) - 1)
117
118/* SD_CTL bits */
119#define SD_CTL_STREAM_RESET 0x01 /* stream reset bit */
120#define SD_CTL_DMA_START 0x02 /* stream DMA start bit */
121#define SD_CTL_STRIPE (3 << 16) /* stripe control */
122#define SD_CTL_TRAFFIC_PRIO (1 << 18) /* traffic priority */
123#define SD_CTL_DIR (1 << 19) /* bi-directional stream */
124#define SD_CTL_STREAM_TAG_MASK (0xf << 20)
125#define SD_CTL_STREAM_TAG_SHIFT 20
126
127/* SD_CTL and SD_STS */
128#define SD_INT_DESC_ERR 0x10 /* descriptor error interrupt */
129#define SD_INT_FIFO_ERR 0x08 /* FIFO error interrupt */
130#define SD_INT_COMPLETE 0x04 /* completion interrupt */
131#define SD_INT_MASK (SD_INT_DESC_ERR|SD_INT_FIFO_ERR|\
132 SD_INT_COMPLETE)
133
134/* SD_STS */
135#define SD_STS_FIFO_READY 0x20 /* FIFO ready */
136
137/* INTCTL and INTSTS */
138#define AZX_INT_ALL_STREAM 0xff /* all stream interrupts */
139#define AZX_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */
140#define AZX_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */
141
142/* below are so far hardcoded - should be read from registers in the future */
143#define AZX_MAX_CORB_ENTRIES 256
144#define AZX_MAX_RIRB_ENTRIES 256
145
146/* Capability header Structure */
147#define AZX_REG_CAP_HDR 0x0
148#define AZX_CAP_HDR_VER_OFF 28
149#define AZX_CAP_HDR_VER_MASK (0xF << AZX_CAP_HDR_VER_OFF)
150#define AZX_CAP_HDR_ID_OFF 16
151#define AZX_CAP_HDR_ID_MASK (0xFFF << AZX_CAP_HDR_ID_OFF)
152#define AZX_CAP_HDR_NXT_PTR_MASK 0xFFFF
153
154/* registers of Software Position Based FIFO Capability Structure */
155#define AZX_SPB_CAP_ID 0x4
156#define AZX_REG_SPB_BASE_ADDR 0x700
157#define AZX_REG_SPB_SPBFCH 0x00
158#define AZX_REG_SPB_SPBFCCTL 0x04
159/* Base used to calculate the iterating register offset */
160#define AZX_SPB_BASE 0x08
161/* Interval used to calculate the iterating register offset */
162#define AZX_SPB_INTERVAL 0x08
163
164/* registers of Global Time Synchronization Capability Structure */
165#define AZX_GTS_CAP_ID 0x1
166#define AZX_REG_GTS_GTSCH 0x00
167#define AZX_REG_GTS_GTSCD 0x04
168#define AZX_REG_GTS_GTSCTLAC 0x0C
169#define AZX_GTS_BASE 0x20
170#define AZX_GTS_INTERVAL 0x20
171
172/* registers for Processing Pipe Capability Structure */
173#define AZX_PP_CAP_ID 0x3
174#define AZX_REG_PP_PPCH 0x10
175#define AZX_REG_PP_PPCTL 0x04
176#define AZX_PPCTL_PIE (1<<31)
177#define AZX_PPCTL_GPROCEN (1<<30)
178/* _X_ = dma engine # and cannot exceed 29 (per spec max 30 dma engines) */
179#define AZX_PPCTL_PROCEN(_X_) (1<<(_X_))
180
181#define AZX_REG_PP_PPSTS 0x08
182
183#define AZX_PPHC_BASE 0x10
184#define AZX_PPHC_INTERVAL 0x10
185
186#define AZX_REG_PPHCLLPL 0x0
187#define AZX_REG_PPHCLLPU 0x4
188#define AZX_REG_PPHCLDPL 0x8
189#define AZX_REG_PPHCLDPU 0xC
190
191#define AZX_PPLC_BASE 0x10
192#define AZX_PPLC_MULTI 0x10
193#define AZX_PPLC_INTERVAL 0x10
194
195#define AZX_REG_PPLCCTL 0x0
196#define AZX_PPLCCTL_STRM_BITS 4
197#define AZX_PPLCCTL_STRM_SHIFT 20
198#define AZX_REG_MASK(bit_num, offset) \
199 (((1 << (bit_num)) - 1) << (offset))
200#define AZX_PPLCCTL_STRM_MASK \
201 AZX_REG_MASK(AZX_PPLCCTL_STRM_BITS, AZX_PPLCCTL_STRM_SHIFT)
202#define AZX_PPLCCTL_RUN (1<<1)
203#define AZX_PPLCCTL_STRST (1<<0)
204
205#define AZX_REG_PPLCFMT 0x4
206#define AZX_REG_PPLCLLPL 0x8
207#define AZX_REG_PPLCLLPU 0xC
208
209/* registers for Multiple Links Capability Structure */
210#define AZX_ML_CAP_ID 0x2
211#define AZX_REG_ML_MLCH 0x00
212#define AZX_REG_ML_MLCD 0x04
213#define AZX_ML_BASE 0x40
214#define AZX_ML_INTERVAL 0x40
215
216#define AZX_REG_ML_LCAP 0x00
217#define AZX_REG_ML_LCTL 0x04
218#define AZX_REG_ML_LOSIDV 0x08
219#define AZX_REG_ML_LSDIID 0x0C
220#define AZX_REG_ML_LPSOO 0x10
221#define AZX_REG_ML_LPSIO 0x12
222#define AZX_REG_ML_LWALFC 0x18
223#define AZX_REG_ML_LOUTPAY 0x20
224#define AZX_REG_ML_LINPAY 0x30
225
226#define AZX_MLCTL_SPA (1<<16)
227#define AZX_MLCTL_CPA 23
228
229/*
230 * helpers to read the stream position
231 */
232static inline unsigned int
233snd_hdac_stream_get_pos_lpib(struct hdac_stream *stream)
234{
235 return snd_hdac_stream_readl(stream, SD_LPIB);
236}
237
238static inline unsigned int
239snd_hdac_stream_get_pos_posbuf(struct hdac_stream *stream)
240{
241 return le32_to_cpu(*stream->posbuf);
242}
243
244#endif /* __SOUND_HDA_REGISTER_H */
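Which of the two position helpers applies depends on whether the controller exposes a usable DMA position buffer; a small sketch of the usual selection, relying on the use_posbuf flag and posbuf pointer added to struct hdac_bus and struct hdac_stream in the hdaudio.h changes below (the wrapper name is illustrative):

	static unsigned int my_stream_get_pos(struct hdac_stream *s)
	{
		/* prefer the DMA position buffer when enabled, else fall back to LPIB */
		if (s->bus->use_posbuf && s->posbuf)
			return snd_hdac_stream_get_pos_posbuf(s);
		return snd_hdac_stream_get_pos_lpib(s);
	}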
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 2a8aa9dfb83d..4caf1fde8a4f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -6,12 +6,18 @@
6#define __SOUND_HDAUDIO_H 6#define __SOUND_HDAUDIO_H
7 7
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/interrupt.h>
10#include <linux/timecounter.h>
11#include <sound/core.h>
12#include <sound/memalloc.h>
9#include <sound/hda_verbs.h> 13#include <sound/hda_verbs.h>
14#include <drm/i915_component.h>
10 15
11/* codec node id */ 16/* codec node id */
12typedef u16 hda_nid_t; 17typedef u16 hda_nid_t;
13 18
14struct hdac_bus; 19struct hdac_bus;
20struct hdac_stream;
15struct hdac_device; 21struct hdac_device;
16struct hdac_driver; 22struct hdac_driver;
17struct hdac_widget_tree; 23struct hdac_widget_tree;
@@ -22,6 +28,16 @@ struct hdac_widget_tree;
22extern struct bus_type snd_hda_bus_type; 28extern struct bus_type snd_hda_bus_type;
23 29
24/* 30/*
31 * HDA device table
32 */
33struct hda_device_id {
34 __u32 vendor_id;
35 __u32 rev_id;
36 const char *name;
37 unsigned long driver_data;
38};
39
40/*
25 * generic arrays 41 * generic arrays
26 */ 42 */
27struct snd_array { 43struct snd_array {
@@ -69,6 +85,7 @@ struct hdac_device {
69 85
70 /* misc flags */ 86 /* misc flags */
71 atomic_t in_pm; /* suspend/resume being performed */ 87 atomic_t in_pm; /* suspend/resume being performed */
88 bool link_power_control:1;
72 89
73 /* sysfs */ 90 /* sysfs */
74 struct hdac_widget_tree *widgets; 91 struct hdac_widget_tree *widgets;
@@ -85,6 +102,7 @@ struct hdac_device {
85enum { 102enum {
86 HDA_DEV_CORE, 103 HDA_DEV_CORE,
87 HDA_DEV_LEGACY, 104 HDA_DEV_LEGACY,
105 HDA_DEV_ASOC,
88}; 106};
89 107
90/* direction */ 108/* direction */
@@ -118,6 +136,15 @@ int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid,
118 hda_nid_t *conn_list, int max_conns); 136 hda_nid_t *conn_list, int max_conns);
119int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid, 137int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
120 hda_nid_t *start_id); 138 hda_nid_t *start_id);
139unsigned int snd_hdac_calc_stream_format(unsigned int rate,
140 unsigned int channels,
141 unsigned int format,
142 unsigned int maxbps,
143 unsigned short spdif_ctls);
144int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
145 u32 *ratesp, u64 *formatsp, unsigned int *bpsp);
146bool snd_hdac_is_supported_format(struct hdac_device *codec, hda_nid_t nid,
147 unsigned int format);
121 148
122/** 149/**
123 * snd_hdac_read_parm - read a codec parameter 150 * snd_hdac_read_parm - read a codec parameter
@@ -154,14 +181,18 @@ static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
154struct hdac_driver { 181struct hdac_driver {
155 struct device_driver driver; 182 struct device_driver driver;
156 int type; 183 int type;
184 const struct hda_device_id *id_table;
157 int (*match)(struct hdac_device *dev, struct hdac_driver *drv); 185 int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
158 void (*unsol_event)(struct hdac_device *dev, unsigned int event); 186 void (*unsol_event)(struct hdac_device *dev, unsigned int event);
159}; 187};
160 188
161#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver) 189#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
162 190
191const struct hda_device_id *
192hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv);
193
163/* 194/*
164 * HD-audio bus base driver 195 * Bus verb operators
165 */ 196 */
166struct hdac_bus_ops { 197struct hdac_bus_ops {
167 /* send a single command */ 198 /* send a single command */
@@ -169,13 +200,59 @@ struct hdac_bus_ops {
169 /* get a response from the last command */ 200 /* get a response from the last command */
170 int (*get_response)(struct hdac_bus *bus, unsigned int addr, 201 int (*get_response)(struct hdac_bus *bus, unsigned int addr,
171 unsigned int *res); 202 unsigned int *res);
203 /* control the link power */
204 int (*link_power)(struct hdac_bus *bus, bool enable);
205};
206
207/*
208 * Lowlevel I/O operators
209 */
210struct hdac_io_ops {
211 /* mapped register accesses */
212 void (*reg_writel)(u32 value, u32 __iomem *addr);
213 u32 (*reg_readl)(u32 __iomem *addr);
214 void (*reg_writew)(u16 value, u16 __iomem *addr);
215 u16 (*reg_readw)(u16 __iomem *addr);
216 void (*reg_writeb)(u8 value, u8 __iomem *addr);
217 u8 (*reg_readb)(u8 __iomem *addr);
218 /* Allocation ops */
219 int (*dma_alloc_pages)(struct hdac_bus *bus, int type, size_t size,
220 struct snd_dma_buffer *buf);
221 void (*dma_free_pages)(struct hdac_bus *bus,
222 struct snd_dma_buffer *buf);
172}; 223};
173 224
174#define HDA_UNSOL_QUEUE_SIZE 64 225#define HDA_UNSOL_QUEUE_SIZE 64
226#define HDA_MAX_CODECS 8 /* limit by controller side */
227
228/* HD Audio class code */
229#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
230
231/*
232 * CORB/RIRB
233 *
234 * Each CORB entry is 4 bytes, each RIRB entry is 8 bytes
235 */
236struct hdac_rb {
237 __le32 *buf; /* virtual address of CORB/RIRB buffer */
238 dma_addr_t addr; /* physical address of CORB/RIRB buffer */
239 unsigned short rp, wp; /* RIRB read/write pointers */
240 int cmds[HDA_MAX_CODECS]; /* number of pending requests */
241 u32 res[HDA_MAX_CODECS]; /* last read value */
242};
175 243
244/*
245 * HD-audio bus base driver
246 */
176struct hdac_bus { 247struct hdac_bus {
177 struct device *dev; 248 struct device *dev;
178 const struct hdac_bus_ops *ops; 249 const struct hdac_bus_ops *ops;
250 const struct hdac_io_ops *io_ops;
251
252 /* h/w resources */
253 unsigned long addr;
254 void __iomem *remap_addr;
255 int irq;
179 256
180 /* codec linked list */ 257 /* codec linked list */
181 struct list_head codec_list; 258 struct list_head codec_list;
@@ -189,18 +266,49 @@ struct hdac_bus {
189 unsigned int unsol_rp, unsol_wp; 266 unsigned int unsol_rp, unsol_wp;
190 struct work_struct unsol_work; 267 struct work_struct unsol_work;
191 268
269 /* bit flags of detected codecs */
270 unsigned long codec_mask;
271
192 /* bit flags of powered codecs */ 272 /* bit flags of powered codecs */
193 unsigned long codec_powered; 273 unsigned long codec_powered;
194 274
195 /* flags */ 275 /* CORB/RIRB */
276 struct hdac_rb corb;
277 struct hdac_rb rirb;
278 unsigned int last_cmd[HDA_MAX_CODECS]; /* last sent command */
279
280 /* CORB/RIRB and position buffers */
281 struct snd_dma_buffer rb;
282 struct snd_dma_buffer posbuf;
283
284 /* hdac_stream linked list */
285 struct list_head stream_list;
286
287 /* operation state */
288 bool chip_init:1; /* h/w initialized */
289
290 /* behavior flags */
196 bool sync_write:1; /* sync after verb write */ 291 bool sync_write:1; /* sync after verb write */
292 bool use_posbuf:1; /* use position buffer */
293 bool snoop:1; /* enable snooping */
294 bool align_bdle_4k:1; /* BDLE align 4K boundary */
295 bool reverse_assign:1; /* assign devices in reverse order */
296 bool corbrp_self_clear:1; /* CORBRP clears itself after reset */
297
298 int bdl_pos_adj; /* BDL position adjustment */
197 299
198 /* locks */ 300 /* locks */
301 spinlock_t reg_lock;
199 struct mutex cmd_mutex; 302 struct mutex cmd_mutex;
303
304 /* i915 component interface */
305 struct i915_audio_component *audio_component;
306 int i915_power_refcount;
200}; 307};
201 308
202int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev, 309int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
203 const struct hdac_bus_ops *ops); 310 const struct hdac_bus_ops *ops,
311 const struct hdac_io_ops *io_ops);
204void snd_hdac_bus_exit(struct hdac_bus *bus); 312void snd_hdac_bus_exit(struct hdac_bus *bus);
205int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr, 313int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
206 unsigned int cmd, unsigned int *res); 314 unsigned int cmd, unsigned int *res);
@@ -222,6 +330,201 @@ static inline void snd_hdac_codec_link_down(struct hdac_device *codec)
222 clear_bit(codec->addr, &codec->bus->codec_powered); 330 clear_bit(codec->addr, &codec->bus->codec_powered);
223} 331}
224 332
333int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val);
334int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
335 unsigned int *res);
336int snd_hdac_link_power(struct hdac_device *codec, bool enable);
337
338bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset);
339void snd_hdac_bus_stop_chip(struct hdac_bus *bus);
340void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
341void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
342void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
343void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
344
345void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
346void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
347 void (*ack)(struct hdac_bus *,
348 struct hdac_stream *));
349
350int snd_hdac_bus_alloc_stream_pages(struct hdac_bus *bus);
351void snd_hdac_bus_free_stream_pages(struct hdac_bus *bus);
352
353/*
354 * macros for easy use
355 */
356#define _snd_hdac_chip_write(type, chip, reg, value) \
357 ((chip)->io_ops->reg_write ## type(value, (chip)->remap_addr + (reg)))
358#define _snd_hdac_chip_read(type, chip, reg) \
359 ((chip)->io_ops->reg_read ## type((chip)->remap_addr + (reg)))
360
361/* read/write a register, pass without AZX_REG_ prefix */
362#define snd_hdac_chip_writel(chip, reg, value) \
363 _snd_hdac_chip_write(l, chip, AZX_REG_ ## reg, value)
364#define snd_hdac_chip_writew(chip, reg, value) \
365 _snd_hdac_chip_write(w, chip, AZX_REG_ ## reg, value)
366#define snd_hdac_chip_writeb(chip, reg, value) \
367 _snd_hdac_chip_write(b, chip, AZX_REG_ ## reg, value)
368#define snd_hdac_chip_readl(chip, reg) \
369 _snd_hdac_chip_read(l, chip, AZX_REG_ ## reg)
370#define snd_hdac_chip_readw(chip, reg) \
371 _snd_hdac_chip_read(w, chip, AZX_REG_ ## reg)
372#define snd_hdac_chip_readb(chip, reg) \
373 _snd_hdac_chip_read(b, chip, AZX_REG_ ## reg)
374
375/* update a register, pass without AZX_REG_ prefix */
376#define snd_hdac_chip_updatel(chip, reg, mask, val) \
377 snd_hdac_chip_writel(chip, reg, \
378 (snd_hdac_chip_readl(chip, reg) & ~(mask)) | (val))
379#define snd_hdac_chip_updatew(chip, reg, mask, val) \
380 snd_hdac_chip_writew(chip, reg, \
381 (snd_hdac_chip_readw(chip, reg) & ~(mask)) | (val))
382#define snd_hdac_chip_updateb(chip, reg, mask, val) \
383 snd_hdac_chip_writeb(chip, reg, \
384 (snd_hdac_chip_readb(chip, reg) & ~(mask)) | (val))
385
386/*
387 * HD-audio stream
388 */
389struct hdac_stream {
390 struct hdac_bus *bus;
391 struct snd_dma_buffer bdl; /* BDL buffer */
392 __le32 *posbuf; /* position buffer pointer */
393 int direction; /* playback / capture (SNDRV_PCM_STREAM_*) */
394
395 unsigned int bufsize; /* size of the play buffer in bytes */
396 unsigned int period_bytes; /* size of the period in bytes */
397 unsigned int frags; /* number of periods in the play buffer */
398 unsigned int fifo_size; /* FIFO size */
399
400 void __iomem *sd_addr; /* stream descriptor pointer */
401
402 u32 sd_int_sta_mask; /* stream int status mask */
403
404 /* pcm support */
405 struct snd_pcm_substream *substream; /* assigned substream,
406 * set in PCM open
407 */
408 unsigned int format_val; /* format value to be set in the
409 * controller and the codec
410 */
411 unsigned char stream_tag; /* assigned stream */
412 unsigned char index; /* stream index */
413 int assigned_key; /* last device# key assigned to */
414
415 bool opened:1;
416 bool running:1;
417 bool prepared:1;
418 bool no_period_wakeup:1;
419 bool locked:1;
420
421 /* timestamp */
422 unsigned long start_wallclk; /* start + minimum wallclk */
423 unsigned long period_wallclk; /* wallclk for period */
424 struct timecounter tc;
425 struct cyclecounter cc;
426 int delay_negative_threshold;
427
428 struct list_head list;
429#ifdef CONFIG_SND_HDA_DSP_LOADER
430 /* DSP access mutex */
431 struct mutex dsp_mutex;
432#endif
433};
434
435void snd_hdac_stream_init(struct hdac_bus *bus, struct hdac_stream *azx_dev,
436 int idx, int direction, int tag);
437struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
438 struct snd_pcm_substream *substream);
439void snd_hdac_stream_release(struct hdac_stream *azx_dev);
440
441int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
442void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
443int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev);
444int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
445 unsigned int format_val);
446void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
447void snd_hdac_stream_clear(struct hdac_stream *azx_dev);
448void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
449void snd_hdac_stream_reset(struct hdac_stream *azx_dev);
450void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
451 unsigned int streams, unsigned int reg);
452void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start,
453 unsigned int streams);
454void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev,
455 unsigned int streams);
456/*
457 * macros for easy use
458 */
459#define _snd_hdac_stream_write(type, dev, reg, value) \
460 ((dev)->bus->io_ops->reg_write ## type(value, (dev)->sd_addr + (reg)))
461#define _snd_hdac_stream_read(type, dev, reg) \
462 ((dev)->bus->io_ops->reg_read ## type((dev)->sd_addr + (reg)))
463
464/* read/write a register, pass without AZX_REG_ prefix */
465#define snd_hdac_stream_writel(dev, reg, value) \
466 _snd_hdac_stream_write(l, dev, AZX_REG_ ## reg, value)
467#define snd_hdac_stream_writew(dev, reg, value) \
468 _snd_hdac_stream_write(w, dev, AZX_REG_ ## reg, value)
469#define snd_hdac_stream_writeb(dev, reg, value) \
470 _snd_hdac_stream_write(b, dev, AZX_REG_ ## reg, value)
471#define snd_hdac_stream_readl(dev, reg) \
472 _snd_hdac_stream_read(l, dev, AZX_REG_ ## reg)
473#define snd_hdac_stream_readw(dev, reg) \
474 _snd_hdac_stream_read(w, dev, AZX_REG_ ## reg)
475#define snd_hdac_stream_readb(dev, reg) \
476 _snd_hdac_stream_read(b, dev, AZX_REG_ ## reg)
477
478/* update a register, pass without AZX_REG_ prefix */
479#define snd_hdac_stream_updatel(dev, reg, mask, val) \
480 snd_hdac_stream_writel(dev, reg, \
481 (snd_hdac_stream_readl(dev, reg) & \
482 ~(mask)) | (val))
483#define snd_hdac_stream_updatew(dev, reg, mask, val) \
484 snd_hdac_stream_writew(dev, reg, \
485 (snd_hdac_stream_readw(dev, reg) & \
486 ~(mask)) | (val))
487#define snd_hdac_stream_updateb(dev, reg, mask, val) \
488 snd_hdac_stream_writeb(dev, reg, \
489 (snd_hdac_stream_readb(dev, reg) & \
490 ~(mask)) | (val))
491
492#ifdef CONFIG_SND_HDA_DSP_LOADER
493/* DSP lock helpers */
494#define snd_hdac_dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
495#define snd_hdac_dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
496#define snd_hdac_dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
497#define snd_hdac_stream_is_locked(dev) ((dev)->locked)
498/* DSP loader helpers */
499int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
500 unsigned int byte_size, struct snd_dma_buffer *bufp);
501void snd_hdac_dsp_trigger(struct hdac_stream *azx_dev, bool start);
502void snd_hdac_dsp_cleanup(struct hdac_stream *azx_dev,
503 struct snd_dma_buffer *dmab);
504#else /* CONFIG_SND_HDA_DSP_LOADER */
505#define snd_hdac_dsp_lock_init(dev) do {} while (0)
506#define snd_hdac_dsp_lock(dev) do {} while (0)
507#define snd_hdac_dsp_unlock(dev) do {} while (0)
508#define snd_hdac_stream_is_locked(dev) 0
509
510static inline int
511snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
512 unsigned int byte_size, struct snd_dma_buffer *bufp)
513{
514 return 0;
515}
516
517static inline void snd_hdac_dsp_trigger(struct hdac_stream *azx_dev, bool start)
518{
519}
520
521static inline void snd_hdac_dsp_cleanup(struct hdac_stream *azx_dev,
522 struct snd_dma_buffer *dmab)
523{
524}
525#endif /* CONFIG_SND_HDA_DSP_LOADER */
526
527
225/* 528/*
226 * generic array helpers 529 * generic array helpers
227 */ 530 */
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
new file mode 100644
index 000000000000..0f89df1511dc
--- /dev/null
+++ b/include/sound/hdaudio_ext.h
@@ -0,0 +1,132 @@
1#ifndef __SOUND_HDAUDIO_EXT_H
2#define __SOUND_HDAUDIO_EXT_H
3
4#include <sound/hdaudio.h>
5
6/**
7 * hdac_ext_bus: HDAC extended bus for extended HDA caps
8 *
9 * @bus: hdac bus
10 * @num_streams: streams supported
11 * @ppcap: pp capabilities pointer
12 * @spbcap: SPIB capabilities pointer
13 * @mlcap: MultiLink capabilities pointer
14 * @gtscap: gts capabilities pointer
15 * @hlink_list: list of HDA links
16 */
17struct hdac_ext_bus {
18 struct hdac_bus bus;
19 int num_streams;
20 int idx;
21
22 void __iomem *ppcap;
23 void __iomem *spbcap;
24 void __iomem *mlcap;
25 void __iomem *gtscap;
26
27 struct list_head hlink_list;
28};
29
30int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
31 const struct hdac_bus_ops *ops,
32 const struct hdac_io_ops *io_ops);
33
34void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
35int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
36void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
37
38#define ebus_to_hbus(ebus) (&(ebus)->bus)
39#define hbus_to_ebus(_bus) \
40 container_of(_bus, struct hdac_ext_bus, bus)
41
42int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *sbus);
43void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *chip, bool enable);
44void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *chip, bool enable);
45
46void snd_hdac_ext_stream_spbcap_enable(struct hdac_ext_bus *chip,
47 bool enable, int index);
48
49int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *bus);
50struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *bus,
51 const char *codec_name);
52
53enum hdac_ext_stream_type {
54 HDAC_EXT_STREAM_TYPE_COUPLED = 0,
55 HDAC_EXT_STREAM_TYPE_HOST,
56 HDAC_EXT_STREAM_TYPE_LINK
57};
58
59/**
60 * hdac_ext_stream: HDAC extended stream for extended HDA caps
61 *
62 * @hstream: hdac_stream
63 * @pphc_addr: processing pipe host stream pointer
64 * @pplc_addr: processing pipe link stream pointer
65 * @decoupled: host and link streams are decoupled
66 * @link_locked: link is locked
67 * @link_prepared: link is prepared
68 * @link_substream: link substream
69 */
70struct hdac_ext_stream {
71 struct hdac_stream hstream;
72
73 void __iomem *pphc_addr;
74 void __iomem *pplc_addr;
75
76 bool decoupled:1;
77 bool link_locked:1;
78 bool link_prepared;
79
80 struct snd_pcm_substream *link_substream;
81};
82
83#define hdac_stream(s) (&(s)->hstream)
84#define stream_to_hdac_ext_stream(s) \
85 container_of(s, struct hdac_ext_stream, hstream)
86
87void snd_hdac_ext_stream_init(struct hdac_ext_bus *bus,
88 struct hdac_ext_stream *stream, int idx,
89 int direction, int tag);
90int snd_hdac_ext_stream_init_all(struct hdac_ext_bus *ebus, int start_idx,
91 int num_stream, int dir);
92void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus);
93void snd_hdac_link_free_all(struct hdac_ext_bus *ebus);
94struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_ext_bus *bus,
95 struct snd_pcm_substream *substream,
96 int type);
97void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
98void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *bus,
99 struct hdac_ext_stream *azx_dev, bool decouple);
100void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
101
102void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
103void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
104void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
105int snd_hdac_ext_link_stream_setup(struct hdac_ext_stream *stream, int fmt);
106
107struct hdac_ext_link {
108 struct hdac_bus *bus;
109 int index;
110 void __iomem *ml_addr; /* link output stream reg pointer */
111 u32 lcaps; /* link capabilities */
112 u16 lsdiid; /* link sdi identifier */
113 struct list_head list;
114};
115
116int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
117int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
118void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
119 int stream);
120void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
121 int stream);
122
123/* update register macro */
124#define snd_hdac_updatel(addr, reg, mask, val) \
125 writel(((readl(addr + reg) & ~(mask)) | (val)), \
126 addr + reg)
127
128#define snd_hdac_updatew(addr, reg, mask, val) \
129 writew(((readw(addr + reg) & ~(mask)) | (val)), \
130 addr + reg)
131
132#endif /* __SOUND_HDAUDIO_EXT_H */
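Link handling is expected to go through the lookup helper before powering the multi-link; a brief sketch, with the wrapper name and error code chosen here for illustration:

	static int my_link_power_up(struct hdac_ext_bus *ebus, const char *codec_name)
	{
		struct hdac_ext_link *hlink;

		hlink = snd_hdac_ext_bus_get_link(ebus, codec_name);
		if (!hlink)
			return -ENODEV;

		/* power up the multi-link segment that carries this codec */
		return snd_hdac_ext_bus_link_power_up(hlink);
	}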
diff --git a/include/sound/info.h b/include/sound/info.h
index 9ca1a493d370..67390ee846aa 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -23,6 +23,8 @@
23 */ 23 */
24 24
25#include <linux/poll.h> 25#include <linux/poll.h>
26#include <linux/seq_file.h>
27#include <sound/core.h>
26 28
27/* buffer for information */ 29/* buffer for information */
28struct snd_info_buffer { 30struct snd_info_buffer {
@@ -90,16 +92,14 @@ struct snd_info_entry {
90 struct list_head list; 92 struct list_head list;
91}; 93};
92 94
93#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_PROC_FS) 95#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_SND_PROC_FS)
94int snd_info_minor_register(void); 96int snd_info_minor_register(void);
95int snd_info_minor_unregister(void);
96#else 97#else
97#define snd_info_minor_register() /* NOP */ 98#define snd_info_minor_register() 0
98#define snd_info_minor_unregister() /* NOP */
99#endif 99#endif
100 100
101 101
102#ifdef CONFIG_PROC_FS 102#ifdef CONFIG_SND_PROC_FS
103 103
104extern struct snd_info_entry *snd_seq_root; 104extern struct snd_info_entry *snd_seq_root;
105#ifdef CONFIG_SND_OSSEMUL 105#ifdef CONFIG_SND_OSSEMUL
@@ -110,8 +110,18 @@ void snd_card_info_read_oss(struct snd_info_buffer *buffer);
110static inline void snd_card_info_read_oss(struct snd_info_buffer *buffer) {} 110static inline void snd_card_info_read_oss(struct snd_info_buffer *buffer) {}
111#endif 111#endif
112 112
113__printf(2, 3) 113/**
114int snd_iprintf(struct snd_info_buffer *buffer, const char *fmt, ...); 114 * snd_iprintf - printf on the procfs buffer
115 * @buf: the procfs buffer
116 * @fmt: the printf format
117 *
118 * Outputs the string on the procfs buffer just like printf().
119 *
120 * Return: zero for success, or a negative error code.
121 */
122#define snd_iprintf(buf, fmt, args...) \
123 seq_printf((struct seq_file *)(buf)->buffer, fmt, ##args)
124
115int snd_info_init(void); 125int snd_info_init(void);
116int snd_info_done(void); 126int snd_info_done(void);
117 127
@@ -135,8 +145,12 @@ void snd_info_card_id_change(struct snd_card *card);
135int snd_info_register(struct snd_info_entry *entry); 145int snd_info_register(struct snd_info_entry *entry);
136 146
137/* for card drivers */ 147/* for card drivers */
138int snd_card_proc_new(struct snd_card *card, const char *name, 148static inline int snd_card_proc_new(struct snd_card *card, const char *name,
139 struct snd_info_entry **entryp); 149 struct snd_info_entry **entryp)
150{
151 *entryp = snd_info_create_card_entry(card, name, card->proc_root);
152 return *entryp ? 0 : -ENOMEM;
153}
140 154
141static inline void snd_info_set_text_ops(struct snd_info_entry *entry, 155static inline void snd_info_set_text_ops(struct snd_info_entry *entry,
142 void *private_data, 156 void *private_data,
@@ -175,7 +189,6 @@ static inline int snd_card_proc_new(struct snd_card *card, const char *name,
175static inline void snd_info_set_text_ops(struct snd_info_entry *entry __attribute__((unused)), 189static inline void snd_info_set_text_ops(struct snd_info_entry *entry __attribute__((unused)),
176 void *private_data, 190 void *private_data,
177 void (*read)(struct snd_info_entry *, struct snd_info_buffer *)) {} 191 void (*read)(struct snd_info_entry *, struct snd_info_buffer *)) {}
178
179static inline int snd_info_check_reserved_words(const char *str) { return 1; } 192static inline int snd_info_check_reserved_words(const char *str) { return 1; }
180 193
181#endif 194#endif
@@ -184,7 +197,7 @@ static inline int snd_info_check_reserved_words(const char *str) { return 1; }
184 * OSS info part 197 * OSS info part
185 */ 198 */
186 199
187#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_PROC_FS) 200#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_SND_PROC_FS)
188 201
189#define SNDRV_OSS_INFO_DEV_AUDIO 0 202#define SNDRV_OSS_INFO_DEV_AUDIO 0
190#define SNDRV_OSS_INFO_DEV_SYNTH 1 203#define SNDRV_OSS_INFO_DEV_SYNTH 1
@@ -197,6 +210,6 @@ static inline int snd_info_check_reserved_words(const char *str) { return 1; }
197int snd_oss_info_register(int dev, int num, char *string); 210int snd_oss_info_register(int dev, int num, char *string);
198#define snd_oss_info_unregister(dev, num) snd_oss_info_register(dev, num, NULL) 211#define snd_oss_info_unregister(dev, num) snd_oss_info_register(dev, num, NULL)
199 212
200#endif /* CONFIG_SND_OSSEMUL && CONFIG_PROC_FS */ 213#endif /* CONFIG_SND_OSSEMUL && CONFIG_SND_PROC_FS */
201 214
202#endif /* __SOUND_INFO_H */ 215#endif /* __SOUND_INFO_H */
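With snd_card_proc_new() now an inline around snd_info_create_card_entry(), the usual driver pattern stays the same: create the entry, attach a text read callback, and print through snd_iprintf(); a minimal sketch (the names are illustrative):

	static void my_proc_read(struct snd_info_entry *entry,
				 struct snd_info_buffer *buffer)
	{
		snd_iprintf(buffer, "chip state for %s\n", entry->name);
	}

	static int my_create_proc(struct snd_card *card, void *chip)
	{
		struct snd_info_entry *entry;
		int err = snd_card_proc_new(card, "my-status", &entry);

		if (!err)
			snd_info_set_text_ops(entry, chip, my_proc_read);
		return err;
	}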
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 218235030ebc..23bede121c78 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -73,6 +73,8 @@ enum snd_jack_types {
73 73
74struct snd_jack { 74struct snd_jack {
75 struct input_dev *input_dev; 75 struct input_dev *input_dev;
76 struct list_head kctl_list;
77 struct snd_card *card;
76 int registered; 78 int registered;
77 int type; 79 int type;
78 const char *id; 80 const char *id;
@@ -85,7 +87,8 @@ struct snd_jack {
85#ifdef CONFIG_SND_JACK 87#ifdef CONFIG_SND_JACK
86 88
87int snd_jack_new(struct snd_card *card, const char *id, int type, 89int snd_jack_new(struct snd_card *card, const char *id, int type,
88 struct snd_jack **jack); 90 struct snd_jack **jack, bool initial_kctl, bool phantom_jack);
91int snd_jack_add_new_kctl(struct snd_jack *jack, const char * name, int mask);
89void snd_jack_set_parent(struct snd_jack *jack, struct device *parent); 92void snd_jack_set_parent(struct snd_jack *jack, struct device *parent);
90int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type, 93int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type,
91 int keytype); 94 int keytype);
@@ -93,9 +96,13 @@ int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type,
93void snd_jack_report(struct snd_jack *jack, int status); 96void snd_jack_report(struct snd_jack *jack, int status);
94 97
95#else 98#else
96
97static inline int snd_jack_new(struct snd_card *card, const char *id, int type, 99static inline int snd_jack_new(struct snd_card *card, const char *id, int type,
98 struct snd_jack **jack) 100 struct snd_jack **jack, bool initial_kctl, bool phantom_jack)
101{
102 return 0;
103}
104
105static inline int snd_jack_add_new_kctl(struct snd_jack *jack, const char * name, int mask)
99{ 106{
100 return 0; 107 return 0;
101} 108}
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 0cb7f3f5df7b..691e7ee0a510 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -224,9 +224,10 @@ typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params,
224 224
225struct snd_pcm_hw_rule { 225struct snd_pcm_hw_rule {
226 unsigned int cond; 226 unsigned int cond;
227 snd_pcm_hw_rule_func_t func;
228 int var; 227 int var;
229 int deps[4]; 228 int deps[4];
229
230 snd_pcm_hw_rule_func_t func;
230 void *private; 231 void *private;
231}; 232};
232 233
@@ -273,8 +274,8 @@ struct snd_pcm_hw_constraint_ratdens {
273}; 274};
274 275
275struct snd_pcm_hw_constraint_list { 276struct snd_pcm_hw_constraint_list {
276 unsigned int count;
277 const unsigned int *list; 277 const unsigned int *list;
278 unsigned int count;
278 unsigned int mask; 279 unsigned int mask;
279}; 280};
280 281
diff --git a/include/sound/pcm_drm_eld.h b/include/sound/pcm_drm_eld.h
new file mode 100644
index 000000000000..93357b25d2e2
--- /dev/null
+++ b/include/sound/pcm_drm_eld.h
@@ -0,0 +1,6 @@
1#ifndef __SOUND_PCM_DRM_ELD_H
2#define __SOUND_PCM_DRM_ELD_H
3
4int snd_pcm_hw_constraint_eld(struct snd_pcm_runtime *runtime, void *eld);
5
6#endif
diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
new file mode 100644
index 000000000000..0eed397aca8e
--- /dev/null
+++ b/include/sound/pcm_iec958.h
@@ -0,0 +1,9 @@
1#ifndef __SOUND_PCM_IEC958_H
2#define __SOUND_PCM_IEC958_H
3
4#include <linux/types.h>
5
6int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
7 size_t len);
8
9#endif
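snd_pcm_create_iec958_consumer() derives consumer-mode IEC958 channel-status bytes from the negotiated runtime parameters. A minimal, hedged usage sketch (buffer size and caller are illustrative):

#include <sound/pcm.h>
#include <sound/pcm_iec958.h>

static int ex_fill_channel_status(struct snd_pcm_runtime *runtime,
				  u8 *cs, size_t len)
{
	/* fills cs[] from the runtime's rate and sample width; the caller
	 * then programs the bytes into its S/PDIF or HDMI transmitter */
	return snd_pcm_create_iec958_consumer(runtime, cs, len);
}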
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index 120d9610054e..22734bc3ffd4 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -15,17 +15,11 @@ struct rt5645_platform_data {
15 /* IN2 can optionally be differential */ 15 /* IN2 can optionally be differential */
16 bool in2_diff; 16 bool in2_diff;
17 17
18 bool dmic_en;
19 unsigned int dmic1_data_pin; 18 unsigned int dmic1_data_pin;
20 /* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */ 19 /* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */
21 unsigned int dmic2_data_pin; 20 unsigned int dmic2_data_pin;
22 /* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */ 21 /* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */
23 22
24 unsigned int hp_det_gpio;
25 bool gpio_hp_det_active_high;
26
27 /* true if codec's jd function is used */
28 bool en_jd_func;
29 unsigned int jd_mode; 23 unsigned int jd_mode;
30}; 24};
31 25
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 1065095c6973..37d95a898275 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -15,6 +15,8 @@
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17#include <sound/control.h> 17#include <sound/control.h>
18#include <sound/soc-topology.h>
19#include <sound/asoc.h>
18 20
19struct device; 21struct device;
20 22
@@ -107,6 +109,10 @@ struct device;
107{ .id = snd_soc_dapm_mux, .name = wname, \ 109{ .id = snd_soc_dapm_mux, .name = wname, \
108 SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \ 110 SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
109 .kcontrol_news = wcontrols, .num_kcontrols = 1} 111 .kcontrol_news = wcontrols, .num_kcontrols = 1}
112#define SND_SOC_DAPM_DEMUX(wname, wreg, wshift, winvert, wcontrols) \
113{ .id = snd_soc_dapm_demux, .name = wname, \
114 SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
115 .kcontrol_news = wcontrols, .num_kcontrols = 1}
110 116
111/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */ 117/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
112#define SOC_PGA_ARRAY(wname, wreg, wshift, winvert,\ 118#define SOC_PGA_ARRAY(wname, wreg, wshift, winvert,\
@@ -444,11 +450,15 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
444struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm( 450struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
445 struct snd_kcontrol *kcontrol); 451 struct snd_kcontrol *kcontrol);
446 452
453int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm,
454 enum snd_soc_bias_level level);
455
447/* dapm widget types */ 456/* dapm widget types */
448enum snd_soc_dapm_type { 457enum snd_soc_dapm_type {
449 snd_soc_dapm_input = 0, /* input pin */ 458 snd_soc_dapm_input = 0, /* input pin */
450 snd_soc_dapm_output, /* output pin */ 459 snd_soc_dapm_output, /* output pin */
451 snd_soc_dapm_mux, /* selects 1 analog signal from many inputs */ 460 snd_soc_dapm_mux, /* selects 1 analog signal from many inputs */
461 snd_soc_dapm_demux, /* connects the input to one of multiple outputs */
452 snd_soc_dapm_mixer, /* mixes several analog signals together */ 462 snd_soc_dapm_mixer, /* mixes several analog signals together */
453 snd_soc_dapm_mixer_named_ctl, /* mixer with named controls */ 463 snd_soc_dapm_mixer_named_ctl, /* mixer with named controls */
454 snd_soc_dapm_pga, /* programmable gain/attenuation (volume) */ 464 snd_soc_dapm_pga, /* programmable gain/attenuation (volume) */
@@ -563,6 +573,7 @@ struct snd_soc_dapm_widget {
563 int num_kcontrols; 573 int num_kcontrols;
564 const struct snd_kcontrol_new *kcontrol_news; 574 const struct snd_kcontrol_new *kcontrol_news;
565 struct snd_kcontrol **kcontrols; 575 struct snd_kcontrol **kcontrols;
576 struct snd_soc_dobj dobj;
566 577
567 /* widget input and outputs */ 578 /* widget input and outputs */
568 struct list_head sources; 579 struct list_head sources;
@@ -585,6 +596,10 @@ struct snd_soc_dapm_update {
585 int val; 596 int val;
586}; 597};
587 598
599struct snd_soc_dapm_wcache {
600 struct snd_soc_dapm_widget *widget;
601};
602
588/* DAPM context */ 603/* DAPM context */
589struct snd_soc_dapm_context { 604struct snd_soc_dapm_context {
590 enum snd_soc_bias_level bias_level; 605 enum snd_soc_bias_level bias_level;
@@ -606,6 +621,9 @@ struct snd_soc_dapm_context {
606 int (*set_bias_level)(struct snd_soc_dapm_context *dapm, 621 int (*set_bias_level)(struct snd_soc_dapm_context *dapm,
607 enum snd_soc_bias_level level); 622 enum snd_soc_bias_level level);
608 623
624 struct snd_soc_dapm_wcache path_sink_cache;
625 struct snd_soc_dapm_wcache path_source_cache;
626
609#ifdef CONFIG_DEBUG_FS 627#ifdef CONFIG_DEBUG_FS
610 struct dentry *debugfs_dapm; 628 struct dentry *debugfs_dapm;
611#endif 629#endif
@@ -623,4 +641,35 @@ struct snd_soc_dapm_stats {
623 int neighbour_checks; 641 int neighbour_checks;
624}; 642};
625 643
644/**
645 * snd_soc_dapm_init_bias_level() - Initialize DAPM bias level
646 * @dapm: The DAPM context to initialize
647 * @level: The DAPM level to initialize to
648 *
649 * This function only sets the driver internal state of the DAPM level and will
650 * not modify the state of the device. Hence it should not be used during normal
651 * operation, but only to synchronize the internal state to the device state.
652 * E.g. during driver probe to set the DAPM level to the one corresponding with
653 * the power-on reset state of the device.
654 *
655 * To change the DAPM state of the device use snd_soc_dapm_set_bias_level().
656 */
657static inline void snd_soc_dapm_init_bias_level(
658 struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level)
659{
660 dapm->bias_level = level;
661}
662
663/**
664 * snd_soc_dapm_get_bias_level() - Get current DAPM bias level
665 * @dapm: The context for which to get the bias level
666 *
667 * Returns: The current bias level of the passed DAPM context.
668 */
669static inline enum snd_soc_bias_level snd_soc_dapm_get_bias_level(
670 struct snd_soc_dapm_context *dapm)
671{
672 return dapm->bias_level;
673}
674
626#endif 675#endif
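Two of the additions above, the SND_SOC_DAPM_DEMUX() widget and the inline bias-level helpers, can be sketched as follows; the register address, control names and the STANDBY choice are illustrative assumptions:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* hypothetical 1-bit field at register 0x10 selecting the active output */
static const char * const ex_demux_texts[] = { "OUT1", "OUT2" };
static SOC_ENUM_SINGLE_DECL(ex_demux_enum, 0x10, 0, ex_demux_texts);
static const struct snd_kcontrol_new ex_demux_ctl =
	SOC_DAPM_ENUM("Output Route", ex_demux_enum);

static const struct snd_soc_dapm_widget ex_widgets[] = {
	/* one input routed to exactly one of several outputs */
	SND_SOC_DAPM_DEMUX("Output Demux", SND_SOC_NOPM, 0, 0, &ex_demux_ctl),
};

static int ex_codec_probe(struct snd_soc_codec *codec)
{
	/* record that the device powers up in STANDBY without touching it */
	snd_soc_dapm_init_bias_level(snd_soc_codec_get_dapm(codec),
				     SND_SOC_BIAS_STANDBY);
	return 0;
}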
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
new file mode 100644
index 000000000000..865a141b118b
--- /dev/null
+++ b/include/sound/soc-topology.h
@@ -0,0 +1,168 @@
1/*
2 * linux/sound/soc-topology.h -- ALSA SoC Firmware Controls and DAPM
3 *
4 * Copyright (C) 2012 Texas Instruments Inc.
5 * Copyright (C) 2015 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
12 * algorithms, equalisers, DAIs, widgets, FE caps, BE caps, codec link caps etc.
13 */
14
15#ifndef __LINUX_SND_SOC_TPLG_H
16#define __LINUX_SND_SOC_TPLG_H
17
18#include <sound/asoc.h>
19#include <linux/list.h>
20
21struct firmware;
22struct snd_kcontrol;
23struct snd_soc_tplg_pcm_be;
24struct snd_ctl_elem_value;
25struct snd_ctl_elem_info;
26struct snd_soc_dapm_widget;
27struct snd_soc_component;
28struct snd_soc_tplg_pcm_fe;
29struct snd_soc_dapm_context;
30struct snd_soc_card;
31
 32/* objects can be loaded and unloaded in groups with identifying indexes */

33#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */
34
35/* dynamic object type */
36enum snd_soc_dobj_type {
37 SND_SOC_DOBJ_NONE = 0, /* object is not dynamic */
38 SND_SOC_DOBJ_MIXER,
39 SND_SOC_DOBJ_ENUM,
40 SND_SOC_DOBJ_BYTES,
41 SND_SOC_DOBJ_PCM,
42 SND_SOC_DOBJ_DAI_LINK,
43 SND_SOC_DOBJ_CODEC_LINK,
44 SND_SOC_DOBJ_WIDGET,
45};
46
47/* dynamic control object */
48struct snd_soc_dobj_control {
49 struct snd_kcontrol *kcontrol;
50 char **dtexts;
51 unsigned long *dvalues;
52};
53
54/* dynamic widget object */
55struct snd_soc_dobj_widget {
56 unsigned int kcontrol_enum:1; /* this widget is an enum kcontrol */
57};
58
59/* dynamic PCM DAI object */
60struct snd_soc_dobj_pcm_dai {
61 struct snd_soc_tplg_pcm_dai *pd;
62 unsigned int count;
63};
64
65/* generic dynamic object - all dynamic objects belong to this struct */
66struct snd_soc_dobj {
67 enum snd_soc_dobj_type type;
68 unsigned int index; /* objects can belong in different groups */
69 struct list_head list;
70 struct snd_soc_tplg_ops *ops;
71 union {
72 struct snd_soc_dobj_control control;
73 struct snd_soc_dobj_widget widget;
74 struct snd_soc_dobj_pcm_dai pcm_dai;
75 };
76 void *private; /* core does not touch this */
77};
78
79/*
80 * Kcontrol operations - used to map handlers onto firmware based controls.
81 */
82struct snd_soc_tplg_kcontrol_ops {
83 u32 id;
84 int (*get)(struct snd_kcontrol *kcontrol,
85 struct snd_ctl_elem_value *ucontrol);
86 int (*put)(struct snd_kcontrol *kcontrol,
87 struct snd_ctl_elem_value *ucontrol);
88 int (*info)(struct snd_kcontrol *kcontrol,
89 struct snd_ctl_elem_info *uinfo);
90};
91
92/*
93 * DAPM widget event handlers - used to map handlers onto widgets.
94 */
95struct snd_soc_tplg_widget_events {
96 u16 type;
97 int (*event_handler)(struct snd_soc_dapm_widget *w,
98 struct snd_kcontrol *k, int event);
99};
100
101/*
102 * Public API - Used by component drivers to load and unload dynamic objects
103 * and their resources.
104 */
105struct snd_soc_tplg_ops {
106
107 /* external kcontrol init - used for any driver specific init */
108 int (*control_load)(struct snd_soc_component *,
109 struct snd_kcontrol_new *, struct snd_soc_tplg_ctl_hdr *);
110 int (*control_unload)(struct snd_soc_component *,
111 struct snd_soc_dobj *);
112
113 /* external widget init - used for any driver specific init */
114 int (*widget_load)(struct snd_soc_component *,
115 struct snd_soc_dapm_widget *,
116 struct snd_soc_tplg_dapm_widget *);
117 int (*widget_unload)(struct snd_soc_component *,
118 struct snd_soc_dobj *);
119
120 /* FE - used for any driver specific init */
121 int (*pcm_dai_load)(struct snd_soc_component *,
122 struct snd_soc_tplg_pcm_dai *pcm_dai, int num_fe);
123 int (*pcm_dai_unload)(struct snd_soc_component *,
124 struct snd_soc_dobj *);
125
126 /* callback to handle vendor bespoke data */
127 int (*vendor_load)(struct snd_soc_component *,
128 struct snd_soc_tplg_hdr *);
129 int (*vendor_unload)(struct snd_soc_component *,
130 struct snd_soc_tplg_hdr *);
131
132 /* completion - called at completion of firmware loading */
133 void (*complete)(struct snd_soc_component *);
134
135 /* manifest - optional to inform component of manifest */
136 int (*manifest)(struct snd_soc_component *,
137 struct snd_soc_tplg_manifest *);
138
139 /* bespoke kcontrol handlers available for binding */
140 const struct snd_soc_tplg_kcontrol_ops *io_ops;
141 int io_ops_count;
142};
143
144/* gets a pointer to data from the firmware block header */
145static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
146{
147 const void *ptr = hdr;
148
149 return ptr + sizeof(*hdr);
150}
151
152/* Dynamic Object loading and removal for component drivers */
153int snd_soc_tplg_component_load(struct snd_soc_component *comp,
154 struct snd_soc_tplg_ops *ops, const struct firmware *fw,
155 u32 index);
156int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index);
157
158/* Widget removal - widgets are also removed with the component API */
159void snd_soc_tplg_widget_remove(struct snd_soc_dapm_widget *w);
160void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
161 u32 index);
162
163/* Binds event handlers to dynamic widgets */
164int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
165 const struct snd_soc_tplg_widget_events *events, int num_events,
166 u16 event_type);
167
168#endif
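A hedged sketch of how a component driver might consume this new API; the firmware name is a placeholder and the empty ops structure simply accepts every object in the file:

#include <linux/firmware.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>

/* all callbacks in snd_soc_tplg_ops are optional driver hooks */
static struct snd_soc_tplg_ops ex_tplg_ops;

static int ex_load_topology(struct snd_soc_component *comp)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "ex-topology.bin", comp->dev);
	if (ret < 0)
		return ret;

	/* builds kcontrols, DAPM widgets, routes, PCMs etc. described by fw;
	 * SND_SOC_TPLG_INDEX_ALL loads every object group in the image */
	ret = snd_soc_tplg_component_load(comp, &ex_tplg_ops, fw,
					  SND_SOC_TPLG_INDEX_ALL);
	release_firmware(fw);
	return ret;
}

/* objects are torn down again with snd_soc_tplg_component_remove(comp, index) */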
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f6226914acfe..93df8bf9d54a 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -27,6 +27,7 @@
27#include <sound/compress_driver.h> 27#include <sound/compress_driver.h>
28#include <sound/control.h> 28#include <sound/control.h>
29#include <sound/ac97_codec.h> 29#include <sound/ac97_codec.h>
30#include <sound/soc-topology.h>
30 31
31/* 32/*
32 * Convenience kcontrol builders 33 * Convenience kcontrol builders
@@ -190,8 +191,12 @@
190#define SOC_VALUE_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xmask, xitems, xtexts, xvalues) \ 191#define SOC_VALUE_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xmask, xitems, xtexts, xvalues) \
191{ .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \ 192{ .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \
192 .mask = xmask, .items = xitems, .texts = xtexts, .values = xvalues} 193 .mask = xmask, .items = xitems, .texts = xtexts, .values = xvalues}
193#define SOC_VALUE_ENUM_SINGLE(xreg, xshift, xmask, xnitmes, xtexts, xvalues) \ 194#define SOC_VALUE_ENUM_SINGLE(xreg, xshift, xmask, xitems, xtexts, xvalues) \
194 SOC_VALUE_ENUM_DOUBLE(xreg, xshift, xshift, xmask, xnitmes, xtexts, xvalues) 195 SOC_VALUE_ENUM_DOUBLE(xreg, xshift, xshift, xmask, xitems, xtexts, xvalues)
196#define SOC_VALUE_ENUM_SINGLE_AUTODISABLE(xreg, xshift, xmask, xitems, xtexts, xvalues) \
197{ .reg = xreg, .shift_l = xshift, .shift_r = xshift, \
198 .mask = xmask, .items = xitems, .texts = xtexts, \
199 .values = xvalues, .autodisable = 1}
195#define SOC_ENUM_SINGLE_VIRT(xitems, xtexts) \ 200#define SOC_ENUM_SINGLE_VIRT(xitems, xtexts) \
196 SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, xitems, xtexts) 201 SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, xitems, xtexts)
197#define SOC_ENUM(xname, xenum) \ 202#define SOC_ENUM(xname, xenum) \
@@ -312,6 +317,11 @@
312 ARRAY_SIZE(xtexts), xtexts, xvalues) 317 ARRAY_SIZE(xtexts), xtexts, xvalues)
313#define SOC_VALUE_ENUM_SINGLE_DECL(name, xreg, xshift, xmask, xtexts, xvalues) \ 318#define SOC_VALUE_ENUM_SINGLE_DECL(name, xreg, xshift, xmask, xtexts, xvalues) \
314 SOC_VALUE_ENUM_DOUBLE_DECL(name, xreg, xshift, xshift, xmask, xtexts, xvalues) 319 SOC_VALUE_ENUM_DOUBLE_DECL(name, xreg, xshift, xshift, xmask, xtexts, xvalues)
320
321#define SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(name, xreg, xshift, xmask, xtexts, xvalues) \
322 const struct soc_enum name = SOC_VALUE_ENUM_SINGLE_AUTODISABLE(xreg, \
323 xshift, xmask, ARRAY_SIZE(xtexts), xtexts, xvalues)
324
315#define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \ 325#define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \
316 const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts) 326 const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts)
317 327
@@ -767,6 +777,9 @@ struct snd_soc_component {
767 777
768 struct mutex io_mutex; 778 struct mutex io_mutex;
769 779
780 /* attached dynamic objects */
781 struct list_head dobj_list;
782
770#ifdef CONFIG_DEBUG_FS 783#ifdef CONFIG_DEBUG_FS
771 struct dentry *debugfs_root; 784 struct dentry *debugfs_root;
772#endif 785#endif
@@ -819,7 +832,7 @@ struct snd_soc_codec {
819 /* component */ 832 /* component */
820 struct snd_soc_component component; 833 struct snd_soc_component component;
821 834
822 /* dapm */ 835 /* Don't access this directly, use snd_soc_codec_get_dapm() */
823 struct snd_soc_dapm_context dapm; 836 struct snd_soc_dapm_context dapm;
824 837
825#ifdef CONFIG_DEBUG_FS 838#ifdef CONFIG_DEBUG_FS
@@ -961,6 +974,24 @@ struct snd_soc_dai_link {
961 974
962 enum snd_soc_dpcm_trigger trigger[2]; /* trigger type for DPCM */ 975 enum snd_soc_dpcm_trigger trigger[2]; /* trigger type for DPCM */
963 976
977 /* codec/machine specific init - e.g. add machine controls */
978 int (*init)(struct snd_soc_pcm_runtime *rtd);
979
980 /* optional hw_params re-writing for BE and FE sync */
981 int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
982 struct snd_pcm_hw_params *params);
983
984 /* machine stream operations */
985 const struct snd_soc_ops *ops;
986 const struct snd_soc_compr_ops *compr_ops;
987
988 /* For unidirectional dai links */
989 bool playback_only;
990 bool capture_only;
991
992 /* Mark this pcm with non atomic ops */
993 bool nonatomic;
994
964 /* Keep DAI active over suspend */ 995 /* Keep DAI active over suspend */
965 unsigned int ignore_suspend:1; 996 unsigned int ignore_suspend:1;
966 997
@@ -969,9 +1000,6 @@ struct snd_soc_dai_link {
969 unsigned int symmetric_channels:1; 1000 unsigned int symmetric_channels:1;
970 unsigned int symmetric_samplebits:1; 1001 unsigned int symmetric_samplebits:1;
971 1002
972 /* Mark this pcm with non atomic ops */
973 bool nonatomic;
974
975 /* Do not create a PCM for this DAI link (Backend link) */ 1003 /* Do not create a PCM for this DAI link (Backend link) */
976 unsigned int no_pcm:1; 1004 unsigned int no_pcm:1;
977 1005
@@ -982,23 +1010,11 @@ struct snd_soc_dai_link {
982 unsigned int dpcm_capture:1; 1010 unsigned int dpcm_capture:1;
983 unsigned int dpcm_playback:1; 1011 unsigned int dpcm_playback:1;
984 1012
1013 /* DPCM used FE & BE merged format */
1014 unsigned int dpcm_merged_format:1;
1015
985 /* pmdown_time is ignored at stop */ 1016 /* pmdown_time is ignored at stop */
986 unsigned int ignore_pmdown_time:1; 1017 unsigned int ignore_pmdown_time:1;
987
988 /* codec/machine specific init - e.g. add machine controls */
989 int (*init)(struct snd_soc_pcm_runtime *rtd);
990
991 /* optional hw_params re-writing for BE and FE sync */
992 int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
993 struct snd_pcm_hw_params *params);
994
995 /* machine stream operations */
996 const struct snd_soc_ops *ops;
997 const struct snd_soc_compr_ops *compr_ops;
998
999 /* For unidirectional dai links */
1000 bool playback_only;
1001 bool capture_only;
1002}; 1018};
1003 1019
1004struct snd_soc_codec_conf { 1020struct snd_soc_codec_conf {
@@ -1111,6 +1127,9 @@ struct snd_soc_card {
1111 struct list_head dapm_list; 1127 struct list_head dapm_list;
1112 struct list_head dapm_dirty; 1128 struct list_head dapm_dirty;
1113 1129
1130 /* attached dynamic objects */
1131 struct list_head dobj_list;
1132
1114 /* Generic DAPM context for the card */ 1133 /* Generic DAPM context for the card */
1115 struct snd_soc_dapm_context dapm; 1134 struct snd_soc_dapm_context dapm;
1116 struct snd_soc_dapm_stats dapm_stats; 1135 struct snd_soc_dapm_stats dapm_stats;
@@ -1170,6 +1189,7 @@ struct soc_mixer_control {
1170 unsigned int sign_bit; 1189 unsigned int sign_bit;
1171 unsigned int invert:1; 1190 unsigned int invert:1;
1172 unsigned int autodisable:1; 1191 unsigned int autodisable:1;
1192 struct snd_soc_dobj dobj;
1173}; 1193};
1174 1194
1175struct soc_bytes { 1195struct soc_bytes {
@@ -1180,6 +1200,8 @@ struct soc_bytes {
1180 1200
1181struct soc_bytes_ext { 1201struct soc_bytes_ext {
1182 int max; 1202 int max;
1203 struct snd_soc_dobj dobj;
1204
1183 /* used for TLV byte control */ 1205 /* used for TLV byte control */
1184 int (*get)(unsigned int __user *bytes, unsigned int size); 1206 int (*get)(unsigned int __user *bytes, unsigned int size);
1185 int (*put)(const unsigned int __user *bytes, unsigned int size); 1207 int (*put)(const unsigned int __user *bytes, unsigned int size);
@@ -1200,6 +1222,8 @@ struct soc_enum {
1200 unsigned int mask; 1222 unsigned int mask;
1201 const char * const *texts; 1223 const char * const *texts;
1202 const unsigned int *values; 1224 const unsigned int *values;
1225 unsigned int autodisable:1;
1226 struct snd_soc_dobj dobj;
1203}; 1227};
1204 1228
1205/** 1229/**
@@ -1282,6 +1306,58 @@ static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
1282} 1306}
1283 1307
1284/** 1308/**
1309 * snd_soc_codec_get_dapm() - Returns the DAPM context for the CODEC
1310 * @codec: The CODEC for which to get the DAPM context
1311 *
1312 * Note: Use this function instead of directly accessing the CODEC's dapm field
1313 */
1314static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
1315 struct snd_soc_codec *codec)
1316{
1317 return &codec->dapm;
1318}
1319
1320/**
1321 * snd_soc_codec_init_bias_level() - Initialize CODEC DAPM bias level
1322 * @codec: The CODEC for which to initialize the DAPM bias level
1323 * @level: The DAPM level to initialize to
1324 *
1325 * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
1326 */
1327static inline void snd_soc_codec_init_bias_level(struct snd_soc_codec *codec,
1328 enum snd_soc_bias_level level)
1329{
1330 snd_soc_dapm_init_bias_level(snd_soc_codec_get_dapm(codec), level);
1331}
1332
1333/**
1334 * snd_soc_codec_get_bias_level() - Get current CODEC DAPM bias level
1335 * @codec: The CODEC for which to get the DAPM bias level
1336 *
1337 * Returns: The current DAPM bias level of the CODEC.
1338 */
1339static inline enum snd_soc_bias_level snd_soc_codec_get_bias_level(
1340 struct snd_soc_codec *codec)
1341{
1342 return snd_soc_dapm_get_bias_level(snd_soc_codec_get_dapm(codec));
1343}
1344
1345/**
1346 * snd_soc_codec_force_bias_level() - Set the CODEC DAPM bias level
1347 * @codec: The CODEC for which to set the level
1348 * @level: The level to set to
1349 *
1350 * Forces the CODEC bias level to a specific state. See
1351 * snd_soc_dapm_force_bias_level().
1352 */
1353static inline int snd_soc_codec_force_bias_level(struct snd_soc_codec *codec,
1354 enum snd_soc_bias_level level)
1355{
1356 return snd_soc_dapm_force_bias_level(snd_soc_codec_get_dapm(codec),
1357 level);
1358}
1359
1360/**
1285 * snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol 1361 * snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol
1286 * @kcontrol: The kcontrol 1362 * @kcontrol: The kcontrol
1287 * 1363 *
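The new SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL() mirrors the existing *_DECL helpers. A usage sketch with hypothetical register, mask and value tables; with autodisable set, DAPM may park the field while the owning widget is powered down:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

static const char * const ex_src_texts[] = { "IN1", "IN2", "IN3" };
static const unsigned int ex_src_values[] = { 0, 2, 3 };

/* hypothetical 2-bit source-select field at register 0x20 */
static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(ex_src_enum, 0x20, 0, 0x3,
					       ex_src_texts, ex_src_values);

static const struct snd_kcontrol_new ex_src_mux =
	SOC_DAPM_ENUM("Input Source", ex_src_enum);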
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index e11e179420a1..df97d1966468 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -31,12 +31,7 @@
31 * ~(sizeof(unsigned int) - 1)) .... 31 * ~(sizeof(unsigned int) - 1)) ....
32 */ 32 */
33 33
34#define SNDRV_CTL_TLVT_CONTAINER 0 /* one level down - group of TLVs */ 34#include <uapi/sound/tlv.h>
35#define SNDRV_CTL_TLVT_DB_SCALE 1 /* dB scale */
36#define SNDRV_CTL_TLVT_DB_LINEAR 2 /* linear volume */
37#define SNDRV_CTL_TLVT_DB_RANGE 3 /* dB range container */
38#define SNDRV_CTL_TLVT_DB_MINMAX 4 /* dB scale with min/max */
39#define SNDRV_CTL_TLVT_DB_MINMAX_MUTE 5 /* dB scale with min/max with mute */
40 35
41#define TLV_ITEM(type, ...) \ 36#define TLV_ITEM(type, ...) \
42 (type), TLV_LENGTH(__VA_ARGS__), __VA_ARGS__ 37 (type), TLV_LENGTH(__VA_ARGS__), __VA_ARGS__
@@ -90,12 +85,4 @@
90 85
91#define TLV_DB_GAIN_MUTE -9999999 86#define TLV_DB_GAIN_MUTE -9999999
92 87
93/*
94 * channel-mapping TLV items
95 * TLV length must match with num_channels
96 */
97#define SNDRV_CTL_TLVT_CHMAP_FIXED 0x101 /* fixed channel position */
98#define SNDRV_CTL_TLVT_CHMAP_VAR 0x102 /* channels freely swappable */
99#define SNDRV_CTL_TLVT_CHMAP_PAIRED 0x103 /* pair-wise swappable */
100
101#endif /* __SOUND_TLV_H */ 88#endif /* __SOUND_TLV_H */
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 54e7af301888..0aedbb2c10e0 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -5,7 +5,6 @@
5#include <linux/configfs.h> 5#include <linux/configfs.h>
6#include <net/sock.h> 6#include <net/sock.h>
7#include <net/tcp.h> 7#include <net/tcp.h>
8#include <scsi/scsi_cmnd.h>
9#include <scsi/iscsi_proto.h> 8#include <scsi/iscsi_proto.h>
10#include <target/target_core_base.h> 9#include <target/target_core_base.h>
11 10
@@ -248,10 +247,6 @@ struct iscsi_conn_ops {
248 u8 DataDigest; /* [0,1] == [None,CRC32C] */ 247 u8 DataDigest; /* [0,1] == [None,CRC32C] */
249 u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */ 248 u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
250 u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */ 249 u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */
251 u8 OFMarker; /* [0,1] == [No,Yes] */
252 u8 IFMarker; /* [0,1] == [No,Yes] */
253 u32 OFMarkInt; /* [1..65535] */
254 u32 IFMarkInt; /* [1..65535] */
255 /* 250 /*
256 * iSER specific connection parameters 251 * iSER specific connection parameters
257 */ 252 */
@@ -532,12 +527,6 @@ struct iscsi_conn {
532 u32 exp_statsn; 527 u32 exp_statsn;
533 /* Per connection status sequence number */ 528 /* Per connection status sequence number */
534 u32 stat_sn; 529 u32 stat_sn;
535 /* IFMarkInt's Current Value */
536 u32 if_marker;
537 /* OFMarkInt's Current Value */
538 u32 of_marker;
539 /* Used for calculating OFMarker offset to next PDU */
540 u32 of_marker_offset;
541#define IPV6_ADDRESS_SPACE 48 530#define IPV6_ADDRESS_SPACE 48
542 unsigned char login_ip[IPV6_ADDRESS_SPACE]; 531 unsigned char login_ip[IPV6_ADDRESS_SPACE];
543 unsigned char local_ip[IPV6_ADDRESS_SPACE]; 532 unsigned char local_ip[IPV6_ADDRESS_SPACE];
@@ -606,6 +595,7 @@ struct iscsi_conn {
606 int bitmap_id; 595 int bitmap_id;
607 int rx_thread_active; 596 int rx_thread_active;
608 struct task_struct *rx_thread; 597 struct task_struct *rx_thread;
598 struct completion rx_login_comp;
609 int tx_thread_active; 599 int tx_thread_active;
610 struct task_struct *tx_thread; 600 struct task_struct *tx_thread;
611 /* list_head for session connection list */ 601 /* list_head for session connection list */
@@ -755,10 +745,10 @@ struct iscsi_node_stat_grps {
755}; 745};
756 746
757struct iscsi_node_acl { 747struct iscsi_node_acl {
748 struct se_node_acl se_node_acl;
758 struct iscsi_node_attrib node_attrib; 749 struct iscsi_node_attrib node_attrib;
759 struct iscsi_node_auth node_auth; 750 struct iscsi_node_auth node_auth;
760 struct iscsi_node_stat_grps node_stat_grps; 751 struct iscsi_node_stat_grps node_stat_grps;
761 struct se_node_acl se_node_acl;
762}; 752};
763 753
764struct iscsi_tpg_attrib { 754struct iscsi_tpg_attrib {
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 5f1225706993..1e5c8f949bae 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -3,18 +3,7 @@
3 3
4#define TRANSPORT_FLAG_PASSTHROUGH 1 4#define TRANSPORT_FLAG_PASSTHROUGH 1
5 5
6struct target_backend_cits { 6struct target_backend_ops {
7 struct config_item_type tb_dev_cit;
8 struct config_item_type tb_dev_attrib_cit;
9 struct config_item_type tb_dev_pr_cit;
10 struct config_item_type tb_dev_wwn_cit;
11 struct config_item_type tb_dev_alua_tg_pt_gps_cit;
12 struct config_item_type tb_dev_stat_cit;
13};
14
15struct se_subsystem_api {
16 struct list_head sub_api_list;
17
18 char name[16]; 7 char name[16];
19 char inquiry_prod[16]; 8 char inquiry_prod[16];
20 char inquiry_rev[4]; 9 char inquiry_rev[4];
@@ -52,7 +41,7 @@ struct se_subsystem_api {
52 int (*format_prot)(struct se_device *); 41 int (*format_prot)(struct se_device *);
53 void (*free_prot)(struct se_device *); 42 void (*free_prot)(struct se_device *);
54 43
55 struct target_backend_cits tb_cits; 44 struct configfs_attribute **tb_dev_attrib_attrs;
56}; 45};
57 46
58struct sbc_ops { 47struct sbc_ops {
@@ -60,12 +49,12 @@ struct sbc_ops {
60 u32, enum dma_data_direction); 49 u32, enum dma_data_direction);
61 sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd); 50 sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
62 sense_reason_t (*execute_write_same)(struct se_cmd *cmd); 51 sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
63 sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd); 52 sense_reason_t (*execute_unmap)(struct se_cmd *cmd,
64 sense_reason_t (*execute_unmap)(struct se_cmd *cmd); 53 sector_t lba, sector_t nolb);
65}; 54};
66 55
67int transport_subsystem_register(struct se_subsystem_api *); 56int transport_backend_register(const struct target_backend_ops *);
68void transport_subsystem_release(struct se_subsystem_api *); 57void target_backend_unregister(const struct target_backend_ops *);
69 58
70void target_complete_cmd(struct se_cmd *, u8); 59void target_complete_cmd(struct se_cmd *, u8);
71void target_complete_cmd_with_length(struct se_cmd *, u8, int); 60void target_complete_cmd_with_length(struct se_cmd *, u8, int);
@@ -79,22 +68,19 @@ sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
79u32 sbc_get_device_rev(struct se_device *dev); 68u32 sbc_get_device_rev(struct se_device *dev);
80u32 sbc_get_device_type(struct se_device *dev); 69u32 sbc_get_device_type(struct se_device *dev);
81sector_t sbc_get_write_same_sectors(struct se_cmd *cmd); 70sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
82sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
83 sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
84 sector_t lba, sector_t nolb),
85 void *priv);
86void sbc_dif_generate(struct se_cmd *); 71void sbc_dif_generate(struct se_cmd *);
87sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int, 72sense_reason_t sbc_dif_verify(struct se_cmd *, sector_t, unsigned int,
88 unsigned int, struct scatterlist *, int); 73 unsigned int, struct scatterlist *, int);
89sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int, 74void sbc_dif_copy_prot(struct se_cmd *, unsigned int, bool,
90 unsigned int, struct scatterlist *, int); 75 struct scatterlist *, int);
91sense_reason_t sbc_dif_read_strip(struct se_cmd *);
92
93void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); 76void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
94int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); 77int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
95int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); 78int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
96int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); 79int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
97 80
81extern struct configfs_attribute *sbc_attrib_attrs[];
82extern struct configfs_attribute *passthrough_attrib_attrs[];
83
98/* core helpers also used by command snooping in pscsi */ 84/* core helpers also used by command snooping in pscsi */
99void *transport_kmap_data_sg(struct se_cmd *); 85void *transport_kmap_data_sg(struct se_cmd *);
100void transport_kunmap_data_sg(struct se_cmd *); 86void transport_kunmap_data_sg(struct se_cmd *);
@@ -103,39 +89,7 @@ int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
103sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, 89sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
104 struct scatterlist *, u32, struct scatterlist *, u32); 90 struct scatterlist *, u32, struct scatterlist *, u32);
105 91
106void array_free(void *array, int n); 92bool target_lun_is_rdonly(struct se_cmd *);
107
108/* From target_core_configfs.c to setup default backend config_item_types */
109void target_core_setup_sub_cits(struct se_subsystem_api *);
110
111/* attribute helpers from target_core_device.c for backend drivers */
112bool se_dev_check_wce(struct se_device *);
113int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
114int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
115int se_dev_set_unmap_granularity(struct se_device *, u32);
116int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
117int se_dev_set_max_write_same_len(struct se_device *, u32);
118int se_dev_set_emulate_model_alias(struct se_device *, int);
119int se_dev_set_emulate_dpo(struct se_device *, int);
120int se_dev_set_emulate_fua_write(struct se_device *, int);
121int se_dev_set_emulate_fua_read(struct se_device *, int);
122int se_dev_set_emulate_write_cache(struct se_device *, int);
123int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
124int se_dev_set_emulate_tas(struct se_device *, int);
125int se_dev_set_emulate_tpu(struct se_device *, int);
126int se_dev_set_emulate_tpws(struct se_device *, int);
127int se_dev_set_emulate_caw(struct se_device *, int);
128int se_dev_set_emulate_3pc(struct se_device *, int);
129int se_dev_set_pi_prot_type(struct se_device *, int);
130int se_dev_set_pi_prot_format(struct se_device *, int);
131int se_dev_set_enforce_pr_isids(struct se_device *, int);
132int se_dev_set_force_pr_aptpl(struct se_device *, int);
133int se_dev_set_is_nonrot(struct se_device *, int);
134int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
135int se_dev_set_queue_depth(struct se_device *, u32);
136int se_dev_set_max_sectors(struct se_device *, u32);
137int se_dev_set_optimal_sectors(struct se_device *, u32);
138int se_dev_set_block_size(struct se_device *, u32);
139sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, 93sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
140 sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); 94 sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
141 95
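With se_subsystem_api replaced by target_backend_ops, backend registration becomes a const-ops affair. A rough sketch, not a complete backend: the names are placeholders and all device-lifecycle and I/O callbacks are omitted:

#include <linux/module.h>
#include <target/target_core_backend.h>

static const struct target_backend_ops ex_backend_ops = {
	.name			= "ex_backend",
	.inquiry_prod		= "EXAMPLE",
	.inquiry_rev		= "0.1",
	/* reuse the default SBC device attributes exported above */
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
	/* remaining callbacks omitted in this sketch */
};

static int __init ex_backend_init(void)
{
	return transport_backend_register(&ex_backend_ops);
}

static void __exit ex_backend_exit(void)
{
	target_backend_unregister(&ex_backend_ops);
}

module_init(ex_backend_init);
module_exit(ex_backend_exit);
MODULE_LICENSE("GPL");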
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
deleted file mode 100644
index 186f7a923570..000000000000
--- a/include/target/target_core_backend_configfs.h
+++ /dev/null
@@ -1,118 +0,0 @@
1#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
2#define TARGET_CORE_BACKEND_CONFIGFS_H
3
4#include <target/configfs_macros.h>
5
6#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \
7static ssize_t _backend##_dev_show_attr_##_name( \
8 struct se_dev_attrib *da, \
9 char *page) \
10{ \
11 return snprintf(page, PAGE_SIZE, "%u\n", \
12 (u32)da->da_dev->dev_attrib._name); \
13}
14
15#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \
16static ssize_t _backend##_dev_store_attr_##_name( \
17 struct se_dev_attrib *da, \
18 const char *page, \
19 size_t count) \
20{ \
21 unsigned long val; \
22 int ret; \
23 \
24 ret = kstrtoul(page, 0, &val); \
25 if (ret < 0) { \
26 pr_err("kstrtoul() failed with ret: %d\n", ret); \
27 return -EINVAL; \
28 } \
29 ret = se_dev_set_##_name(da->da_dev, (u32)val); \
30 \
31 return (!ret) ? count : -EINVAL; \
32}
33
34#define DEF_TB_DEV_ATTRIB(_backend, _name) \
35DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \
36DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
37
38#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \
39DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
40
41CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
42#define TB_DEV_ATTR(_backend, _name, _mode) \
43static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
44 __CONFIGFS_EATTR(_name, _mode, \
45 _backend##_dev_show_attr_##_name, \
46 _backend##_dev_store_attr_##_name);
47
48#define TB_DEV_ATTR_RO(_backend, _name) \
49static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
50 __CONFIGFS_EATTR_RO(_name, \
51 _backend##_dev_show_attr_##_name);
52
53/*
54 * Default list of target backend device attributes as defined by
55 * struct se_dev_attrib
56 */
57
58#define DEF_TB_DEFAULT_ATTRIBS(_backend) \
59 DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \
60 TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \
61 DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \
62 TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \
63 DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \
64 TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \
65 DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \
66 TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \
67 DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \
68 TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \
69 DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \
70 TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
71 DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \
72 TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \
73 DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \
74 TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \
75 DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \
76 TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \
77 DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \
78 TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \
79 DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \
80 TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \
81 DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \
82 TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \
83 DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \
84 TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \
85 DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \
86 TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \
87 DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \
88 TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \
89 DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \
90 TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \
91 DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \
92 TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \
93 DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \
94 TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \
95 DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \
96 TB_DEV_ATTR_RO(_backend, hw_block_size); \
97 DEF_TB_DEV_ATTRIB(_backend, block_size); \
98 TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
99 DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
100 TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
101 DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
102 TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
103 DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
104 TB_DEV_ATTR_RO(_backend, hw_queue_depth); \
105 DEF_TB_DEV_ATTRIB(_backend, queue_depth); \
106 TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \
107 DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \
108 TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \
109 DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \
110 TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
111 DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \
112 TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \
113 DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \
114 TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
115 DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \
116 TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);
117
118#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 480e9f82dfea..17ae2d6a4891 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -6,34 +6,21 @@
6#include <linux/dma-mapping.h> 6#include <linux/dma-mapping.h>
7#include <linux/blkdev.h> 7#include <linux/blkdev.h>
8#include <linux/percpu_ida.h> 8#include <linux/percpu_ida.h>
9#include <scsi/scsi_cmnd.h>
10#include <net/sock.h> 9#include <net/sock.h>
11#include <net/tcp.h> 10#include <net/tcp.h>
12 11
13#define TARGET_CORE_MOD_VERSION "v4.1.0" 12#define TARGET_CORE_VERSION "v5.0"
14#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
15 13
16/* Maximum Number of LUNs per Target Portal Group */
17/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
18#define TRANSPORT_MAX_LUNS_PER_TPG 256
19/* 14/*
20 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. 15 * Maximum size of a CDB that can be stored in se_cmd without allocating
21 * 16 * memory dynamically for the CDB.
22 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
23 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
24 * 16-byte CDBs by default and require an extra allocation for
25 * 32-byte CDBs to because of legacy issues.
26 *
27 * Within TCM Core there are no such legacy limitiations, so we go ahead
28 * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
29 * within all TCM Core and subsystem plugin code.
30 */ 17 */
31#define TCM_MAX_COMMAND_SIZE 32 18#define TCM_MAX_COMMAND_SIZE 32
32/* 19/*
33 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently 20 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
34 * defined 96, but the real limit is 252 (or 260 including the header) 21 * defined 96, but the real limit is 252 (or 260 including the header)
35 */ 22 */
36#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE 23#define TRANSPORT_SENSE_BUFFER 96
37/* Used by transport_send_check_condition_and_sense() */ 24/* Used by transport_send_check_condition_and_sense() */
38#define SPC_SENSE_KEY_OFFSET 2 25#define SPC_SENSE_KEY_OFFSET 2
39#define SPC_ADD_SENSE_LEN_OFFSET 7 26#define SPC_ADD_SENSE_LEN_OFFSET 7
@@ -79,12 +66,6 @@
79#define DA_MAX_WRITE_SAME_LEN 0 66#define DA_MAX_WRITE_SAME_LEN 0
80/* Use a model alias based on the configfs backend device name */ 67/* Use a model alias based on the configfs backend device name */
81#define DA_EMULATE_MODEL_ALIAS 0 68#define DA_EMULATE_MODEL_ALIAS 0
82/* Emulation for Direct Page Out */
83#define DA_EMULATE_DPO 0
84/* Emulation for Forced Unit Access WRITEs */
85#define DA_EMULATE_FUA_WRITE 1
86/* Emulation for Forced Unit Access READs */
87#define DA_EMULATE_FUA_READ 0
88/* Emulation for WriteCache and SYNCHRONIZE_CACHE */ 69/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
89#define DA_EMULATE_WRITE_CACHE 0 70#define DA_EMULATE_WRITE_CACHE 0
90/* Emulation for UNIT ATTENTION Interlock Control */ 71/* Emulation for UNIT ATTENTION Interlock Control */
@@ -125,18 +106,6 @@ enum hba_flags_table {
125 HBA_FLAGS_PSCSI_MODE = 0x02, 106 HBA_FLAGS_PSCSI_MODE = 0x02,
126}; 107};
127 108
128/* struct se_lun->lun_status */
129enum transport_lun_status_table {
130 TRANSPORT_LUN_STATUS_FREE = 0,
131 TRANSPORT_LUN_STATUS_ACTIVE = 1,
132};
133
134/* struct se_portal_group->se_tpg_type */
135enum transport_tpg_type_table {
136 TRANSPORT_TPG_TYPE_NORMAL = 0,
137 TRANSPORT_TPG_TYPE_DISCOVERY = 1,
138};
139
140/* Special transport agnostic struct se_cmd->t_states */ 109/* Special transport agnostic struct se_cmd->t_states */
141enum transport_state_table { 110enum transport_state_table {
142 TRANSPORT_NO_STATE = 0, 111 TRANSPORT_NO_STATE = 0,
@@ -167,14 +136,13 @@ enum se_cmd_flags_table {
167 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, 136 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
168 SCF_COMPARE_AND_WRITE = 0x00080000, 137 SCF_COMPARE_AND_WRITE = 0x00080000,
169 SCF_COMPARE_AND_WRITE_POST = 0x00100000, 138 SCF_COMPARE_AND_WRITE_POST = 0x00100000,
139 SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
170}; 140};
171 141
172/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ 142/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
173enum transport_lunflags_table { 143enum transport_lunflags_table {
174 TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00, 144 TRANSPORT_LUNFLAGS_READ_ONLY = 0x01,
175 TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01, 145 TRANSPORT_LUNFLAGS_READ_WRITE = 0x02,
176 TRANSPORT_LUNFLAGS_READ_ONLY = 0x02,
177 TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
178}; 146};
179 147
180/* 148/*
@@ -323,22 +291,13 @@ struct t10_alua_tg_pt_gp {
323 struct se_device *tg_pt_gp_dev; 291 struct se_device *tg_pt_gp_dev;
324 struct config_group tg_pt_gp_group; 292 struct config_group tg_pt_gp_group;
325 struct list_head tg_pt_gp_list; 293 struct list_head tg_pt_gp_list;
326 struct list_head tg_pt_gp_mem_list; 294 struct list_head tg_pt_gp_lun_list;
327 struct se_port *tg_pt_gp_alua_port; 295 struct se_lun *tg_pt_gp_alua_lun;
328 struct se_node_acl *tg_pt_gp_alua_nacl; 296 struct se_node_acl *tg_pt_gp_alua_nacl;
329 struct delayed_work tg_pt_gp_transition_work; 297 struct delayed_work tg_pt_gp_transition_work;
330 struct completion *tg_pt_gp_transition_complete; 298 struct completion *tg_pt_gp_transition_complete;
331}; 299};
332 300
333struct t10_alua_tg_pt_gp_member {
334 bool tg_pt_gp_assoc;
335 atomic_t tg_pt_gp_mem_ref_cnt;
336 spinlock_t tg_pt_gp_mem_lock;
337 struct t10_alua_tg_pt_gp *tg_pt_gp;
338 struct se_port *tg_pt;
339 struct list_head tg_pt_gp_mem_list;
340};
341
342struct t10_vpd { 301struct t10_vpd {
343 unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; 302 unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
344 int protocol_identifier_set; 303 int protocol_identifier_set;
@@ -383,15 +342,16 @@ struct t10_pr_registration {
383 int pr_res_scope; 342 int pr_res_scope;
384 /* Used for fabric initiator WWPNs using a ISID */ 343 /* Used for fabric initiator WWPNs using a ISID */
385 bool isid_present_at_reg; 344 bool isid_present_at_reg;
386 u32 pr_res_mapped_lun; 345 u64 pr_res_mapped_lun;
387 u32 pr_aptpl_target_lun; 346 u64 pr_aptpl_target_lun;
347 u16 tg_pt_sep_rtpi;
388 u32 pr_res_generation; 348 u32 pr_res_generation;
389 u64 pr_reg_bin_isid; 349 u64 pr_reg_bin_isid;
390 u64 pr_res_key; 350 u64 pr_res_key;
391 atomic_t pr_res_holders; 351 atomic_t pr_res_holders;
392 struct se_node_acl *pr_reg_nacl; 352 struct se_node_acl *pr_reg_nacl;
353 /* Used by ALL_TG_PT=1 registration with deve->pr_ref taken */
393 struct se_dev_entry *pr_reg_deve; 354 struct se_dev_entry *pr_reg_deve;
394 struct se_lun *pr_reg_tg_pt_lun;
395 struct list_head pr_reg_list; 355 struct list_head pr_reg_list;
396 struct list_head pr_reg_abort_list; 356 struct list_head pr_reg_abort_list;
397 struct list_head pr_reg_aptpl_list; 357 struct list_head pr_reg_aptpl_list;
@@ -431,7 +391,7 @@ struct se_tmr_req {
431 u8 response; 391 u8 response;
432 int call_transport; 392 int call_transport;
433 /* Reference to ITT that Task Mgmt should be performed */ 393 /* Reference to ITT that Task Mgmt should be performed */
434 u32 ref_task_tag; 394 u64 ref_task_tag;
435 void *fabric_tmr_ptr; 395 void *fabric_tmr_ptr;
436 struct se_cmd *task_cmd; 396 struct se_cmd *task_cmd;
437 struct se_device *tmr_dev; 397 struct se_device *tmr_dev;
@@ -484,6 +444,7 @@ struct se_cmd {
484 u8 scsi_asc; 444 u8 scsi_asc;
485 u8 scsi_ascq; 445 u8 scsi_ascq;
486 u16 scsi_sense_length; 446 u16 scsi_sense_length;
447 u64 tag; /* SAM command identifier aka task tag */
487 /* Delay for ALUA Active/NonOptimized state access in milliseconds */ 448 /* Delay for ALUA Active/NonOptimized state access in milliseconds */
488 int alua_nonop_delay; 449 int alua_nonop_delay;
489 /* See include/linux/dma-mapping.h */ 450 /* See include/linux/dma-mapping.h */
@@ -502,7 +463,7 @@ struct se_cmd {
502 /* Total size in bytes associated with command */ 463 /* Total size in bytes associated with command */
503 u32 data_length; 464 u32 data_length;
504 u32 residual_count; 465 u32 residual_count;
505 u32 orig_fe_lun; 466 u64 orig_fe_lun;
506 /* Persistent Reservation key */ 467 /* Persistent Reservation key */
507 u64 pr_res_key; 468 u64 pr_res_key;
508 /* Used for sense data */ 469 /* Used for sense data */
@@ -510,7 +471,6 @@ struct se_cmd {
510 struct list_head se_delayed_node; 471 struct list_head se_delayed_node;
511 struct list_head se_qf_node; 472 struct list_head se_qf_node;
512 struct se_device *se_dev; 473 struct se_device *se_dev;
513 struct se_dev_entry *se_deve;
514 struct se_lun *se_lun; 474 struct se_lun *se_lun;
515 /* Only used for internal passthrough and legacy TCM fabric modules */ 475 /* Only used for internal passthrough and legacy TCM fabric modules */
516 struct se_session *se_sess; 476 struct se_session *se_sess;
@@ -520,9 +480,8 @@ struct se_cmd {
520 struct kref cmd_kref; 480 struct kref cmd_kref;
521 const struct target_core_fabric_ops *se_tfo; 481 const struct target_core_fabric_ops *se_tfo;
522 sense_reason_t (*execute_cmd)(struct se_cmd *); 482 sense_reason_t (*execute_cmd)(struct se_cmd *);
523 sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
524 u32, enum dma_data_direction);
525 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); 483 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
484 void *protocol_data;
526 485
527 unsigned char *t_task_cdb; 486 unsigned char *t_task_cdb;
528 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; 487 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
@@ -578,7 +537,6 @@ struct se_cmd {
578struct se_ua { 537struct se_ua {
579 u8 ua_asc; 538 u8 ua_asc;
580 u8 ua_ascq; 539 u8 ua_ascq;
581 struct se_node_acl *ua_nacl;
582 struct list_head ua_nacl_list; 540 struct list_head ua_nacl_list;
583}; 541};
584 542
@@ -594,10 +552,10 @@ struct se_node_acl {
594 char acl_tag[MAX_ACL_TAG_SIZE]; 552 char acl_tag[MAX_ACL_TAG_SIZE];
595 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 553 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
596 atomic_t acl_pr_ref_count; 554 atomic_t acl_pr_ref_count;
597 struct se_dev_entry **device_list; 555 struct hlist_head lun_entry_hlist;
598 struct se_session *nacl_sess; 556 struct se_session *nacl_sess;
599 struct se_portal_group *se_tpg; 557 struct se_portal_group *se_tpg;
600 spinlock_t device_list_lock; 558 struct mutex lun_entry_mutex;
601 spinlock_t nacl_sess_lock; 559 spinlock_t nacl_sess_lock;
602 struct config_group acl_group; 560 struct config_group acl_group;
603 struct config_group acl_attrib_group; 561 struct config_group acl_attrib_group;
@@ -641,33 +599,37 @@ struct se_ml_stat_grps {
641 599
642struct se_lun_acl { 600struct se_lun_acl {
643 char initiatorname[TRANSPORT_IQN_LEN]; 601 char initiatorname[TRANSPORT_IQN_LEN];
644 u32 mapped_lun; 602 u64 mapped_lun;
645 struct se_node_acl *se_lun_nacl; 603 struct se_node_acl *se_lun_nacl;
646 struct se_lun *se_lun; 604 struct se_lun *se_lun;
647 struct list_head lacl_list;
648 struct config_group se_lun_group; 605 struct config_group se_lun_group;
649 struct se_ml_stat_grps ml_stat_grps; 606 struct se_ml_stat_grps ml_stat_grps;
650}; 607};
651 608
652struct se_dev_entry { 609struct se_dev_entry {
653 bool def_pr_registered;
654 /* See transport_lunflags_table */ 610 /* See transport_lunflags_table */
655 u32 lun_flags; 611 u64 mapped_lun;
656 u32 mapped_lun;
657 u32 total_cmds;
658 u64 pr_res_key; 612 u64 pr_res_key;
659 u64 creation_time; 613 u64 creation_time;
614 u32 lun_flags;
660 u32 attach_count; 615 u32 attach_count;
661 u64 read_bytes; 616 atomic_long_t total_cmds;
662 u64 write_bytes; 617 atomic_long_t read_bytes;
618 atomic_long_t write_bytes;
663 atomic_t ua_count; 619 atomic_t ua_count;
664 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 620 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
665 atomic_t pr_ref_count; 621 struct kref pr_kref;
666 struct se_lun_acl *se_lun_acl; 622 struct completion pr_comp;
623 struct se_lun_acl __rcu *se_lun_acl;
667 spinlock_t ua_lock; 624 spinlock_t ua_lock;
668 struct se_lun *se_lun; 625 struct se_lun __rcu *se_lun;
626#define DEF_PR_REG_ACTIVE 1
627 unsigned long deve_flags;
669 struct list_head alua_port_list; 628 struct list_head alua_port_list;
629 struct list_head lun_link;
670 struct list_head ua_list; 630 struct list_head ua_list;
631 struct hlist_node link;
632 struct rcu_head rcu_head;
671}; 633};
672 634
673struct se_dev_attrib { 635struct se_dev_attrib {
@@ -712,25 +674,48 @@ struct se_port_stat_grps {
712 struct config_group scsi_transport_group; 674 struct config_group scsi_transport_group;
713}; 675};
714 676
677struct scsi_port_stats {
678 atomic_long_t cmd_pdus;
679 atomic_long_t tx_data_octets;
680 atomic_long_t rx_data_octets;
681};
682
715struct se_lun { 683struct se_lun {
684 u64 unpacked_lun;
716#define SE_LUN_LINK_MAGIC 0xffff7771 685#define SE_LUN_LINK_MAGIC 0xffff7771
717 u32 lun_link_magic; 686 u32 lun_link_magic;
718 /* See transport_lun_status_table */
719 enum transport_lun_status_table lun_status;
720 u32 lun_access; 687 u32 lun_access;
721 u32 lun_flags; 688 u32 lun_flags;
722 u32 unpacked_lun; 689 u32 lun_index;
690
 691	/* RELATIVE TARGET PORT IDENTIFIER */
692 u16 lun_rtpi;
723 atomic_t lun_acl_count; 693 atomic_t lun_acl_count;
724 spinlock_t lun_acl_lock; 694 struct se_device __rcu *lun_se_dev;
725 spinlock_t lun_sep_lock; 695
726 struct completion lun_shutdown_comp; 696 struct list_head lun_deve_list;
727 struct list_head lun_acl_list; 697 spinlock_t lun_deve_lock;
728 struct se_device *lun_se_dev; 698
729 struct se_port *lun_sep; 699 /* ALUA state */
700 int lun_tg_pt_secondary_stat;
701 int lun_tg_pt_secondary_write_md;
702 atomic_t lun_tg_pt_secondary_offline;
703 struct mutex lun_tg_pt_md_mutex;
704
705 /* ALUA target port group linkage */
706 struct list_head lun_tg_pt_gp_link;
707 struct t10_alua_tg_pt_gp *lun_tg_pt_gp;
708 spinlock_t lun_tg_pt_gp_lock;
709
710 struct se_portal_group *lun_tpg;
711 struct scsi_port_stats lun_stats;
730 struct config_group lun_group; 712 struct config_group lun_group;
731 struct se_port_stat_grps port_stat_grps; 713 struct se_port_stat_grps port_stat_grps;
732 struct completion lun_ref_comp; 714 struct completion lun_ref_comp;
733 struct percpu_ref lun_ref; 715 struct percpu_ref lun_ref;
716 struct list_head lun_dev_link;
717 struct hlist_node link;
718 struct rcu_head rcu_head;
734}; 719};
735 720
736struct se_dev_stat_grps { 721struct se_dev_stat_grps {
@@ -753,7 +738,6 @@ struct se_device {
753#define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 738#define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004
754#define DF_USING_UDEV_PATH 0x00000008 739#define DF_USING_UDEV_PATH 0x00000008
755#define DF_USING_ALIAS 0x00000010 740#define DF_USING_ALIAS 0x00000010
756 u32 dev_port_count;
757 /* Physical device queue depth */ 741 /* Physical device queue depth */
758 u32 queue_depth; 742 u32 queue_depth;
759 /* Used for SPC-2 reservations enforce of ISIDs */ 743 /* Used for SPC-2 reservations enforce of ISIDs */
@@ -770,7 +754,7 @@ struct se_device {
770 atomic_t dev_ordered_id; 754 atomic_t dev_ordered_id;
771 atomic_t dev_ordered_sync; 755 atomic_t dev_ordered_sync;
772 atomic_t dev_qf_count; 756 atomic_t dev_qf_count;
773 int export_count; 757 u32 export_count;
774 spinlock_t delayed_cmd_lock; 758 spinlock_t delayed_cmd_lock;
775 spinlock_t execute_task_lock; 759 spinlock_t execute_task_lock;
776 spinlock_t dev_reservation_lock; 760 spinlock_t dev_reservation_lock;
@@ -812,12 +796,15 @@ struct se_device {
812#define SE_UDEV_PATH_LEN 512 /* must be less than PAGE_SIZE */ 796#define SE_UDEV_PATH_LEN 512 /* must be less than PAGE_SIZE */
813 unsigned char udev_path[SE_UDEV_PATH_LEN]; 797 unsigned char udev_path[SE_UDEV_PATH_LEN];
814 /* Pointer to template of function pointers for transport */ 798 /* Pointer to template of function pointers for transport */
815 struct se_subsystem_api *transport; 799 const struct target_backend_ops *transport;
816 /* Linked list for struct se_hba struct se_device list */ 800 /* Linked list for struct se_hba struct se_device list */
817 struct list_head dev_list; 801 struct list_head dev_list;
818 struct se_lun xcopy_lun; 802 struct se_lun xcopy_lun;
819 /* Protection Information */ 803 /* Protection Information */
820 int prot_length; 804 int prot_length;
805 /* For se_lun->lun_se_dev RCU read-side critical access */
806 u32 hba_index;
807 struct rcu_head rcu_head;
821}; 808};
822 809
823struct se_hba { 810struct se_hba {
@@ -834,33 +821,7 @@ struct se_hba {
834 spinlock_t device_lock; 821 spinlock_t device_lock;
835 struct config_group hba_group; 822 struct config_group hba_group;
836 struct mutex hba_access_mutex; 823 struct mutex hba_access_mutex;
837 struct se_subsystem_api *transport; 824 struct target_backend *backend;
838};
839
840struct scsi_port_stats {
841 u64 cmd_pdus;
842 u64 tx_data_octets;
843 u64 rx_data_octets;
844};
845
846struct se_port {
847 /* RELATIVE TARGET PORT IDENTIFER */
848 u16 sep_rtpi;
849 int sep_tg_pt_secondary_stat;
850 int sep_tg_pt_secondary_write_md;
851 u32 sep_index;
852 struct scsi_port_stats sep_stats;
853 /* Used for ALUA Target Port Groups membership */
854 atomic_t sep_tg_pt_secondary_offline;
855 /* Used for PR ALL_TG_PT=1 */
856 atomic_t sep_tg_pt_ref_cnt;
857 spinlock_t sep_alua_lock;
858 struct mutex sep_tg_pt_md_mutex;
859 struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
860 struct se_lun *sep_lun;
861 struct se_portal_group *sep_tpg;
862 struct list_head sep_alua_list;
863 struct list_head sep_list;
864}; 825};
865 826
866struct se_tpg_np { 827struct se_tpg_np {
@@ -869,24 +830,26 @@ struct se_tpg_np {
869}; 830};
870 831
871struct se_portal_group { 832struct se_portal_group {
872 /* Type of target portal group, see transport_tpg_type_table */ 833 /*
873 enum transport_tpg_type_table se_tpg_type; 834 * PROTOCOL IDENTIFIER value per SPC4, 7.5.1.
835 *
836 * Negative values can be used by fabric drivers for internal use TPGs.
837 */
838 int proto_id;
874 /* Number of ACLed Initiator Nodes for this TPG */ 839 /* Number of ACLed Initiator Nodes for this TPG */
875 u32 num_node_acls; 840 u32 num_node_acls;
876 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 841 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
877 atomic_t tpg_pr_ref_count; 842 atomic_t tpg_pr_ref_count;
878 /* Spinlock for adding/removing ACLed Nodes */ 843 /* Spinlock for adding/removing ACLed Nodes */
879 spinlock_t acl_node_lock; 844 struct mutex acl_node_mutex;
880 /* Spinlock for adding/removing sessions */ 845 /* Spinlock for adding/removing sessions */
881 spinlock_t session_lock; 846 spinlock_t session_lock;
882 spinlock_t tpg_lun_lock; 847 struct mutex tpg_lun_mutex;
883 /* Pointer to $FABRIC_MOD portal group */
884 void *se_tpg_fabric_ptr;
885 struct list_head se_tpg_node; 848 struct list_head se_tpg_node;
886 /* linked list for initiator ACL list */ 849 /* linked list for initiator ACL list */
887 struct list_head acl_node_list; 850 struct list_head acl_node_list;
888 struct se_lun **tpg_lun_list; 851 struct hlist_head tpg_lun_hlist;
889 struct se_lun tpg_virt_lun0; 852 struct se_lun *tpg_virt_lun0;
890 /* List of TCM sessions associated with this TPG */ 853 /* List of TCM sessions associated with this TPG */
891 struct list_head tpg_sess_list; 854 struct list_head tpg_sess_list;
892 /* Pointer to $FABRIC_MOD dependent code */ 855 /* Pointer to $FABRIC_MOD dependent code */
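The se_portal_group hunk above replaces the old transport_tpg_type_table enum and the $FABRIC_MOD back-pointer with a single SPC-4 PROTOCOL IDENTIFIER, and core_tpg_register() (shown further down in this diff) now takes that identifier as its trailing int. A minimal sketch of how a fabric driver might register a TPG under the new layout, assuming the SCSI_PROTOCOL_ISCSI constant from <scsi/scsi_proto.h>; my_make_tpg, wwn and tpg are hypothetical names, not part of this diff:

#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static int my_make_tpg(struct se_wwn *wwn, struct se_portal_group *tpg)
{
	/* proto_id per SPC-4 7.5.1; negative values remain fabric-internal */
	return core_tpg_register(wwn, tpg, SCSI_PROTOCOL_ISCSI);
}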
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
deleted file mode 100644
index b99c01170392..000000000000
--- a/include/target/target_core_configfs.h
+++ /dev/null
@@ -1,48 +0,0 @@
1#define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION
2
3#define TARGET_CORE_CONFIG_ROOT "/sys/kernel/config"
4
5#define TARGET_CORE_NAME_MAX_LEN 64
6#define TARGET_FABRIC_NAME_SIZE 32
7
8struct target_fabric_configfs_template {
9 struct config_item_type tfc_discovery_cit;
10 struct config_item_type tfc_wwn_cit;
11 struct config_item_type tfc_wwn_fabric_stats_cit;
12 struct config_item_type tfc_tpg_cit;
13 struct config_item_type tfc_tpg_base_cit;
14 struct config_item_type tfc_tpg_lun_cit;
15 struct config_item_type tfc_tpg_port_cit;
16 struct config_item_type tfc_tpg_port_stat_cit;
17 struct config_item_type tfc_tpg_np_cit;
18 struct config_item_type tfc_tpg_np_base_cit;
19 struct config_item_type tfc_tpg_attrib_cit;
20 struct config_item_type tfc_tpg_auth_cit;
21 struct config_item_type tfc_tpg_param_cit;
22 struct config_item_type tfc_tpg_nacl_cit;
23 struct config_item_type tfc_tpg_nacl_base_cit;
24 struct config_item_type tfc_tpg_nacl_attrib_cit;
25 struct config_item_type tfc_tpg_nacl_auth_cit;
26 struct config_item_type tfc_tpg_nacl_param_cit;
27 struct config_item_type tfc_tpg_nacl_stat_cit;
28 struct config_item_type tfc_tpg_mappedlun_cit;
29 struct config_item_type tfc_tpg_mappedlun_stat_cit;
30};
31
32struct target_fabric_configfs {
33 char tf_name[TARGET_FABRIC_NAME_SIZE];
34 atomic_t tf_access_cnt;
35 struct list_head tf_list;
36 struct config_group tf_group;
37 struct config_group tf_disc_group;
38 struct config_group *tf_default_groups[2];
39 /* Pointer to fabric's config_item */
40 struct config_item *tf_fabric;
41 /* Passed from fabric modules */
42 struct config_item_type *tf_fabric_cit;
43 /* Pointer to fabric's struct module */
44 struct module *tf_module;
45 struct target_core_fabric_ops tf_ops;
46 struct target_fabric_configfs_template tf_cit_tmpl;
47};
48
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 0f4dc3768587..18afef91b447 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -4,20 +4,11 @@
4struct target_core_fabric_ops { 4struct target_core_fabric_ops {
5 struct module *module; 5 struct module *module;
6 const char *name; 6 const char *name;
7 size_t node_acl_size;
7 char *(*get_fabric_name)(void); 8 char *(*get_fabric_name)(void);
8 u8 (*get_fabric_proto_ident)(struct se_portal_group *);
9 char *(*tpg_get_wwn)(struct se_portal_group *); 9 char *(*tpg_get_wwn)(struct se_portal_group *);
10 u16 (*tpg_get_tag)(struct se_portal_group *); 10 u16 (*tpg_get_tag)(struct se_portal_group *);
11 u32 (*tpg_get_default_depth)(struct se_portal_group *); 11 u32 (*tpg_get_default_depth)(struct se_portal_group *);
12 u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
13 struct se_node_acl *,
14 struct t10_pr_registration *, int *,
15 unsigned char *);
16 u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
17 struct se_node_acl *,
18 struct t10_pr_registration *, int *);
19 char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
20 const char *, u32 *, char **);
21 int (*tpg_check_demo_mode)(struct se_portal_group *); 12 int (*tpg_check_demo_mode)(struct se_portal_group *);
22 int (*tpg_check_demo_mode_cache)(struct se_portal_group *); 13 int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
23 int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); 14 int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
@@ -36,10 +27,6 @@ struct target_core_fabric_ops {
36 * WRITE_STRIP and READ_INSERT operations. 27 * WRITE_STRIP and READ_INSERT operations.
37 */ 28 */
38 int (*tpg_check_prot_fabric_only)(struct se_portal_group *); 29 int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
39 struct se_node_acl *(*tpg_alloc_fabric_acl)(
40 struct se_portal_group *);
41 void (*tpg_release_fabric_acl)(struct se_portal_group *,
42 struct se_node_acl *);
43 u32 (*tpg_get_inst_index)(struct se_portal_group *); 30 u32 (*tpg_get_inst_index)(struct se_portal_group *);
44 /* 31 /*
45 * Optional to release struct se_cmd and fabric dependent allocated 32 * Optional to release struct se_cmd and fabric dependent allocated
@@ -50,7 +37,6 @@ struct target_core_fabric_ops {
50 */ 37 */
51 int (*check_stop_free)(struct se_cmd *); 38 int (*check_stop_free)(struct se_cmd *);
52 void (*release_cmd)(struct se_cmd *); 39 void (*release_cmd)(struct se_cmd *);
53 void (*put_session)(struct se_session *);
54 /* 40 /*
55 * Called with spin_lock_bh(struct se_portal_group->session_lock) held. 41 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
56 */ 42 */
@@ -66,7 +52,6 @@ struct target_core_fabric_ops {
66 int (*write_pending)(struct se_cmd *); 52 int (*write_pending)(struct se_cmd *);
67 int (*write_pending_status)(struct se_cmd *); 53 int (*write_pending_status)(struct se_cmd *);
68 void (*set_default_node_attributes)(struct se_node_acl *); 54 void (*set_default_node_attributes)(struct se_node_acl *);
69 u32 (*get_task_tag)(struct se_cmd *);
70 int (*get_cmd_state)(struct se_cmd *); 55 int (*get_cmd_state)(struct se_cmd *);
71 int (*queue_data_in)(struct se_cmd *); 56 int (*queue_data_in)(struct se_cmd *);
72 int (*queue_status)(struct se_cmd *); 57 int (*queue_status)(struct se_cmd *);
@@ -88,9 +73,8 @@ struct target_core_fabric_ops {
88 struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *, 73 struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
89 struct config_group *, const char *); 74 struct config_group *, const char *);
90 void (*fabric_drop_np)(struct se_tpg_np *); 75 void (*fabric_drop_np)(struct se_tpg_np *);
91 struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *, 76 int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
92 struct config_group *, const char *); 77 void (*fabric_cleanup_nodeacl)(struct se_node_acl *);
93 void (*fabric_drop_nodeacl)(struct se_node_acl *);
94 78
95 struct configfs_attribute **tfc_discovery_attrs; 79 struct configfs_attribute **tfc_discovery_attrs;
96 struct configfs_attribute **tfc_wwn_attrs; 80 struct configfs_attribute **tfc_wwn_attrs;
@@ -132,16 +116,16 @@ void transport_deregister_session(struct se_session *);
132void transport_init_se_cmd(struct se_cmd *, 116void transport_init_se_cmd(struct se_cmd *,
133 const struct target_core_fabric_ops *, 117 const struct target_core_fabric_ops *,
134 struct se_session *, u32, int, int, unsigned char *); 118 struct se_session *, u32, int, int, unsigned char *);
135sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32); 119sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
136sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); 120sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
137int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *, 121int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
138 unsigned char *, unsigned char *, u32, u32, int, int, int, 122 unsigned char *, unsigned char *, u64, u32, int, int, int,
139 struct scatterlist *, u32, struct scatterlist *, u32, 123 struct scatterlist *, u32, struct scatterlist *, u32,
140 struct scatterlist *, u32); 124 struct scatterlist *, u32);
141int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, 125int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
142 unsigned char *, u32, u32, int, int, int); 126 unsigned char *, u64, u32, int, int, int);
143int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 127int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
144 unsigned char *sense, u32 unpacked_lun, 128 unsigned char *sense, u64 unpacked_lun,
145 void *fabric_tmr_ptr, unsigned char tm_type, 129 void *fabric_tmr_ptr, unsigned char tm_type,
146 gfp_t, unsigned int, int); 130 gfp_t, unsigned int, int);
147int transport_handle_cdb_direct(struct se_cmd *); 131int transport_handle_cdb_direct(struct se_cmd *);
@@ -155,8 +139,8 @@ bool transport_wait_for_tasks(struct se_cmd *);
155int transport_check_aborted_status(struct se_cmd *, int); 139int transport_check_aborted_status(struct se_cmd *, int);
156int transport_send_check_condition_and_sense(struct se_cmd *, 140int transport_send_check_condition_and_sense(struct se_cmd *,
157 sense_reason_t, int); 141 sense_reason_t, int);
158int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); 142int target_get_sess_cmd(struct se_cmd *, bool);
159int target_put_sess_cmd(struct se_session *, struct se_cmd *); 143int target_put_sess_cmd(struct se_cmd *);
160void target_sess_cmd_list_set_waiting(struct se_session *); 144void target_sess_cmd_list_set_waiting(struct se_session *);
161void target_wait_for_sess_cmds(struct se_session *); 145void target_wait_for_sess_cmds(struct se_session *);
162 146
@@ -167,52 +151,19 @@ void core_tmr_release_req(struct se_tmr_req *);
167int transport_generic_handle_tmr(struct se_cmd *); 151int transport_generic_handle_tmr(struct se_cmd *);
168void transport_generic_request_failure(struct se_cmd *, sense_reason_t); 152void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
169void __target_execute_cmd(struct se_cmd *); 153void __target_execute_cmd(struct se_cmd *);
170int transport_lookup_tmr_lun(struct se_cmd *, u32); 154int transport_lookup_tmr_lun(struct se_cmd *, u64);
171 155
172struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, 156struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
173 unsigned char *); 157 unsigned char *);
174struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, 158struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
175 unsigned char *); 159 unsigned char *);
176void core_tpg_clear_object_luns(struct se_portal_group *);
177struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *,
178 struct se_node_acl *, const char *, u32);
179int core_tpg_del_initiator_node_acl(struct se_portal_group *,
180 struct se_node_acl *, int);
181int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *, 160int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
182 unsigned char *, u32, int); 161 unsigned char *, u32, int);
183int core_tpg_set_initiator_node_tag(struct se_portal_group *, 162int core_tpg_set_initiator_node_tag(struct se_portal_group *,
184 struct se_node_acl *, const char *); 163 struct se_node_acl *, const char *);
185int core_tpg_register(const struct target_core_fabric_ops *, 164int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
186 struct se_wwn *, struct se_portal_group *, void *, int);
187int core_tpg_deregister(struct se_portal_group *); 165int core_tpg_deregister(struct se_portal_group *);
188 166
189/* SAS helpers */
190u8 sas_get_fabric_proto_ident(struct se_portal_group *);
191u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
192 struct t10_pr_registration *, int *, unsigned char *);
193u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
194 struct t10_pr_registration *, int *);
195char *sas_parse_pr_out_transport_id(struct se_portal_group *, const char *,
196 u32 *, char **);
197
198/* FC helpers */
199u8 fc_get_fabric_proto_ident(struct se_portal_group *);
200u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
201 struct t10_pr_registration *, int *, unsigned char *);
202u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
203 struct t10_pr_registration *, int *);
204char *fc_parse_pr_out_transport_id(struct se_portal_group *, const char *,
205 u32 *, char **);
206
207/* iSCSI helpers */
208u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
209u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
210 struct t10_pr_registration *, int *, unsigned char *);
211u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
212 struct t10_pr_registration *, int *);
213char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
214 u32 *, char **);
215
216/* 167/*
217 * The LIO target core uses DMA_TO_DEVICE to mean that data is going 168 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
218 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean 169 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
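Two API narrowings in this header are worth noting: the sess_cmd reference helpers lose their struct se_session argument (the session is presumably reached through se_cmd->se_sess inside the core), and unpacked LUNs widen from u32 to u64 throughout the submission paths. A hedged before/after sketch for a fabric caller; my_submit is an illustrative name only:

static int my_submit(struct se_cmd *cmd)
{
	int ret;

	/* old: ret = target_get_sess_cmd(cmd->se_sess, cmd, true); */
	ret = target_get_sess_cmd(cmd, true);
	if (ret)
		return ret;

	/* ... hand the command to the core ... */

	/* old: target_put_sess_cmd(cmd->se_sess, cmd); */
	return target_put_sess_cmd(cmd);
}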
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 02e1003568a4..09b3880105a9 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -87,7 +87,8 @@
87#define DECLARE_TRACE(name, proto, args) 87#define DECLARE_TRACE(name, proto, args)
88 88
89#ifdef CONFIG_EVENT_TRACING 89#ifdef CONFIG_EVENT_TRACING
90#include <trace/ftrace.h> 90#include <trace/trace_events.h>
91#include <trace/perf.h>
91#endif 92#endif
92 93
93#undef TRACE_EVENT 94#undef TRACE_EVENT
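Consumers of define_trace.h are unaffected by the split above: the single <trace/ftrace.h> include is simply replaced by <trace/trace_events.h> plus the new <trace/perf.h>. The usual pattern stays the same; "myevents" below is a placeholder header name:

/* In exactly one .c file, so define_trace.h emits the event bodies
 * (now pulled in via trace/trace_events.h and trace/perf.h): */
#define CREATE_TRACE_POINTS
#include <trace/events/myevents.h>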
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 7f79cf459591..0b73af9be12f 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1117,61 +1117,6 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
1117 TP_ARGS(wq) 1117 TP_ARGS(wq)
1118); 1118);
1119 1119
1120#define show_oper_type(type) \
1121 __print_symbolic(type, \
1122 { BTRFS_QGROUP_OPER_ADD_EXCL, "OPER_ADD_EXCL" }, \
1123 { BTRFS_QGROUP_OPER_ADD_SHARED, "OPER_ADD_SHARED" }, \
1124 { BTRFS_QGROUP_OPER_SUB_EXCL, "OPER_SUB_EXCL" }, \
1125 { BTRFS_QGROUP_OPER_SUB_SHARED, "OPER_SUB_SHARED" })
1126
1127DECLARE_EVENT_CLASS(btrfs_qgroup_oper,
1128
1129 TP_PROTO(struct btrfs_qgroup_operation *oper),
1130
1131 TP_ARGS(oper),
1132
1133 TP_STRUCT__entry(
1134 __field( u64, ref_root )
1135 __field( u64, bytenr )
1136 __field( u64, num_bytes )
1137 __field( u64, seq )
1138 __field( int, type )
1139 __field( u64, elem_seq )
1140 ),
1141
1142 TP_fast_assign(
1143 __entry->ref_root = oper->ref_root;
1144 __entry->bytenr = oper->bytenr,
1145 __entry->num_bytes = oper->num_bytes;
1146 __entry->seq = oper->seq;
1147 __entry->type = oper->type;
1148 __entry->elem_seq = oper->elem.seq;
1149 ),
1150
1151 TP_printk("ref_root = %llu, bytenr = %llu, num_bytes = %llu, "
1152 "seq = %llu, elem.seq = %llu, type = %s",
1153 (unsigned long long)__entry->ref_root,
1154 (unsigned long long)__entry->bytenr,
1155 (unsigned long long)__entry->num_bytes,
1156 (unsigned long long)__entry->seq,
1157 (unsigned long long)__entry->elem_seq,
1158 show_oper_type(__entry->type))
1159);
1160
1161DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_account,
1162
1163 TP_PROTO(struct btrfs_qgroup_operation *oper),
1164
1165 TP_ARGS(oper)
1166);
1167
1168DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_record_ref,
1169
1170 TP_PROTO(struct btrfs_qgroup_operation *oper),
1171
1172 TP_ARGS(oper)
1173);
1174
1175#endif /* _TRACE_BTRFS_H */ 1120#endif /* _TRACE_BTRFS_H */
1176 1121
1177/* This part must be outside protection */ 1122/* This part must be outside protection */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 08ec3dd27630..594b4b29a224 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -1185,15 +1185,14 @@ TRACE_EVENT(ext4_da_update_reserve_space,
1185); 1185);
1186 1186
1187TRACE_EVENT(ext4_da_reserve_space, 1187TRACE_EVENT(ext4_da_reserve_space,
1188 TP_PROTO(struct inode *inode, int md_needed), 1188 TP_PROTO(struct inode *inode),
1189 1189
1190 TP_ARGS(inode, md_needed), 1190 TP_ARGS(inode),
1191 1191
1192 TP_STRUCT__entry( 1192 TP_STRUCT__entry(
1193 __field( dev_t, dev ) 1193 __field( dev_t, dev )
1194 __field( ino_t, ino ) 1194 __field( ino_t, ino )
1195 __field( __u64, i_blocks ) 1195 __field( __u64, i_blocks )
1196 __field( int, md_needed )
1197 __field( int, reserved_data_blocks ) 1196 __field( int, reserved_data_blocks )
1198 __field( int, reserved_meta_blocks ) 1197 __field( int, reserved_meta_blocks )
1199 __field( __u16, mode ) 1198 __field( __u16, mode )
@@ -1203,18 +1202,17 @@ TRACE_EVENT(ext4_da_reserve_space,
1203 __entry->dev = inode->i_sb->s_dev; 1202 __entry->dev = inode->i_sb->s_dev;
1204 __entry->ino = inode->i_ino; 1203 __entry->ino = inode->i_ino;
1205 __entry->i_blocks = inode->i_blocks; 1204 __entry->i_blocks = inode->i_blocks;
1206 __entry->md_needed = md_needed;
1207 __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks; 1205 __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
1208 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; 1206 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
1209 __entry->mode = inode->i_mode; 1207 __entry->mode = inode->i_mode;
1210 ), 1208 ),
1211 1209
1212 TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d " 1210 TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu "
1213 "reserved_data_blocks %d reserved_meta_blocks %d", 1211 "reserved_data_blocks %d reserved_meta_blocks %d",
1214 MAJOR(__entry->dev), MINOR(__entry->dev), 1212 MAJOR(__entry->dev), MINOR(__entry->dev),
1215 (unsigned long) __entry->ino, 1213 (unsigned long) __entry->ino,
1216 __entry->mode, __entry->i_blocks, 1214 __entry->mode, __entry->i_blocks,
1217 __entry->md_needed, __entry->reserved_data_blocks, 1215 __entry->reserved_data_blocks,
1218 __entry->reserved_meta_blocks) 1216 __entry->reserved_meta_blocks)
1219); 1217);
1220 1218
@@ -2478,6 +2476,31 @@ TRACE_EVENT(ext4_collapse_range,
2478 __entry->offset, __entry->len) 2476 __entry->offset, __entry->len)
2479); 2477);
2480 2478
2479TRACE_EVENT(ext4_insert_range,
2480 TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
2481
2482 TP_ARGS(inode, offset, len),
2483
2484 TP_STRUCT__entry(
2485 __field(dev_t, dev)
2486 __field(ino_t, ino)
2487 __field(loff_t, offset)
2488 __field(loff_t, len)
2489 ),
2490
2491 TP_fast_assign(
2492 __entry->dev = inode->i_sb->s_dev;
2493 __entry->ino = inode->i_ino;
2494 __entry->offset = offset;
2495 __entry->len = len;
2496 ),
2497
2498 TP_printk("dev %d,%d ino %lu offset %lld len %lld",
2499 MAJOR(__entry->dev), MINOR(__entry->dev),
2500 (unsigned long) __entry->ino,
2501 __entry->offset, __entry->len)
2502);
2503
2481TRACE_EVENT(ext4_es_shrink, 2504TRACE_EVENT(ext4_es_shrink,
2482 TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time, 2505 TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
2483 int nr_skipped, int retried), 2506 int nr_skipped, int retried),
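The new ext4_insert_range event mirrors the existing ext4_collapse_range one. As with any TRACE_EVENT, the macro generates a trace_ext4_insert_range() stub that the fallocate insert-range path would call; a hedged sketch, with my_hook as an illustrative wrapper:

static void my_hook(struct inode *inode, loff_t offset, loff_t len)
{
	/* offset and len are byte quantities, as printed by TP_printk above */
	trace_ext4_insert_range(inode, offset, len);
}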
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index e202dec22e1d..04856a2d8c82 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -13,6 +13,10 @@ TRACE_DEFINE_ENUM(NODE);
13TRACE_DEFINE_ENUM(DATA); 13TRACE_DEFINE_ENUM(DATA);
14TRACE_DEFINE_ENUM(META); 14TRACE_DEFINE_ENUM(META);
15TRACE_DEFINE_ENUM(META_FLUSH); 15TRACE_DEFINE_ENUM(META_FLUSH);
16TRACE_DEFINE_ENUM(INMEM);
17TRACE_DEFINE_ENUM(INMEM_DROP);
18TRACE_DEFINE_ENUM(IPU);
19TRACE_DEFINE_ENUM(OPU);
16TRACE_DEFINE_ENUM(CURSEG_HOT_DATA); 20TRACE_DEFINE_ENUM(CURSEG_HOT_DATA);
17TRACE_DEFINE_ENUM(CURSEG_WARM_DATA); 21TRACE_DEFINE_ENUM(CURSEG_WARM_DATA);
18TRACE_DEFINE_ENUM(CURSEG_COLD_DATA); 22TRACE_DEFINE_ENUM(CURSEG_COLD_DATA);
@@ -37,6 +41,7 @@ TRACE_DEFINE_ENUM(__REQ_META);
37TRACE_DEFINE_ENUM(CP_UMOUNT); 41TRACE_DEFINE_ENUM(CP_UMOUNT);
38TRACE_DEFINE_ENUM(CP_FASTBOOT); 42TRACE_DEFINE_ENUM(CP_FASTBOOT);
39TRACE_DEFINE_ENUM(CP_SYNC); 43TRACE_DEFINE_ENUM(CP_SYNC);
44TRACE_DEFINE_ENUM(CP_RECOVERY);
40TRACE_DEFINE_ENUM(CP_DISCARD); 45TRACE_DEFINE_ENUM(CP_DISCARD);
41 46
42#define show_block_type(type) \ 47#define show_block_type(type) \
@@ -112,6 +117,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
112 { CP_DISCARD, "Discard" }) 117 { CP_DISCARD, "Discard" })
113 118
114struct victim_sel_policy; 119struct victim_sel_policy;
120struct f2fs_map_blocks;
115 121
116DECLARE_EVENT_CLASS(f2fs__inode, 122DECLARE_EVENT_CLASS(f2fs__inode,
117 123
@@ -476,36 +482,35 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
476 __entry->err) 482 __entry->err)
477); 483);
478 484
479TRACE_EVENT(f2fs_get_data_block, 485TRACE_EVENT(f2fs_map_blocks,
480 TP_PROTO(struct inode *inode, sector_t iblock, 486 TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
481 struct buffer_head *bh, int ret),
482 487
483 TP_ARGS(inode, iblock, bh, ret), 488 TP_ARGS(inode, map, ret),
484 489
485 TP_STRUCT__entry( 490 TP_STRUCT__entry(
486 __field(dev_t, dev) 491 __field(dev_t, dev)
487 __field(ino_t, ino) 492 __field(ino_t, ino)
488 __field(sector_t, iblock) 493 __field(block_t, m_lblk)
489 __field(sector_t, bh_start) 494 __field(block_t, m_pblk)
490 __field(size_t, bh_size) 495 __field(unsigned int, m_len)
491 __field(int, ret) 496 __field(int, ret)
492 ), 497 ),
493 498
494 TP_fast_assign( 499 TP_fast_assign(
495 __entry->dev = inode->i_sb->s_dev; 500 __entry->dev = inode->i_sb->s_dev;
496 __entry->ino = inode->i_ino; 501 __entry->ino = inode->i_ino;
497 __entry->iblock = iblock; 502 __entry->m_lblk = map->m_lblk;
498 __entry->bh_start = bh->b_blocknr; 503 __entry->m_pblk = map->m_pblk;
499 __entry->bh_size = bh->b_size; 504 __entry->m_len = map->m_len;
500 __entry->ret = ret; 505 __entry->ret = ret;
501 ), 506 ),
502 507
503 TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, " 508 TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
504 "start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d", 509 "start blkaddr = 0x%llx, len = 0x%llx, err = %d",
505 show_dev_ino(__entry), 510 show_dev_ino(__entry),
506 (unsigned long long)__entry->iblock, 511 (unsigned long long)__entry->m_lblk,
507 (unsigned long long)__entry->bh_start, 512 (unsigned long long)__entry->m_pblk,
508 (unsigned long long)__entry->bh_size, 513 (unsigned long long)__entry->m_len,
509 __entry->ret) 514 __entry->ret)
510); 515);
511 516
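With f2fs_get_data_block reworked into f2fs_map_blocks above, the event now records the mapping result (m_lblk, m_pblk, m_len) straight from struct f2fs_map_blocks instead of a buffer_head. A hedged sketch of how a caller would emit it; my_report_map is illustrative:

static void my_report_map(struct inode *inode, struct f2fs_map_blocks *map, int err)
{
	/* map->m_lblk/m_pblk/m_len match the TP_STRUCT__entry fields above */
	trace_f2fs_map_blocks(inode, map, err);
}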
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index d19840b0cac8..284244ebfe8d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -7,7 +7,7 @@
7#include <linux/ktime.h> 7#include <linux/ktime.h>
8#include <linux/pm_qos.h> 8#include <linux/pm_qos.h>
9#include <linux/tracepoint.h> 9#include <linux/tracepoint.h>
10#include <linux/ftrace_event.h> 10#include <linux/trace_events.h>
11 11
12#define TPS(x) tracepoint_string(x) 12#define TPS(x) tracepoint_string(x)
13 13
@@ -42,45 +42,54 @@ TRACE_EVENT(pstate_sample,
42 42
43 TP_PROTO(u32 core_busy, 43 TP_PROTO(u32 core_busy,
44 u32 scaled_busy, 44 u32 scaled_busy,
45 u32 state, 45 u32 from,
46 u32 to,
46 u64 mperf, 47 u64 mperf,
47 u64 aperf, 48 u64 aperf,
49 u64 tsc,
48 u32 freq 50 u32 freq
49 ), 51 ),
50 52
51 TP_ARGS(core_busy, 53 TP_ARGS(core_busy,
52 scaled_busy, 54 scaled_busy,
53 state, 55 from,
56 to,
54 mperf, 57 mperf,
55 aperf, 58 aperf,
59 tsc,
56 freq 60 freq
57 ), 61 ),
58 62
59 TP_STRUCT__entry( 63 TP_STRUCT__entry(
60 __field(u32, core_busy) 64 __field(u32, core_busy)
61 __field(u32, scaled_busy) 65 __field(u32, scaled_busy)
62 __field(u32, state) 66 __field(u32, from)
67 __field(u32, to)
63 __field(u64, mperf) 68 __field(u64, mperf)
64 __field(u64, aperf) 69 __field(u64, aperf)
70 __field(u64, tsc)
65 __field(u32, freq) 71 __field(u32, freq)
66 72 ),
67 ),
68 73
69 TP_fast_assign( 74 TP_fast_assign(
70 __entry->core_busy = core_busy; 75 __entry->core_busy = core_busy;
71 __entry->scaled_busy = scaled_busy; 76 __entry->scaled_busy = scaled_busy;
72 __entry->state = state; 77 __entry->from = from;
78 __entry->to = to;
73 __entry->mperf = mperf; 79 __entry->mperf = mperf;
74 __entry->aperf = aperf; 80 __entry->aperf = aperf;
81 __entry->tsc = tsc;
75 __entry->freq = freq; 82 __entry->freq = freq;
76 ), 83 ),
77 84
78 TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ", 85 TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu ",
79 (unsigned long)__entry->core_busy, 86 (unsigned long)__entry->core_busy,
80 (unsigned long)__entry->scaled_busy, 87 (unsigned long)__entry->scaled_busy,
81 (unsigned long)__entry->state, 88 (unsigned long)__entry->from,
89 (unsigned long)__entry->to,
82 (unsigned long long)__entry->mperf, 90 (unsigned long long)__entry->mperf,
83 (unsigned long long)__entry->aperf, 91 (unsigned long long)__entry->aperf,
92 (unsigned long long)__entry->tsc,
84 (unsigned long)__entry->freq 93 (unsigned long)__entry->freq
85 ) 94 )
86 95
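pstate_sample grows from a single "state" argument to a from/to pair plus the TSC count, so one sample now records both the previous and the requested P-state. A hedged sketch of the widened call; all names are illustrative rather than taken from intel_pstate:

static void my_report_sample(u32 busy, u32 scaled, u32 from, u32 to,
			     u64 mperf, u64 aperf, u64 tsc, u32 freq)
{
	/* argument order follows the TP_PROTO above */
	trace_pstate_sample(busy, scaled, from, to, mperf, aperf, tsc, freq);
}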
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 04c3c6efdcc2..50fea660c0f8 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -6,7 +6,7 @@
6 6
7#include <linux/tracepoint.h> 7#include <linux/tracepoint.h>
8#include <linux/trace_seq.h> 8#include <linux/trace_seq.h>
9#include <scsi/scsi.h> 9#include <scsi/scsi_proto.h>
10#include <scsi/scsi_tcq.h> 10#include <scsi/scsi_tcq.h>
11#include <target/target_core_base.h> 11#include <target/target_core_base.h>
12 12
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 0f4f95d63c03..8b1f80682b80 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -77,6 +77,64 @@ TRACE_EVENT(thermal_zone_trip,
77 __entry->trip_type) 77 __entry->trip_type)
78); 78);
79 79
80TRACE_EVENT(thermal_power_cpu_get_power,
81 TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
82 size_t load_len, u32 dynamic_power, u32 static_power),
83
84 TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power),
85
86 TP_STRUCT__entry(
87 __bitmask(cpumask, num_possible_cpus())
88 __field(unsigned long, freq )
89 __dynamic_array(u32, load, load_len)
90 __field(size_t, load_len )
91 __field(u32, dynamic_power )
92 __field(u32, static_power )
93 ),
94
95 TP_fast_assign(
96 __assign_bitmask(cpumask, cpumask_bits(cpus),
97 num_possible_cpus());
98 __entry->freq = freq;
99 memcpy(__get_dynamic_array(load), load,
100 load_len * sizeof(*load));
101 __entry->load_len = load_len;
102 __entry->dynamic_power = dynamic_power;
103 __entry->static_power = static_power;
104 ),
105
106 TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d static_power=%d",
107 __get_bitmask(cpumask), __entry->freq,
108 __print_array(__get_dynamic_array(load), __entry->load_len, 4),
109 __entry->dynamic_power, __entry->static_power)
110);
111
112TRACE_EVENT(thermal_power_cpu_limit,
113 TP_PROTO(const struct cpumask *cpus, unsigned int freq,
114 unsigned long cdev_state, u32 power),
115
116 TP_ARGS(cpus, freq, cdev_state, power),
117
118 TP_STRUCT__entry(
119 __bitmask(cpumask, num_possible_cpus())
120 __field(unsigned int, freq )
121 __field(unsigned long, cdev_state)
122 __field(u32, power )
123 ),
124
125 TP_fast_assign(
126 __assign_bitmask(cpumask, cpumask_bits(cpus),
127 num_possible_cpus());
128 __entry->freq = freq;
129 __entry->cdev_state = cdev_state;
130 __entry->power = power;
131 ),
132
133 TP_printk("cpus=%s freq=%u cdev_state=%lu power=%u",
134 __get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
135 __entry->power)
136);
137
80#endif /* _TRACE_THERMAL_H */ 138#endif /* _TRACE_THERMAL_H */
81 139
82/* This part must be outside protection */ 140/* This part must be outside protection */
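The two new events above instrument the CPU power model: _get_power snapshots per-CPU load and the estimated dynamic/static power at a given frequency, while _limit records the cap a cooling device actually applied. A hedged sketch of emitting the limit event; the wrapper name is illustrative:

static void my_report_limit(const struct cpumask *cpus, unsigned int freq,
			    unsigned long cdev_state, u32 power)
{
	/* emitted after the cooling device settles on a frequency cap */
	trace_thermal_power_cpu_limit(cpus, freq, cdev_state, power);
}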
diff --git a/include/trace/events/thermal_power_allocator.h b/include/trace/events/thermal_power_allocator.h
new file mode 100644
index 000000000000..12e1321c4e0c
--- /dev/null
+++ b/include/trace/events/thermal_power_allocator.h
@@ -0,0 +1,87 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM thermal_power_allocator
3
4#if !defined(_TRACE_THERMAL_POWER_ALLOCATOR_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_THERMAL_POWER_ALLOCATOR_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(thermal_power_allocator,
10 TP_PROTO(struct thermal_zone_device *tz, u32 *req_power,
11 u32 total_req_power, u32 *granted_power,
12 u32 total_granted_power, size_t num_actors,
13 u32 power_range, u32 max_allocatable_power,
14 unsigned long current_temp, s32 delta_temp),
15 TP_ARGS(tz, req_power, total_req_power, granted_power,
16 total_granted_power, num_actors, power_range,
17 max_allocatable_power, current_temp, delta_temp),
18 TP_STRUCT__entry(
19 __field(int, tz_id )
20 __dynamic_array(u32, req_power, num_actors )
21 __field(u32, total_req_power )
22 __dynamic_array(u32, granted_power, num_actors)
23 __field(u32, total_granted_power )
24 __field(size_t, num_actors )
25 __field(u32, power_range )
26 __field(u32, max_allocatable_power )
27 __field(unsigned long, current_temp )
28 __field(s32, delta_temp )
29 ),
30 TP_fast_assign(
31 __entry->tz_id = tz->id;
32 memcpy(__get_dynamic_array(req_power), req_power,
33 num_actors * sizeof(*req_power));
34 __entry->total_req_power = total_req_power;
35 memcpy(__get_dynamic_array(granted_power), granted_power,
36 num_actors * sizeof(*granted_power));
37 __entry->total_granted_power = total_granted_power;
38 __entry->num_actors = num_actors;
39 __entry->power_range = power_range;
40 __entry->max_allocatable_power = max_allocatable_power;
41 __entry->current_temp = current_temp;
42 __entry->delta_temp = delta_temp;
43 ),
44
45 TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%lu delta_temperature=%d",
46 __entry->tz_id,
47 __print_array(__get_dynamic_array(req_power),
48 __entry->num_actors, 4),
49 __entry->total_req_power,
50 __print_array(__get_dynamic_array(granted_power),
51 __entry->num_actors, 4),
52 __entry->total_granted_power, __entry->power_range,
53 __entry->max_allocatable_power, __entry->current_temp,
54 __entry->delta_temp)
55);
56
57TRACE_EVENT(thermal_power_allocator_pid,
58 TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral,
59 s64 p, s64 i, s64 d, s32 output),
60 TP_ARGS(tz, err, err_integral, p, i, d, output),
61 TP_STRUCT__entry(
62 __field(int, tz_id )
63 __field(s32, err )
64 __field(s32, err_integral)
65 __field(s64, p )
66 __field(s64, i )
67 __field(s64, d )
68 __field(s32, output )
69 ),
70 TP_fast_assign(
71 __entry->tz_id = tz->id;
72 __entry->err = err;
73 __entry->err_integral = err_integral;
74 __entry->p = p;
75 __entry->i = i;
76 __entry->d = d;
77 __entry->output = output;
78 ),
79
80 TP_printk("thermal_zone_id=%d err=%d err_integral=%d p=%lld i=%lld d=%lld output=%d",
81 __entry->tz_id, __entry->err, __entry->err_integral,
82 __entry->p, __entry->i, __entry->d, __entry->output)
83);
84#endif /* _TRACE_THERMAL_POWER_ALLOCATOR_H */
85
86/* This part must be outside protection */
87#include <trace/define_trace.h>
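This new header gives the power-allocator governor two events: one per allocation pass (requested vs. granted power per actor) and one per PID step. A hedged sketch of emitting the PID event once per throttle pass; the wrapper is illustrative:

static void my_report_pid(struct thermal_zone_device *tz, s32 err,
			  s32 err_integral, s64 p, s64 i, s64 d, s32 output)
{
	trace_thermal_power_allocator_pid(tz, err, err_integral,
					  p, i, d, output);
}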
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index 20112170ff11..89d0497c058a 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -83,7 +83,8 @@ SHOW_FIELD
83 { V4L2_BUF_FLAG_TIMESTAMP_MASK, "TIMESTAMP_MASK" }, \ 83 { V4L2_BUF_FLAG_TIMESTAMP_MASK, "TIMESTAMP_MASK" }, \
84 { V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN, "TIMESTAMP_UNKNOWN" }, \ 84 { V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN, "TIMESTAMP_UNKNOWN" }, \
85 { V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, "TIMESTAMP_MONOTONIC" }, \ 85 { V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, "TIMESTAMP_MONOTONIC" }, \
86 { V4L2_BUF_FLAG_TIMESTAMP_COPY, "TIMESTAMP_COPY" }) 86 { V4L2_BUF_FLAG_TIMESTAMP_COPY, "TIMESTAMP_COPY" }, \
87 { V4L2_BUF_FLAG_LAST, "LAST" })
87 88
88#define show_timecode_flags(flags) \ 89#define show_timecode_flags(flags) \
89 __print_flags(flags, "|", \ 90 __print_flags(flags, "|", \
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index c178d13d6f4c..a7aa607a4c55 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -360,7 +360,7 @@ TRACE_EVENT(global_dirty_state,
360 __entry->nr_written = global_page_state(NR_WRITTEN); 360 __entry->nr_written = global_page_state(NR_WRITTEN);
361 __entry->background_thresh = background_thresh; 361 __entry->background_thresh = background_thresh;
362 __entry->dirty_thresh = dirty_thresh; 362 __entry->dirty_thresh = dirty_thresh;
363 __entry->dirty_limit = global_dirty_limit; 363 __entry->dirty_limit = global_wb_domain.dirty_limit;
364 ), 364 ),
365 365
366 TP_printk("dirty=%lu writeback=%lu unstable=%lu " 366 TP_printk("dirty=%lu writeback=%lu unstable=%lu "
@@ -399,13 +399,13 @@ TRACE_EVENT(bdi_dirty_ratelimit,
399 399
400 TP_fast_assign( 400 TP_fast_assign(
401 strlcpy(__entry->bdi, dev_name(bdi->dev), 32); 401 strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
402 __entry->write_bw = KBps(bdi->write_bandwidth); 402 __entry->write_bw = KBps(bdi->wb.write_bandwidth);
403 __entry->avg_write_bw = KBps(bdi->avg_write_bandwidth); 403 __entry->avg_write_bw = KBps(bdi->wb.avg_write_bandwidth);
404 __entry->dirty_rate = KBps(dirty_rate); 404 __entry->dirty_rate = KBps(dirty_rate);
405 __entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit); 405 __entry->dirty_ratelimit = KBps(bdi->wb.dirty_ratelimit);
406 __entry->task_ratelimit = KBps(task_ratelimit); 406 __entry->task_ratelimit = KBps(task_ratelimit);
407 __entry->balanced_dirty_ratelimit = 407 __entry->balanced_dirty_ratelimit =
408 KBps(bdi->balanced_dirty_ratelimit); 408 KBps(bdi->wb.balanced_dirty_ratelimit);
409 ), 409 ),
410 410
411 TP_printk("bdi %s: " 411 TP_printk("bdi %s: "
@@ -462,8 +462,9 @@ TRACE_EVENT(balance_dirty_pages,
462 unsigned long freerun = (thresh + bg_thresh) / 2; 462 unsigned long freerun = (thresh + bg_thresh) / 2;
463 strlcpy(__entry->bdi, dev_name(bdi->dev), 32); 463 strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
464 464
465 __entry->limit = global_dirty_limit; 465 __entry->limit = global_wb_domain.dirty_limit;
466 __entry->setpoint = (global_dirty_limit + freerun) / 2; 466 __entry->setpoint = (global_wb_domain.dirty_limit +
467 freerun) / 2;
467 __entry->dirty = dirty; 468 __entry->dirty = dirty;
468 __entry->bdi_setpoint = __entry->setpoint * 469 __entry->bdi_setpoint = __entry->setpoint *
469 bdi_thresh / (thresh + 1); 470 bdi_thresh / (thresh + 1);
diff --git a/include/trace/perf.h b/include/trace/perf.h
new file mode 100644
index 000000000000..1b5443cebedc
--- /dev/null
+++ b/include/trace/perf.h
@@ -0,0 +1,350 @@
1/*
2 * Stage 4 of the trace events.
3 *
4 * Override the macros in <trace/trace_events.h> to include the following:
5 *
6 * For those macros defined with TRACE_EVENT:
7 *
8 * static struct trace_event_call event_<call>;
9 *
10 * static void trace_event_raw_event_<call>(void *__data, proto)
11 * {
12 * struct trace_event_file *trace_file = __data;
13 * struct trace_event_call *event_call = trace_file->event_call;
14 * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
15 * unsigned long eflags = trace_file->flags;
16 * enum event_trigger_type __tt = ETT_NONE;
17 * struct ring_buffer_event *event;
18 * struct trace_event_raw_<call> *entry; <-- defined in stage 1
19 * struct ring_buffer *buffer;
20 * unsigned long irq_flags;
21 * int __data_size;
22 * int pc;
23 *
24 * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
25 * if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
26 * event_triggers_call(trace_file, NULL);
27 * if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
28 * return;
29 * }
30 *
31 * local_save_flags(irq_flags);
32 * pc = preempt_count();
33 *
34 * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
35 *
36 * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
37 * event_<call>->event.type,
38 * sizeof(*entry) + __data_size,
39 * irq_flags, pc);
40 * if (!event)
41 * return;
42 * entry = ring_buffer_event_data(event);
43 *
44 * { <assign>; } <-- Here we assign the entries by the __field and
45 * __array macros.
46 *
47 * if (eflags & EVENT_FILE_FL_TRIGGER_COND)
48 * __tt = event_triggers_call(trace_file, entry);
49 *
50 * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
51 * &trace_file->flags))
52 * ring_buffer_discard_commit(buffer, event);
53 * else if (!filter_check_discard(trace_file, entry, buffer, event))
54 * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
55 *
56 * if (__tt)
57 * event_triggers_post_call(trace_file, __tt);
58 * }
59 *
60 * static struct trace_event ftrace_event_type_<call> = {
61 * .trace = trace_raw_output_<call>, <-- stage 2
62 * };
63 *
64 * static char print_fmt_<call>[] = <TP_printk>;
65 *
66 * static struct trace_event_class __used event_class_<template> = {
67 * .system = "<system>",
68 * .define_fields = trace_event_define_fields_<call>,
69 * .fields = LIST_HEAD_INIT(event_class_##call.fields),
70 * .raw_init = trace_event_raw_init,
71 * .probe = trace_event_raw_event_##call,
72 * .reg = trace_event_reg,
73 * };
74 *
75 * static struct trace_event_call event_<call> = {
76 * .class = event_class_<template>,
77 * {
78 * .tp = &__tracepoint_<call>,
79 * },
80 * .event = &ftrace_event_type_<call>,
81 * .print_fmt = print_fmt_<call>,
82 * .flags = TRACE_EVENT_FL_TRACEPOINT,
83 * };
84 * // its only safe to use pointers when doing linker tricks to
85 * // create an array.
86 * static struct trace_event_call __used
87 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
88 *
89 */
90
91#ifdef CONFIG_PERF_EVENTS
92
93#define _TRACE_PERF_PROTO(call, proto) \
94 static notrace void \
95 perf_trace_##call(void *__data, proto);
96
97#define _TRACE_PERF_INIT(call) \
98 .perf_probe = perf_trace_##call,
99
100#else
101#define _TRACE_PERF_PROTO(call, proto)
102#define _TRACE_PERF_INIT(call)
103#endif /* CONFIG_PERF_EVENTS */
104
105#undef __entry
106#define __entry entry
107
108#undef __field
109#define __field(type, item)
110
111#undef __field_struct
112#define __field_struct(type, item)
113
114#undef __array
115#define __array(type, item, len)
116
117#undef __dynamic_array
118#define __dynamic_array(type, item, len) \
119 __entry->__data_loc_##item = __data_offsets.item;
120
121#undef __string
122#define __string(item, src) __dynamic_array(char, item, -1)
123
124#undef __assign_str
125#define __assign_str(dst, src) \
126 strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
127
128#undef __bitmask
129#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
130
131#undef __get_bitmask
132#define __get_bitmask(field) (char *)__get_dynamic_array(field)
133
134#undef __assign_bitmask
135#define __assign_bitmask(dst, src, nr_bits) \
136 memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
137
138#undef TP_fast_assign
139#define TP_fast_assign(args...) args
140
141#undef __perf_addr
142#define __perf_addr(a) (a)
143
144#undef __perf_count
145#define __perf_count(c) (c)
146
147#undef __perf_task
148#define __perf_task(t) (t)
149
150#undef DECLARE_EVENT_CLASS
151#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
152 \
153static notrace void \
154trace_event_raw_event_##call(void *__data, proto) \
155{ \
156 struct trace_event_file *trace_file = __data; \
157 struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
158 struct trace_event_buffer fbuffer; \
159 struct trace_event_raw_##call *entry; \
160 int __data_size; \
161 \
162 if (trace_trigger_soft_disabled(trace_file)) \
163 return; \
164 \
165 __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
166 \
167 entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
168 sizeof(*entry) + __data_size); \
169 \
170 if (!entry) \
171 return; \
172 \
173 tstruct \
174 \
175 { assign; } \
176 \
177 trace_event_buffer_commit(&fbuffer); \
178}
179/*
180 * The ftrace_test_probe is compiled out, it is only here as a build time check
181 * to make sure that if the tracepoint handling changes, the ftrace probe will
182 * fail to compile unless it too is updated.
183 */
184
185#undef DEFINE_EVENT
186#define DEFINE_EVENT(template, call, proto, args) \
187static inline void ftrace_test_probe_##call(void) \
188{ \
189 check_trace_callback_type_##call(trace_event_raw_event_##template); \
190}
191
192#undef DEFINE_EVENT_PRINT
193#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
194
195#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
196
197#undef __entry
198#define __entry REC
199
200#undef __print_flags
201#undef __print_symbolic
202#undef __print_hex
203#undef __get_dynamic_array
204#undef __get_dynamic_array_len
205#undef __get_str
206#undef __get_bitmask
207#undef __print_array
208
209#undef TP_printk
210#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
211
212#undef DECLARE_EVENT_CLASS
213#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
214_TRACE_PERF_PROTO(call, PARAMS(proto)); \
215static char print_fmt_##call[] = print; \
216static struct trace_event_class __used __refdata event_class_##call = { \
217 .system = TRACE_SYSTEM_STRING, \
218 .define_fields = trace_event_define_fields_##call, \
219 .fields = LIST_HEAD_INIT(event_class_##call.fields),\
220 .raw_init = trace_event_raw_init, \
221 .probe = trace_event_raw_event_##call, \
222 .reg = trace_event_reg, \
223 _TRACE_PERF_INIT(call) \
224};
225
226#undef DEFINE_EVENT
227#define DEFINE_EVENT(template, call, proto, args) \
228 \
229static struct trace_event_call __used event_##call = { \
230 .class = &event_class_##template, \
231 { \
232 .tp = &__tracepoint_##call, \
233 }, \
234 .event.funcs = &trace_event_type_funcs_##template, \
235 .print_fmt = print_fmt_##template, \
236 .flags = TRACE_EVENT_FL_TRACEPOINT, \
237}; \
238static struct trace_event_call __used \
239__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
240
241#undef DEFINE_EVENT_PRINT
242#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
243 \
244static char print_fmt_##call[] = print; \
245 \
246static struct trace_event_call __used event_##call = { \
247 .class = &event_class_##template, \
248 { \
249 .tp = &__tracepoint_##call, \
250 }, \
251 .event.funcs = &trace_event_type_funcs_##call, \
252 .print_fmt = print_fmt_##call, \
253 .flags = TRACE_EVENT_FL_TRACEPOINT, \
254}; \
255static struct trace_event_call __used \
256__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
257
258#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
259
260#undef TRACE_SYSTEM_VAR
261
262#ifdef CONFIG_PERF_EVENTS
263
264#undef __entry
265#define __entry entry
266
267#undef __get_dynamic_array
268#define __get_dynamic_array(field) \
269 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
270
271#undef __get_dynamic_array_len
272#define __get_dynamic_array_len(field) \
273 ((__entry->__data_loc_##field >> 16) & 0xffff)
274
275#undef __get_str
276#define __get_str(field) (char *)__get_dynamic_array(field)
277
278#undef __get_bitmask
279#define __get_bitmask(field) (char *)__get_dynamic_array(field)
280
281#undef __perf_addr
282#define __perf_addr(a) (__addr = (a))
283
284#undef __perf_count
285#define __perf_count(c) (__count = (c))
286
287#undef __perf_task
288#define __perf_task(t) (__task = (t))
289
290#undef DECLARE_EVENT_CLASS
291#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
292static notrace void \
293perf_trace_##call(void *__data, proto) \
294{ \
295 struct trace_event_call *event_call = __data; \
296 struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
297 struct trace_event_raw_##call *entry; \
298 struct pt_regs *__regs; \
299 u64 __addr = 0, __count = 1; \
300 struct task_struct *__task = NULL; \
301 struct hlist_head *head; \
302 int __entry_size; \
303 int __data_size; \
304 int rctx; \
305 \
306 __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
307 \
308 head = this_cpu_ptr(event_call->perf_events); \
309 if (__builtin_constant_p(!__task) && !__task && \
310 hlist_empty(head)) \
311 return; \
312 \
313 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
314 sizeof(u64)); \
315 __entry_size -= sizeof(u32); \
316 \
317 entry = perf_trace_buf_prepare(__entry_size, \
318 event_call->event.type, &__regs, &rctx); \
319 if (!entry) \
320 return; \
321 \
322 perf_fetch_caller_regs(__regs); \
323 \
324 tstruct \
325 \
326 { assign; } \
327 \
328 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
329 __count, __regs, head, __task); \
330}
331
332/*
333 * This part is compiled out, it is only here as a build time check
334 * to make sure that if the tracepoint handling changes, the
335 * perf probe will fail to compile unless it too is updated.
336 */
337#undef DEFINE_EVENT
338#define DEFINE_EVENT(template, call, proto, args) \
339static inline void perf_test_probe_##call(void) \
340{ \
341 check_trace_callback_type_##call(perf_trace_##template); \
342}
343
344
345#undef DEFINE_EVENT_PRINT
346#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
347 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
348
349#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
350#endif /* CONFIG_PERF_EVENTS */
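To make the renames concrete: for a hypothetical TRACE_EVENT(foo, ...) passed through the new headers, stage 4 now emits trace_event_raw_event_foo() as the ring-buffer probe and, under CONFIG_PERF_EVENTS, perf_trace_foo() as the perf probe, both hung off a trace_event_call placed in the _ftrace_events section. A minimal sketch of the input side only; the expansion itself is what this file generates:

/* hypothetical event definition; the headers above turn it into
 * trace_event_raw_event_foo() and perf_trace_foo() */
TRACE_EVENT(foo,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%d", __entry->value)
);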
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 9674145e2f6a..7434f0f5d3f6 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/tracepoint.h> 4#include <linux/tracepoint.h>
5#include <linux/unistd.h> 5#include <linux/unistd.h>
6#include <linux/ftrace_event.h> 6#include <linux/trace_events.h>
7#include <linux/thread_info.h> 7#include <linux/thread_info.h>
8 8
9#include <asm/ptrace.h> 9#include <asm/ptrace.h>
@@ -29,8 +29,8 @@ struct syscall_metadata {
29 const char **args; 29 const char **args;
30 struct list_head enter_fields; 30 struct list_head enter_fields;
31 31
32 struct ftrace_event_call *enter_event; 32 struct trace_event_call *enter_event;
33 struct ftrace_event_call *exit_event; 33 struct trace_event_call *exit_event;
34}; 34};
35 35
36#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS) 36#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
diff --git a/include/trace/ftrace.h b/include/trace/trace_events.h
index 37d4b10b111d..43be3b0e44d3 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/trace_events.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Override the macros in <trace/trace_events.h> to include the following: 4 * Override the macros in <trace/trace_events.h> to include the following:
5 * 5 *
6 * struct ftrace_raw_<call> { 6 * struct trace_event_raw_<call> {
7 * struct trace_entry ent; 7 * struct trace_entry ent;
8 * <type> <item>; 8 * <type> <item>;
9 * <type2> <item2>[<len>]; 9 * <type2> <item2>[<len>];
@@ -16,7 +16,7 @@
16 * in the structure. 16 * in the structure.
17 */ 17 */
18 18
19#include <linux/ftrace_event.h> 19#include <linux/trace_events.h>
20 20
21#ifndef TRACE_SYSTEM_VAR 21#ifndef TRACE_SYSTEM_VAR
22#define TRACE_SYSTEM_VAR TRACE_SYSTEM 22#define TRACE_SYSTEM_VAR TRACE_SYSTEM
@@ -95,17 +95,17 @@ TRACE_MAKE_SYSTEM_STR();
95 95
96#undef DECLARE_EVENT_CLASS 96#undef DECLARE_EVENT_CLASS
97#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ 97#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
98 struct ftrace_raw_##name { \ 98 struct trace_event_raw_##name { \
99 struct trace_entry ent; \ 99 struct trace_entry ent; \
100 tstruct \ 100 tstruct \
101 char __data[0]; \ 101 char __data[0]; \
102 }; \ 102 }; \
103 \ 103 \
104 static struct ftrace_event_class event_class_##name; 104 static struct trace_event_class event_class_##name;
105 105
106#undef DEFINE_EVENT 106#undef DEFINE_EVENT
107#define DEFINE_EVENT(template, name, proto, args) \ 107#define DEFINE_EVENT(template, name, proto, args) \
108 static struct ftrace_event_call __used \ 108 static struct trace_event_call __used \
109 __attribute__((__aligned__(4))) event_##name 109 __attribute__((__aligned__(4))) event_##name
110 110
111#undef DEFINE_EVENT_FN 111#undef DEFINE_EVENT_FN
@@ -138,7 +138,7 @@ TRACE_MAKE_SYSTEM_STR();
138 * 138 *
139 * Include the following: 139 * Include the following:
140 * 140 *
141 * struct ftrace_data_offsets_<call> { 141 * struct trace_event_data_offsets_<call> {
142 * u32 <item1>; 142 * u32 <item1>;
143 * u32 <item2>; 143 * u32 <item2>;
144 * [...] 144 * [...]
@@ -178,7 +178,7 @@ TRACE_MAKE_SYSTEM_STR();
178 178
179#undef DECLARE_EVENT_CLASS 179#undef DECLARE_EVENT_CLASS
180#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 180#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
181 struct ftrace_data_offsets_##call { \ 181 struct trace_event_data_offsets_##call { \
182 tstruct; \ 182 tstruct; \
183 }; 183 };
184 184
@@ -203,10 +203,10 @@ TRACE_MAKE_SYSTEM_STR();
203 * Override the macros in <trace/trace_events.h> to include the following: 203 * Override the macros in <trace/trace_events.h> to include the following:
204 * 204 *
205 * enum print_line_t 205 * enum print_line_t
206 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags) 206 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
207 * { 207 * {
208 * struct trace_seq *s = &iter->seq; 208 * struct trace_seq *s = &iter->seq;
209 * struct ftrace_raw_<call> *field; <-- defined in stage 1 209 * struct trace_event_raw_<call> *field; <-- defined in stage 1
210 * struct trace_entry *entry; 210 * struct trace_entry *entry;
211 * struct trace_seq *p = &iter->tmp_seq; 211 * struct trace_seq *p = &iter->tmp_seq;
212 * int ret; 212 * int ret;
@@ -258,7 +258,7 @@ TRACE_MAKE_SYSTEM_STR();
258 void *__bitmask = __get_dynamic_array(field); \ 258 void *__bitmask = __get_dynamic_array(field); \
259 unsigned int __bitmask_size; \ 259 unsigned int __bitmask_size; \
260 __bitmask_size = __get_dynamic_array_len(field); \ 260 __bitmask_size = __get_dynamic_array_len(field); \
261 ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ 261 trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
262 }) 262 })
263 263
264#undef __print_flags 264#undef __print_flags
@@ -266,7 +266,7 @@ TRACE_MAKE_SYSTEM_STR();
266 ({ \ 266 ({ \
267 static const struct trace_print_flags __flags[] = \ 267 static const struct trace_print_flags __flags[] = \
268 { flag_array, { -1, NULL }}; \ 268 { flag_array, { -1, NULL }}; \
269 ftrace_print_flags_seq(p, delim, flag, __flags); \ 269 trace_print_flags_seq(p, delim, flag, __flags); \
270 }) 270 })
271 271
272#undef __print_symbolic 272#undef __print_symbolic
@@ -274,7 +274,7 @@ TRACE_MAKE_SYSTEM_STR();
274 ({ \ 274 ({ \
275 static const struct trace_print_flags symbols[] = \ 275 static const struct trace_print_flags symbols[] = \
276 { symbol_array, { -1, NULL }}; \ 276 { symbol_array, { -1, NULL }}; \
277 ftrace_print_symbols_seq(p, value, symbols); \ 277 trace_print_symbols_seq(p, value, symbols); \
278 }) 278 })
279 279
280#undef __print_symbolic_u64 280#undef __print_symbolic_u64
@@ -283,7 +283,7 @@ TRACE_MAKE_SYSTEM_STR();
283 ({ \ 283 ({ \
284 static const struct trace_print_flags_u64 symbols[] = \ 284 static const struct trace_print_flags_u64 symbols[] = \
285 { symbol_array, { -1, NULL } }; \ 285 { symbol_array, { -1, NULL } }; \
286 ftrace_print_symbols_seq_u64(p, value, symbols); \ 286 trace_print_symbols_seq_u64(p, value, symbols); \
287 }) 287 })
288#else 288#else
289#define __print_symbolic_u64(value, symbol_array...) \ 289#define __print_symbolic_u64(value, symbol_array...) \
@@ -291,30 +291,30 @@ TRACE_MAKE_SYSTEM_STR();
291#endif 291#endif
292 292
293#undef __print_hex 293#undef __print_hex
294#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len) 294#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
295 295
296#undef __print_array 296#undef __print_array
297#define __print_array(array, count, el_size) \ 297#define __print_array(array, count, el_size) \
298 ({ \ 298 ({ \
299 BUILD_BUG_ON(el_size != 1 && el_size != 2 && \ 299 BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
300 el_size != 4 && el_size != 8); \ 300 el_size != 4 && el_size != 8); \
301 ftrace_print_array_seq(p, array, count, el_size); \ 301 trace_print_array_seq(p, array, count, el_size); \
302 }) 302 })
303 303
304#undef DECLARE_EVENT_CLASS 304#undef DECLARE_EVENT_CLASS
305#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 305#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
306static notrace enum print_line_t \ 306static notrace enum print_line_t \
307ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ 307trace_raw_output_##call(struct trace_iterator *iter, int flags, \
308 struct trace_event *trace_event) \ 308 struct trace_event *trace_event) \
309{ \ 309{ \
310 struct trace_seq *s = &iter->seq; \ 310 struct trace_seq *s = &iter->seq; \
311 struct trace_seq __maybe_unused *p = &iter->tmp_seq; \ 311 struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
312 struct ftrace_raw_##call *field; \ 312 struct trace_event_raw_##call *field; \
313 int ret; \ 313 int ret; \
314 \ 314 \
315 field = (typeof(field))iter->ent; \ 315 field = (typeof(field))iter->ent; \
316 \ 316 \
317 ret = ftrace_raw_output_prep(iter, trace_event); \ 317 ret = trace_raw_output_prep(iter, trace_event); \
318 if (ret != TRACE_TYPE_HANDLED) \ 318 if (ret != TRACE_TYPE_HANDLED) \
319 return ret; \ 319 return ret; \
320 \ 320 \
@@ -322,17 +322,17 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
322 \ 322 \
323 return trace_handle_return(s); \ 323 return trace_handle_return(s); \
324} \ 324} \
325static struct trace_event_functions ftrace_event_type_funcs_##call = { \ 325static struct trace_event_functions trace_event_type_funcs_##call = { \
326 .trace = ftrace_raw_output_##call, \ 326 .trace = trace_raw_output_##call, \
327}; 327};
328 328
329#undef DEFINE_EVENT_PRINT 329#undef DEFINE_EVENT_PRINT
330#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 330#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
331static notrace enum print_line_t \ 331static notrace enum print_line_t \
332ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ 332trace_raw_output_##call(struct trace_iterator *iter, int flags, \
333 struct trace_event *event) \ 333 struct trace_event *event) \
334{ \ 334{ \
335 struct ftrace_raw_##template *field; \ 335 struct trace_event_raw_##template *field; \
336 struct trace_entry *entry; \ 336 struct trace_entry *entry; \
337 struct trace_seq *p = &iter->tmp_seq; \ 337 struct trace_seq *p = &iter->tmp_seq; \
338 \ 338 \
@@ -346,10 +346,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
346 field = (typeof(field))entry; \ 346 field = (typeof(field))entry; \
347 \ 347 \
348 trace_seq_init(p); \ 348 trace_seq_init(p); \
349 return ftrace_output_call(iter, #call, print); \ 349 return trace_output_call(iter, #call, print); \
350} \ 350} \
351static struct trace_event_functions ftrace_event_type_funcs_##call = { \ 351static struct trace_event_functions trace_event_type_funcs_##call = { \
352 .trace = ftrace_raw_output_##call, \ 352 .trace = trace_raw_output_##call, \
353}; 353};
354 354
355#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 355#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -407,9 +407,9 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
407#undef DECLARE_EVENT_CLASS 407#undef DECLARE_EVENT_CLASS
408#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ 408#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
409static int notrace __init \ 409static int notrace __init \
410ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ 410trace_event_define_fields_##call(struct trace_event_call *event_call) \
411{ \ 411{ \
412 struct ftrace_raw_##call field; \ 412 struct trace_event_raw_##call field; \
413 int ret; \ 413 int ret; \
414 \ 414 \
415 tstruct; \ 415 tstruct; \
@@ -485,12 +485,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
485 485
486#undef DECLARE_EVENT_CLASS 486#undef DECLARE_EVENT_CLASS
487#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 487#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
488static inline notrace int ftrace_get_offsets_##call( \ 488static inline notrace int trace_event_get_offsets_##call( \
489 struct ftrace_data_offsets_##call *__data_offsets, proto) \ 489 struct trace_event_data_offsets_##call *__data_offsets, proto) \
490{ \ 490{ \
491 int __data_size = 0; \ 491 int __data_size = 0; \
492 int __maybe_unused __item_length; \ 492 int __maybe_unused __item_length; \
493 struct ftrace_raw_##call __maybe_unused *entry; \ 493 struct trace_event_raw_##call __maybe_unused *entry; \
494 \ 494 \
495 tstruct; \ 495 tstruct; \
496 \ 496 \
@@ -506,354 +506,3 @@ static inline notrace int ftrace_get_offsets_##call( \
506 506
507#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 507#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
508 508
509/*
510 * Stage 4 of the trace events.
511 *
512 * Override the macros in <trace/trace_events.h> to include the following:
513 *
514 * For those macros defined with TRACE_EVENT:
515 *
516 * static struct ftrace_event_call event_<call>;
517 *
518 * static void ftrace_raw_event_<call>(void *__data, proto)
519 * {
520 * struct ftrace_event_file *ftrace_file = __data;
521 * struct ftrace_event_call *event_call = ftrace_file->event_call;
522 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
523 * unsigned long eflags = ftrace_file->flags;
524 * enum event_trigger_type __tt = ETT_NONE;
525 * struct ring_buffer_event *event;
526 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
527 * struct ring_buffer *buffer;
528 * unsigned long irq_flags;
529 * int __data_size;
530 * int pc;
531 *
532 * if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
533 * if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
534 * event_triggers_call(ftrace_file, NULL);
535 * if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
536 * return;
537 * }
538 *
539 * local_save_flags(irq_flags);
540 * pc = preempt_count();
541 *
542 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
543 *
544 * event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
545 * event_<call>->event.type,
546 * sizeof(*entry) + __data_size,
547 * irq_flags, pc);
548 * if (!event)
549 * return;
550 * entry = ring_buffer_event_data(event);
551 *
552 * { <assign>; } <-- Here we assign the entries by the __field and
553 * __array macros.
554 *
555 * if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
556 * __tt = event_triggers_call(ftrace_file, entry);
557 *
558 * if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
559 * &ftrace_file->flags))
560 * ring_buffer_discard_commit(buffer, event);
561 * else if (!filter_check_discard(ftrace_file, entry, buffer, event))
562 * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
563 *
564 * if (__tt)
565 * event_triggers_post_call(ftrace_file, __tt);
566 * }
567 *
568 * static struct trace_event ftrace_event_type_<call> = {
569 * .trace = ftrace_raw_output_<call>, <-- stage 2
570 * };
571 *
572 * static char print_fmt_<call>[] = <TP_printk>;
573 *
574 * static struct ftrace_event_class __used event_class_<template> = {
575 * .system = "<system>",
576 * .define_fields = ftrace_define_fields_<call>,
577 * .fields = LIST_HEAD_INIT(event_class_##call.fields),
578 * .raw_init = trace_event_raw_init,
579 * .probe = ftrace_raw_event_##call,
580 * .reg = ftrace_event_reg,
581 * };
582 *
583 * static struct ftrace_event_call event_<call> = {
584 * .class = event_class_<template>,
585 * {
586 * .tp = &__tracepoint_<call>,
587 * },
588 * .event = &ftrace_event_type_<call>,
589 * .print_fmt = print_fmt_<call>,
590 * .flags = TRACE_EVENT_FL_TRACEPOINT,
591 * };
 * // it's only safe to use pointers when doing linker tricks to
593 * // create an array.
594 * static struct ftrace_event_call __used
595 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
596 *
597 */
598
599#ifdef CONFIG_PERF_EVENTS
600
601#define _TRACE_PERF_PROTO(call, proto) \
602 static notrace void \
603 perf_trace_##call(void *__data, proto);
604
605#define _TRACE_PERF_INIT(call) \
606 .perf_probe = perf_trace_##call,
607
608#else
609#define _TRACE_PERF_PROTO(call, proto)
610#define _TRACE_PERF_INIT(call)
611#endif /* CONFIG_PERF_EVENTS */
612
613#undef __entry
614#define __entry entry
615
616#undef __field
617#define __field(type, item)
618
619#undef __field_struct
620#define __field_struct(type, item)
621
622#undef __array
623#define __array(type, item, len)
624
625#undef __dynamic_array
626#define __dynamic_array(type, item, len) \
627 __entry->__data_loc_##item = __data_offsets.item;
628
629#undef __string
630#define __string(item, src) __dynamic_array(char, item, -1)
631
632#undef __assign_str
633#define __assign_str(dst, src) \
634 strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
635
636#undef __bitmask
637#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
638
639#undef __get_bitmask
640#define __get_bitmask(field) (char *)__get_dynamic_array(field)
641
642#undef __assign_bitmask
643#define __assign_bitmask(dst, src, nr_bits) \
644 memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
645
646#undef TP_fast_assign
647#define TP_fast_assign(args...) args
648
649#undef __perf_addr
650#define __perf_addr(a) (a)
651
652#undef __perf_count
653#define __perf_count(c) (c)
654
655#undef __perf_task
656#define __perf_task(t) (t)
657
658#undef DECLARE_EVENT_CLASS
659#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
660 \
661static notrace void \
662ftrace_raw_event_##call(void *__data, proto) \
663{ \
664 struct ftrace_event_file *ftrace_file = __data; \
665 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
666 struct ftrace_event_buffer fbuffer; \
667 struct ftrace_raw_##call *entry; \
668 int __data_size; \
669 \
670 if (ftrace_trigger_soft_disabled(ftrace_file)) \
671 return; \
672 \
673 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
674 \
675 entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \
676 sizeof(*entry) + __data_size); \
677 \
678 if (!entry) \
679 return; \
680 \
681 tstruct \
682 \
683 { assign; } \
684 \
685 ftrace_event_buffer_commit(&fbuffer); \
686}
687/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time check
689 * to make sure that if the tracepoint handling changes, the ftrace probe will
690 * fail to compile unless it too is updated.
691 */
692
693#undef DEFINE_EVENT
694#define DEFINE_EVENT(template, call, proto, args) \
695static inline void ftrace_test_probe_##call(void) \
696{ \
697 check_trace_callback_type_##call(ftrace_raw_event_##template); \
698}
699
700#undef DEFINE_EVENT_PRINT
701#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
702
703#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
704
705#undef __entry
706#define __entry REC
707
708#undef __print_flags
709#undef __print_symbolic
710#undef __print_hex
711#undef __get_dynamic_array
712#undef __get_dynamic_array_len
713#undef __get_str
714#undef __get_bitmask
715#undef __print_array
716
717#undef TP_printk
718#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
719
720#undef DECLARE_EVENT_CLASS
721#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
722_TRACE_PERF_PROTO(call, PARAMS(proto)); \
723static char print_fmt_##call[] = print; \
724static struct ftrace_event_class __used __refdata event_class_##call = { \
725 .system = TRACE_SYSTEM_STRING, \
726 .define_fields = ftrace_define_fields_##call, \
727 .fields = LIST_HEAD_INIT(event_class_##call.fields),\
728 .raw_init = trace_event_raw_init, \
729 .probe = ftrace_raw_event_##call, \
730 .reg = ftrace_event_reg, \
731 _TRACE_PERF_INIT(call) \
732};
733
734#undef DEFINE_EVENT
735#define DEFINE_EVENT(template, call, proto, args) \
736 \
737static struct ftrace_event_call __used event_##call = { \
738 .class = &event_class_##template, \
739 { \
740 .tp = &__tracepoint_##call, \
741 }, \
742 .event.funcs = &ftrace_event_type_funcs_##template, \
743 .print_fmt = print_fmt_##template, \
744 .flags = TRACE_EVENT_FL_TRACEPOINT, \
745}; \
746static struct ftrace_event_call __used \
747__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
748
749#undef DEFINE_EVENT_PRINT
750#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
751 \
752static char print_fmt_##call[] = print; \
753 \
754static struct ftrace_event_call __used event_##call = { \
755 .class = &event_class_##template, \
756 { \
757 .tp = &__tracepoint_##call, \
758 }, \
759 .event.funcs = &ftrace_event_type_funcs_##call, \
760 .print_fmt = print_fmt_##call, \
761 .flags = TRACE_EVENT_FL_TRACEPOINT, \
762}; \
763static struct ftrace_event_call __used \
764__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
765
766#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
767
768#undef TRACE_SYSTEM_VAR
769
770#ifdef CONFIG_PERF_EVENTS
771
772#undef __entry
773#define __entry entry
774
775#undef __get_dynamic_array
776#define __get_dynamic_array(field) \
777 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
778
779#undef __get_dynamic_array_len
780#define __get_dynamic_array_len(field) \
781 ((__entry->__data_loc_##field >> 16) & 0xffff)
782
783#undef __get_str
784#define __get_str(field) (char *)__get_dynamic_array(field)
785
786#undef __get_bitmask
787#define __get_bitmask(field) (char *)__get_dynamic_array(field)
788
789#undef __perf_addr
790#define __perf_addr(a) (__addr = (a))
791
792#undef __perf_count
793#define __perf_count(c) (__count = (c))
794
795#undef __perf_task
796#define __perf_task(t) (__task = (t))
797
798#undef DECLARE_EVENT_CLASS
799#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
800static notrace void \
801perf_trace_##call(void *__data, proto) \
802{ \
803 struct ftrace_event_call *event_call = __data; \
804 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
805 struct ftrace_raw_##call *entry; \
806 struct pt_regs *__regs; \
807 u64 __addr = 0, __count = 1; \
808 struct task_struct *__task = NULL; \
809 struct hlist_head *head; \
810 int __entry_size; \
811 int __data_size; \
812 int rctx; \
813 \
814 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
815 \
816 head = this_cpu_ptr(event_call->perf_events); \
817 if (__builtin_constant_p(!__task) && !__task && \
818 hlist_empty(head)) \
819 return; \
820 \
821 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
822 sizeof(u64)); \
823 __entry_size -= sizeof(u32); \
824 \
825 entry = perf_trace_buf_prepare(__entry_size, \
826 event_call->event.type, &__regs, &rctx); \
827 if (!entry) \
828 return; \
829 \
830 perf_fetch_caller_regs(__regs); \
831 \
832 tstruct \
833 \
834 { assign; } \
835 \
836 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
837 __count, __regs, head, __task); \
838}
839
840/*
 * This part is compiled out; it is only here as a build-time check
842 * to make sure that if the tracepoint handling changes, the
843 * perf probe will fail to compile unless it too is updated.
844 */
845#undef DEFINE_EVENT
846#define DEFINE_EVENT(template, call, proto, args) \
847static inline void perf_test_probe_##call(void) \
848{ \
849 check_trace_callback_type_##call(perf_trace_##template); \
850}
851
852
853#undef DEFINE_EVENT_PRINT
854#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
855 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
856
857#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
858#endif /* CONFIG_PERF_EVENTS */
859
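
For orientation only (not part of this patch): the macro stages above expand event definitions written with TRACE_EVENT(). A minimal, hypothetical event header that exercises these stages could look like the sketch below; the "sample" names are invented for illustration.

/* Illustrative only: a hypothetical trace event definition. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_event,

	TP_PROTO(int value),

	TP_ARGS(value),

	TP_STRUCT__entry(
		__field(int, value)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_SAMPLE_H */

/* This part must be outside the multi-read protection. */
#include <trace/define_trace.h>
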
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
new file mode 100644
index 000000000000..fbdd11851725
--- /dev/null
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -0,0 +1,645 @@
1/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
2 *
3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Copyright 2014 Advanced Micro Devices, Inc.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 *
26 * Authors:
27 * Kevin E. Martin <martin@valinux.com>
28 * Gareth Hughes <gareth@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32#ifndef __AMDGPU_DRM_H__
33#define __AMDGPU_DRM_H__
34
35#include "drm.h"
36
37#define DRM_AMDGPU_GEM_CREATE 0x00
38#define DRM_AMDGPU_GEM_MMAP 0x01
39#define DRM_AMDGPU_CTX 0x02
40#define DRM_AMDGPU_BO_LIST 0x03
41#define DRM_AMDGPU_CS 0x04
42#define DRM_AMDGPU_INFO 0x05
43#define DRM_AMDGPU_GEM_METADATA 0x06
44#define DRM_AMDGPU_GEM_WAIT_IDLE 0x07
45#define DRM_AMDGPU_GEM_VA 0x08
46#define DRM_AMDGPU_WAIT_CS 0x09
47#define DRM_AMDGPU_GEM_OP 0x10
48#define DRM_AMDGPU_GEM_USERPTR 0x11
49
50#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
51#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
52#define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
53#define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
54#define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
55#define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
56#define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
57#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
58#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
59#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
60#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
61#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
62
63#define AMDGPU_GEM_DOMAIN_CPU 0x1
64#define AMDGPU_GEM_DOMAIN_GTT 0x2
65#define AMDGPU_GEM_DOMAIN_VRAM 0x4
66#define AMDGPU_GEM_DOMAIN_GDS 0x8
67#define AMDGPU_GEM_DOMAIN_GWS 0x10
68#define AMDGPU_GEM_DOMAIN_OA 0x20
69
70/* Flag that CPU access will be required for the case of VRAM domain */
71#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
72/* Flag that CPU access will not work; this VRAM domain is invisible */
73#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
74/* Flag that USWC attributes should be used for GTT */
75#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
76
77struct drm_amdgpu_gem_create_in {
78 /** the requested memory size */
79 uint64_t bo_size;
80 /** physical start_addr alignment in bytes for some HW requirements */
81 uint64_t alignment;
82 /** the requested memory domains */
83 uint64_t domains;
84 /** allocation flags */
85 uint64_t domain_flags;
86};
87
88struct drm_amdgpu_gem_create_out {
89 /** returned GEM object handle */
90 uint32_t handle;
91 uint32_t _pad;
92};
93
94union drm_amdgpu_gem_create {
95 struct drm_amdgpu_gem_create_in in;
96 struct drm_amdgpu_gem_create_out out;
97};
98
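
As a non-authoritative illustration of the create ioctl above (not part of the patch): a minimal user-space sketch that allocates a 1 MiB VRAM buffer. The device node path and the libdrm-style include path are assumptions; the structure, flag and ioctl names come from this header.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* include path is an assumption */

int main(void)
{
	union drm_amdgpu_gem_create req;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.in.bo_size = 1 << 20;			/* 1 MiB */
	req.in.alignment = 4096;
	req.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	req.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &req) == 0)
		printf("created GEM handle %u\n", req.out.handle);

	close(fd);
	return 0;
}
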
99/** Opcode to create new residency list. */
100#define AMDGPU_BO_LIST_OP_CREATE 0
101/** Opcode to destroy previously created residency list */
102#define AMDGPU_BO_LIST_OP_DESTROY 1
103/** Opcode to update resource information in the list */
104#define AMDGPU_BO_LIST_OP_UPDATE 2
105
106struct drm_amdgpu_bo_list_in {
107 /** Type of operation */
108 uint32_t operation;
109 /** Handle of list or 0 if we want to create one */
110 uint32_t list_handle;
111 /** Number of BOs in list */
112 uint32_t bo_number;
113 /** Size of each element describing BO */
114 uint32_t bo_info_size;
115 /** Pointer to array describing BOs */
116 uint64_t bo_info_ptr;
117};
118
119struct drm_amdgpu_bo_list_entry {
120 /** Handle of BO */
121 uint32_t bo_handle;
122 /** New (if specified) BO priority to be used during migration */
123 uint32_t bo_priority;
124};
125
126struct drm_amdgpu_bo_list_out {
127 /** Handle of resource list */
128 uint32_t list_handle;
129 uint32_t _pad;
130};
131
132union drm_amdgpu_bo_list {
133 struct drm_amdgpu_bo_list_in in;
134 struct drm_amdgpu_bo_list_out out;
135};
136
137/* context related */
138#define AMDGPU_CTX_OP_ALLOC_CTX 1
139#define AMDGPU_CTX_OP_FREE_CTX 2
140#define AMDGPU_CTX_OP_QUERY_STATE 3
141
142/* GPU reset status */
143#define AMDGPU_CTX_NO_RESET 0
144/* this context caused it */
145#define AMDGPU_CTX_GUILTY_RESET 1
146/* some other context caused it */
147#define AMDGPU_CTX_INNOCENT_RESET 2
148/* unknown cause */
149#define AMDGPU_CTX_UNKNOWN_RESET 3
150
151struct drm_amdgpu_ctx_in {
152 /** AMDGPU_CTX_OP_* */
153 uint32_t op;
154 /** For future use, no flags defined so far */
155 uint32_t flags;
156 uint32_t ctx_id;
157 uint32_t _pad;
158};
159
160union drm_amdgpu_ctx_out {
161 struct {
162 uint32_t ctx_id;
163 uint32_t _pad;
164 } alloc;
165
166 struct {
167 /** For future use, no flags defined so far */
168 uint64_t flags;
169 /** Number of resets caused by this context so far. */
170 uint32_t hangs;
171 /** Reset status since the last call of the ioctl. */
172 uint32_t reset_status;
173 } state;
174};
175
176union drm_amdgpu_ctx {
177 struct drm_amdgpu_ctx_in in;
178 union drm_amdgpu_ctx_out out;
179};
180
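
A hedged sketch of the context ioctl defined above: allocate a rendering context, note its id, then free it again. The file descriptor is assumed to be an already opened amdgpu render node.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Allocate and immediately free a GPU context; returns 0 on success. */
static int ctx_roundtrip(int fd)
{
	union drm_amdgpu_ctx ctx;
	uint32_t id;

	memset(&ctx, 0, sizeof(ctx));
	ctx.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &ctx))
		return -1;
	id = ctx.out.alloc.ctx_id;

	memset(&ctx, 0, sizeof(ctx));
	ctx.in.op = AMDGPU_CTX_OP_FREE_CTX;
	ctx.in.ctx_id = id;
	return ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &ctx);
}
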
181/*
182 * This is not a reliable API and you should expect it to fail for any
 * number of reasons and have a fallback path that does not use userptr to
184 * perform any operation.
185 */
186#define AMDGPU_GEM_USERPTR_READONLY (1 << 0)
187#define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1)
188#define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2)
189#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)
190
191struct drm_amdgpu_gem_userptr {
192 uint64_t addr;
193 uint64_t size;
194 /* AMDGPU_GEM_USERPTR_* */
195 uint32_t flags;
196 /* Resulting GEM handle */
197 uint32_t handle;
198};
199
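
Given the warning above, a sketch (assuming page-aligned anonymous memory and an amdgpu fd) of how a caller might attempt userptr registration while keeping a fallback path:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Try to wrap anonymous user memory as a GEM object. On any failure the
 * caller is expected to fall back to a normal BO allocation plus copies. */
static int try_userptr(int fd, void *buf, uint64_t size, uint32_t *handle)
{
	struct drm_amdgpu_gem_userptr args;

	memset(&args, 0, sizeof(args));
	args.addr = (uint64_t)(uintptr_t)buf;
	args.size = size;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &args))
		return -1;	/* take the non-userptr fallback path */
	*handle = args.handle;
	return 0;
}
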
200/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
201#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
202#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
203#define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4
204#define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f
205#define AMDGPU_TILING_TILE_SPLIT_SHIFT 9
206#define AMDGPU_TILING_TILE_SPLIT_MASK 0x7
207#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12
208#define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7
209#define AMDGPU_TILING_BANK_WIDTH_SHIFT 15
210#define AMDGPU_TILING_BANK_WIDTH_MASK 0x3
211#define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17
212#define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3
213#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19
214#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3
215#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
216#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
217
218#define AMDGPU_TILING_SET(field, value) \
219 (((value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
220#define AMDGPU_TILING_GET(value, field) \
221 (((value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
222
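
A small illustration of the SET/GET helpers above; the field values are made up, only the packing mechanics matter.

#include <stdint.h>
#include <drm/amdgpu_drm.h>

static uint64_t pack_example_tiling(void)
{
	/* Pack two (made-up) parameters into the tiling_info word ... */
	uint64_t tiling = AMDGPU_TILING_SET(ARRAY_MODE, 4) |
			  AMDGPU_TILING_SET(BANK_WIDTH, 1);

	/* ... and read one of them back. */
	if (AMDGPU_TILING_GET(tiling, BANK_WIDTH) != 1)
		return 0;
	return tiling;
}
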
223#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
224#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2
225
226/** The same structure is shared for input/output */
227struct drm_amdgpu_gem_metadata {
228 /** GEM Object handle */
229 uint32_t handle;
230 /** Do we want get or set metadata */
231 uint32_t op;
232 struct {
233 /** For future use, no flags defined so far */
234 uint64_t flags;
235 /** family specific tiling info */
236 uint64_t tiling_info;
237 uint32_t data_size_bytes;
238 uint32_t data[64];
239 } data;
240};
241
242struct drm_amdgpu_gem_mmap_in {
243 /** the GEM object handle */
244 uint32_t handle;
245 uint32_t _pad;
246};
247
248struct drm_amdgpu_gem_mmap_out {
249 /** mmap offset from the vma offset manager */
250 uint64_t addr_ptr;
251};
252
253union drm_amdgpu_gem_mmap {
254 struct drm_amdgpu_gem_mmap_in in;
255 struct drm_amdgpu_gem_mmap_out out;
256};
257
258struct drm_amdgpu_gem_wait_idle_in {
259 /** GEM object handle */
260 uint32_t handle;
261 /** For future use, no flags defined so far */
262 uint32_t flags;
263 /** Absolute timeout to wait */
264 uint64_t timeout;
265};
266
267struct drm_amdgpu_gem_wait_idle_out {
268 /** BO status: 0 - BO is idle, 1 - BO is busy */
269 uint32_t status;
270 /** Returned current memory domain */
271 uint32_t domain;
272};
273
274union drm_amdgpu_gem_wait_idle {
275 struct drm_amdgpu_gem_wait_idle_in in;
276 struct drm_amdgpu_gem_wait_idle_out out;
277};
278
279struct drm_amdgpu_wait_cs_in {
280 /** Command submission handle */
281 uint64_t handle;
282 /** Absolute timeout to wait */
283 uint64_t timeout;
284 uint32_t ip_type;
285 uint32_t ip_instance;
286 uint32_t ring;
287 uint32_t ctx_id;
288};
289
290struct drm_amdgpu_wait_cs_out {
291 /** CS status: 0 - CS completed, 1 - CS still busy */
292 uint64_t status;
293};
294
295union drm_amdgpu_wait_cs {
296 struct drm_amdgpu_wait_cs_in in;
297 struct drm_amdgpu_wait_cs_out out;
298};
299
300#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
301#define AMDGPU_GEM_OP_SET_PLACEMENT 1
302
303/* Sets or returns a value associated with a buffer. */
304struct drm_amdgpu_gem_op {
305 /** GEM object handle */
306 uint32_t handle;
307 /** AMDGPU_GEM_OP_* */
308 uint32_t op;
309 /** Input or return value */
310 uint64_t value;
311};
312
313#define AMDGPU_VA_OP_MAP 1
314#define AMDGPU_VA_OP_UNMAP 2
315
316/* Delay the page table update till the next CS */
317#define AMDGPU_VM_DELAY_UPDATE (1 << 0)
318
319/* Mapping flags */
320/* readable mapping */
321#define AMDGPU_VM_PAGE_READABLE (1 << 1)
322/* writable mapping */
323#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
324/* executable mapping, new for VI */
325#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
326
327struct drm_amdgpu_gem_va {
328 /** GEM object handle */
329 uint32_t handle;
330 uint32_t _pad;
331 /** AMDGPU_VA_OP_* */
332 uint32_t operation;
333 /** AMDGPU_VM_PAGE_* */
334 uint32_t flags;
335 /** VA address to assign. Must be correctly aligned. */
336 uint64_t va_address;
337 /** Offset inside the BO to assign. Must be correctly aligned. */
338 uint64_t offset_in_bo;
339 /** Mapping size. Must be correctly aligned. */
340 uint64_t map_size;
341};
342
343#define AMDGPU_HW_IP_GFX 0
344#define AMDGPU_HW_IP_COMPUTE 1
345#define AMDGPU_HW_IP_DMA 2
346#define AMDGPU_HW_IP_UVD 3
347#define AMDGPU_HW_IP_VCE 4
348#define AMDGPU_HW_IP_NUM 5
349
350#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
351
352#define AMDGPU_CHUNK_ID_IB 0x01
353#define AMDGPU_CHUNK_ID_FENCE 0x02
354#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
355
356struct drm_amdgpu_cs_chunk {
357 uint32_t chunk_id;
358 uint32_t length_dw;
359 uint64_t chunk_data;
360};
361
362struct drm_amdgpu_cs_in {
363 /** Rendering context id */
364 uint32_t ctx_id;
365 /** Handle of resource list associated with CS */
366 uint32_t bo_list_handle;
367 uint32_t num_chunks;
368 uint32_t _pad;
369 /** Points to an array of uint64_t, each the address of a cs chunk */
370 uint64_t chunks;
371};
372
373struct drm_amdgpu_cs_out {
374 uint64_t handle;
375};
376
377union drm_amdgpu_cs {
378 struct drm_amdgpu_cs_in in;
379 struct drm_amdgpu_cs_out out;
380};
381
382/* Specify flags to be used for IB */
383
384/* This IB should be submitted to CE */
385#define AMDGPU_IB_FLAG_CE (1<<0)
386
387/* CE Preamble */
388#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
389
390struct drm_amdgpu_cs_chunk_ib {
391 uint32_t _pad;
392 /** AMDGPU_IB_FLAG_* */
393 uint32_t flags;
394 /** Virtual address to begin IB execution */
395 uint64_t va_start;
396 /** Size of submission */
397 uint32_t ib_bytes;
398 /** HW IP to submit to */
399 uint32_t ip_type;
400 /** HW IP index of the same type to submit to */
401 uint32_t ip_instance;
402 /** Ring index to submit to */
403 uint32_t ring;
404};
405
406struct drm_amdgpu_cs_chunk_dep {
407 uint32_t ip_type;
408 uint32_t ip_instance;
409 uint32_t ring;
410 uint32_t ctx_id;
411 uint64_t handle;
412};
413
414struct drm_amdgpu_cs_chunk_fence {
415 uint32_t handle;
416 uint32_t offset;
417};
418
419struct drm_amdgpu_cs_chunk_data {
420 union {
421 struct drm_amdgpu_cs_chunk_ib ib_data;
422 struct drm_amdgpu_cs_chunk_fence fence_data;
423 };
424};
425
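
A sketch of how the chunk indirection described above is assembled for a single IB submission on the GFX ring; the context id, BO list handle and the GPU virtual address of the IB are assumed to have been created already.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int submit_one_ib(int fd, uint32_t ctx_id, uint32_t bo_list,
			 uint64_t ib_va, uint32_t ib_bytes)
{
	struct drm_amdgpu_cs_chunk_ib ib;
	struct drm_amdgpu_cs_chunk chunk;
	uint64_t chunk_ptrs[1];
	union drm_amdgpu_cs cs;

	memset(&ib, 0, sizeof(ib));
	ib.va_start = ib_va;
	ib.ib_bytes = ib_bytes;
	ib.ip_type = AMDGPU_HW_IP_GFX;

	memset(&chunk, 0, sizeof(chunk));
	chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
	chunk.length_dw = sizeof(ib) / 4;
	chunk.chunk_data = (uint64_t)(uintptr_t)&ib;

	/* cs.in.chunks points to an array of uint64_t chunk addresses */
	chunk_ptrs[0] = (uint64_t)(uintptr_t)&chunk;

	memset(&cs, 0, sizeof(cs));
	cs.in.ctx_id = ctx_id;
	cs.in.bo_list_handle = bo_list;
	cs.in.num_chunks = 1;
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_ptrs;

	return ioctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
}
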
426/**
 * Query h/w info: Flag that this is an integrated (a.k.a. fusion) GPU
428 *
429 */
430#define AMDGPU_IDS_FLAGS_FUSION 0x1
431
432/* indicate if acceleration can be working */
433#define AMDGPU_INFO_ACCEL_WORKING 0x00
434/* get the crtc_id from the mode object id? */
435#define AMDGPU_INFO_CRTC_FROM_ID 0x01
436/* query hw IP info */
437#define AMDGPU_INFO_HW_IP_INFO 0x02
438/* query hw IP instance count for the specified type */
439#define AMDGPU_INFO_HW_IP_COUNT 0x03
440/* timestamp for GL_ARB_timer_query */
441#define AMDGPU_INFO_TIMESTAMP 0x05
442/* Query the firmware version */
443#define AMDGPU_INFO_FW_VERSION 0x0e
444 /* Subquery id: Query VCE firmware version */
445 #define AMDGPU_INFO_FW_VCE 0x1
446 /* Subquery id: Query UVD firmware version */
447 #define AMDGPU_INFO_FW_UVD 0x2
448 /* Subquery id: Query GMC firmware version */
449 #define AMDGPU_INFO_FW_GMC 0x03
450 /* Subquery id: Query GFX ME firmware version */
451 #define AMDGPU_INFO_FW_GFX_ME 0x04
452 /* Subquery id: Query GFX PFP firmware version */
453 #define AMDGPU_INFO_FW_GFX_PFP 0x05
454 /* Subquery id: Query GFX CE firmware version */
455 #define AMDGPU_INFO_FW_GFX_CE 0x06
456 /* Subquery id: Query GFX RLC firmware version */
457 #define AMDGPU_INFO_FW_GFX_RLC 0x07
458 /* Subquery id: Query GFX MEC firmware version */
459 #define AMDGPU_INFO_FW_GFX_MEC 0x08
460 /* Subquery id: Query SMC firmware version */
461 #define AMDGPU_INFO_FW_SMC 0x0a
462 /* Subquery id: Query SDMA firmware version */
463 #define AMDGPU_INFO_FW_SDMA 0x0b
464/* number of bytes moved for TTM migration */
465#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
466/* the used VRAM size */
467#define AMDGPU_INFO_VRAM_USAGE 0x10
468/* the used GTT size */
469#define AMDGPU_INFO_GTT_USAGE 0x11
470/* Information about GDS, etc. resource configuration */
471#define AMDGPU_INFO_GDS_CONFIG 0x13
472/* Query information about VRAM and GTT domains */
473#define AMDGPU_INFO_VRAM_GTT 0x14
474/* Query information about register in MMR address space*/
475#define AMDGPU_INFO_READ_MMR_REG 0x15
476/* Query information about device: rev id, family, etc. */
477#define AMDGPU_INFO_DEV_INFO 0x16
478/* visible vram usage */
479#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
480
481#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
482#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
483#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
484#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
485
486/* Input structure for the INFO ioctl */
487struct drm_amdgpu_info {
488 /* Where the return value will be stored */
489 uint64_t return_pointer;
490 /* The size of the return value. Just like "size" in "snprintf",
491 * it limits how many bytes the kernel can write. */
492 uint32_t return_size;
493 /* The query request id. */
494 uint32_t query;
495
496 union {
497 struct {
498 uint32_t id;
499 uint32_t _pad;
500 } mode_crtc;
501
502 struct {
503 /** AMDGPU_HW_IP_* */
504 uint32_t type;
505 /**
506 * Index of the IP if there are more IPs of the same
507 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
508 */
509 uint32_t ip_instance;
510 } query_hw_ip;
511
512 struct {
513 uint32_t dword_offset;
514 /** number of registers to read */
515 uint32_t count;
516 uint32_t instance;
517 /** For future use, no flags defined so far */
518 uint32_t flags;
519 } read_mmr_reg;
520
521 struct {
522 /** AMDGPU_INFO_FW_* */
523 uint32_t fw_type;
524 /**
525 * Index of the IP if there are more IPs of
526 * the same type.
527 */
528 uint32_t ip_instance;
529 /**
530 * Index of the engine. Whether this is used depends
531 * on the firmware type. (e.g. MEC, SDMA)
532 */
533 uint32_t index;
534 uint32_t _pad;
535 } query_fw;
536 };
537};
538
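
A sketch of the INFO ioctl built around this structure, querying the device info block; as documented above, the kernel writes at most return_size bytes to return_pointer.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Fill *dev with AMDGPU_INFO_DEV_INFO data; returns 0 on success. */
static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev)
{
	struct drm_amdgpu_info req;

	memset(&req, 0, sizeof(req));
	req.return_pointer = (uint64_t)(uintptr_t)dev;
	req.return_size = sizeof(*dev);
	req.query = AMDGPU_INFO_DEV_INFO;

	return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req);
}
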
539struct drm_amdgpu_info_gds {
540 /** GDS GFX partition size */
541 uint32_t gds_gfx_partition_size;
542 /** GDS compute partition size */
543 uint32_t compute_partition_size;
544 /** total GDS memory size */
545 uint32_t gds_total_size;
546 /** GWS size per GFX partition */
547 uint32_t gws_per_gfx_partition;
548 /** GSW size per compute partition */
549 uint32_t gws_per_compute_partition;
550 /** OA size per GFX partition */
551 uint32_t oa_per_gfx_partition;
552 /** OA size per compute partition */
553 uint32_t oa_per_compute_partition;
554 uint32_t _pad;
555};
556
557struct drm_amdgpu_info_vram_gtt {
558 uint64_t vram_size;
559 uint64_t vram_cpu_accessible_size;
560 uint64_t gtt_size;
561};
562
563struct drm_amdgpu_info_firmware {
564 uint32_t ver;
565 uint32_t feature;
566};
567
568#define AMDGPU_VRAM_TYPE_UNKNOWN 0
569#define AMDGPU_VRAM_TYPE_GDDR1 1
570#define AMDGPU_VRAM_TYPE_DDR2 2
571#define AMDGPU_VRAM_TYPE_GDDR3 3
572#define AMDGPU_VRAM_TYPE_GDDR4 4
573#define AMDGPU_VRAM_TYPE_GDDR5 5
574#define AMDGPU_VRAM_TYPE_HBM 6
575#define AMDGPU_VRAM_TYPE_DDR3 7
576
577struct drm_amdgpu_info_device {
578 /** PCI Device ID */
579 uint32_t device_id;
 /** Internal chip revision: A0, A1, etc. */
581 uint32_t chip_rev;
582 uint32_t external_rev;
583 /** Revision id in PCI Config space */
584 uint32_t pci_rev;
585 uint32_t family;
586 uint32_t num_shader_engines;
587 uint32_t num_shader_arrays_per_engine;
588 /* in KHz */
589 uint32_t gpu_counter_freq;
590 uint64_t max_engine_clock;
591 uint64_t max_memory_clock;
592 /* cu information */
593 uint32_t cu_active_number;
594 uint32_t cu_ao_mask;
595 uint32_t cu_bitmap[4][4];
596 /** Render backend pipe mask. One render backend is CB+DB. */
597 uint32_t enabled_rb_pipes_mask;
598 uint32_t num_rb_pipes;
599 uint32_t num_hw_gfx_contexts;
600 uint32_t _pad;
601 uint64_t ids_flags;
602 /** Starting virtual address for UMDs. */
603 uint64_t virtual_address_offset;
604 /** The maximum virtual address */
605 uint64_t virtual_address_max;
606 /** Required alignment of virtual addresses. */
607 uint32_t virtual_address_alignment;
608 /** Page table entry - fragment size */
609 uint32_t pte_fragment_size;
610 uint32_t gart_page_size;
611 /** constant engine ram size*/
612 uint32_t ce_ram_size;
613 /** video memory type info*/
614 uint32_t vram_type;
615 /** video memory bit width*/
616 uint32_t vram_bit_width;
617 /* vce harvesting instance */
618 uint32_t vce_harvest_config;
619};
620
621struct drm_amdgpu_info_hw_ip {
622 /** Version of h/w IP */
623 uint32_t hw_ip_version_major;
624 uint32_t hw_ip_version_minor;
625 /** Capabilities */
626 uint64_t capabilities_flags;
627 /** command buffer address start alignment*/
628 uint32_t ib_start_alignment;
629 /** command buffer size alignment*/
630 uint32_t ib_size_alignment;
631 /** Bitmask of available rings. Bit 0 means ring 0, etc. */
632 uint32_t available_rings;
633 uint32_t _pad;
634};
635
636/*
637 * Supported GPU families
638 */
639#define AMDGPU_FAMILY_UNKNOWN 0
640#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
641#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
642#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
643#define AMDGPU_FAMILY_CZ 135 /* Carrizo */
644
645#endif
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index ff6ef62d084b..3801584a0c53 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -786,6 +786,8 @@ struct drm_prime_handle {
786#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property) 786#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
787#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2) 787#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
788#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic) 788#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
789#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
790#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
789 791
790/** 792/**
791 * Device specific ioctls should only be in their respective headers 793 * Device specific ioctls should only be in their respective headers
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 07735822a28f..2f295cde657e 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -207,4 +207,19 @@
207 */ 207 */
208#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3) 208#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
209 209
210/*
211 * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
212 *
213 * Macroblocks are laid in a Z-shape, and each pixel data is following the
214 * standard NV12 style.
215 * As for NV12, an image is the result of two frame buffers: one for Y,
216 * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
217 * Alignment requirements are (for each buffer):
218 * - multiple of 128 pixels for the width
219 * - multiple of 32 pixels for the height
220 *
221 * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html
222 */
223#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
224
210#endif /* DRM_FOURCC_H */ 225#endif /* DRM_FOURCC_H */
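
Not part of the patch: a small helper (the ALIGN_UP macro name is local to this sketch) that rounds plane dimensions up to the NV12MT requirements quoted above, width to a multiple of 128 pixels and height to a multiple of 32 lines; for example 1920x1080 becomes 1920x1088.

#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

static void nv12mt_aligned_size(uint32_t width, uint32_t height,
				uint32_t *aw, uint32_t *ah)
{
	*aw = ALIGN_UP(width, 128);	/* width: multiple of 128 pixels */
	*ah = ALIGN_UP(height, 32);	/* height: multiple of 32 lines  */
}
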
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index dbeba949462a..359107ab629e 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -558,4 +558,24 @@ struct drm_mode_atomic {
558 __u64 user_data; 558 __u64 user_data;
559}; 559};
560 560
561/**
562 * Create a new 'blob' data property, copying length bytes from data pointer,
563 * and returning new blob ID.
564 */
565struct drm_mode_create_blob {
566 /** Pointer to data to copy. */
567 __u64 data;
568 /** Length of data to copy. */
569 __u32 length;
570 /** Return: new property ID. */
571 __u32 blob_id;
572};
573
574/**
575 * Destroy a user-created blob property.
576 */
577struct drm_mode_destroy_blob {
578 __u32 blob_id;
579};
580
561#endif 581#endif
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 551b6737f5df..db809b722985 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -171,8 +171,12 @@ typedef struct _drm_i915_sarea {
171#define I915_BOX_TEXTURE_LOAD 0x8 171#define I915_BOX_TEXTURE_LOAD 0x8
172#define I915_BOX_LOST_CONTEXT 0x10 172#define I915_BOX_LOST_CONTEXT 0x10
173 173
174/* I915 specific ioctls 174/*
175 * The device specific ioctl range is 0x40 to 0x79. 175 * i915 specific ioctls.
176 *
177 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
178 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
179 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
176 */ 180 */
177#define DRM_I915_INIT 0x00 181#define DRM_I915_INIT 0x00
178#define DRM_I915_FLUSH 0x01 182#define DRM_I915_FLUSH 0x01
@@ -996,6 +1000,7 @@ struct drm_intel_overlay_put_image {
996/* flags */ 1000/* flags */
997#define I915_OVERLAY_UPDATE_ATTRS (1<<0) 1001#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
998#define I915_OVERLAY_UPDATE_GAMMA (1<<1) 1002#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
1003#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
999struct drm_intel_overlay_attrs { 1004struct drm_intel_overlay_attrs {
1000 __u32 flags; 1005 __u32 flags;
1001 __u32 color_key; 1006 __u32 color_key;
@@ -1065,6 +1070,14 @@ struct drm_i915_reg_read {
1065 __u64 offset; 1070 __u64 offset;
1066 __u64 val; /* Return value */ 1071 __u64 val; /* Return value */
1067}; 1072};
1073/* Known registers:
1074 *
1075 * Render engine timestamp - 0x2358 + 64bit - gen7+
1076 * - Note this register returns an invalid value if using the default
 * single instruction 8-byte read; to work around that, use
1078 * offset (0x2538 | 1) instead.
1079 *
1080 */
1068 1081
1069struct drm_i915_reset_stats { 1082struct drm_i915_reset_stats {
1070 __u32 ctx_id; 1083 __u32 ctx_id;
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 0664c31f010c..75a232b9a970 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -23,7 +23,7 @@
23 23
24/* Please note that modifications to all structs defined here are 24/* Please note that modifications to all structs defined here are
25 * subject to backwards-compatibility constraints: 25 * subject to backwards-compatibility constraints:
26 * 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit 26 * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
27 * user/kernel compatibility 27 * user/kernel compatibility
28 * 2) Keep fields aligned to their size 28 * 2) Keep fields aligned to their size
29 * 3) Because of how drm_ioctl() works, we can add new fields at 29 * 3) Because of how drm_ioctl() works, we can add new fields at
@@ -44,8 +44,8 @@
44 * same as 'struct timespec' but 32/64b ABI safe. 44 * same as 'struct timespec' but 32/64b ABI safe.
45 */ 45 */
46struct drm_msm_timespec { 46struct drm_msm_timespec {
47 int64_t tv_sec; /* seconds */ 47 __s64 tv_sec; /* seconds */
48 int64_t tv_nsec; /* nanoseconds */ 48 __s64 tv_nsec; /* nanoseconds */
49}; 49};
50 50
51#define MSM_PARAM_GPU_ID 0x01 51#define MSM_PARAM_GPU_ID 0x01
@@ -53,9 +53,9 @@ struct drm_msm_timespec {
53#define MSM_PARAM_CHIP_ID 0x03 53#define MSM_PARAM_CHIP_ID 0x03
54 54
55struct drm_msm_param { 55struct drm_msm_param {
56 uint32_t pipe; /* in, MSM_PIPE_x */ 56 __u32 pipe; /* in, MSM_PIPE_x */
57 uint32_t param; /* in, MSM_PARAM_x */ 57 __u32 param; /* in, MSM_PARAM_x */
58 uint64_t value; /* out (get_param) or in (set_param) */ 58 __u64 value; /* out (get_param) or in (set_param) */
59}; 59};
60 60
61/* 61/*
@@ -77,15 +77,15 @@ struct drm_msm_param {
77 MSM_BO_UNCACHED) 77 MSM_BO_UNCACHED)
78 78
79struct drm_msm_gem_new { 79struct drm_msm_gem_new {
80 uint64_t size; /* in */ 80 __u64 size; /* in */
81 uint32_t flags; /* in, mask of MSM_BO_x */ 81 __u32 flags; /* in, mask of MSM_BO_x */
82 uint32_t handle; /* out */ 82 __u32 handle; /* out */
83}; 83};
84 84
85struct drm_msm_gem_info { 85struct drm_msm_gem_info {
86 uint32_t handle; /* in */ 86 __u32 handle; /* in */
87 uint32_t pad; 87 __u32 pad;
88 uint64_t offset; /* out, offset to pass to mmap() */ 88 __u64 offset; /* out, offset to pass to mmap() */
89}; 89};
90 90
91#define MSM_PREP_READ 0x01 91#define MSM_PREP_READ 0x01
@@ -95,13 +95,13 @@ struct drm_msm_gem_info {
95#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC) 95#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
96 96
97struct drm_msm_gem_cpu_prep { 97struct drm_msm_gem_cpu_prep {
98 uint32_t handle; /* in */ 98 __u32 handle; /* in */
99 uint32_t op; /* in, mask of MSM_PREP_x */ 99 __u32 op; /* in, mask of MSM_PREP_x */
100 struct drm_msm_timespec timeout; /* in */ 100 struct drm_msm_timespec timeout; /* in */
101}; 101};
102 102
103struct drm_msm_gem_cpu_fini { 103struct drm_msm_gem_cpu_fini {
104 uint32_t handle; /* in */ 104 __u32 handle; /* in */
105}; 105};
106 106
107/* 107/*
@@ -120,11 +120,11 @@ struct drm_msm_gem_cpu_fini {
120 * otherwise EINVAL. 120 * otherwise EINVAL.
121 */ 121 */
122struct drm_msm_gem_submit_reloc { 122struct drm_msm_gem_submit_reloc {
123 uint32_t submit_offset; /* in, offset from submit_bo */ 123 __u32 submit_offset; /* in, offset from submit_bo */
124 uint32_t or; /* in, value OR'd with result */ 124 __u32 or; /* in, value OR'd with result */
125 int32_t shift; /* in, amount of left shift (can be negative) */ 125 __s32 shift; /* in, amount of left shift (can be negative) */
126 uint32_t reloc_idx; /* in, index of reloc_bo buffer */ 126 __u32 reloc_idx; /* in, index of reloc_bo buffer */
127 uint64_t reloc_offset; /* in, offset from start of reloc_bo */ 127 __u64 reloc_offset; /* in, offset from start of reloc_bo */
128}; 128};
129 129
130/* submit-types: 130/* submit-types:
@@ -139,13 +139,13 @@ struct drm_msm_gem_submit_reloc {
139#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002 139#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
140#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003 140#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
141struct drm_msm_gem_submit_cmd { 141struct drm_msm_gem_submit_cmd {
142 uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */ 142 __u32 type; /* in, one of MSM_SUBMIT_CMD_x */
143 uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */ 143 __u32 submit_idx; /* in, index of submit_bo cmdstream buffer */
144 uint32_t submit_offset; /* in, offset into submit_bo */ 144 __u32 submit_offset; /* in, offset into submit_bo */
145 uint32_t size; /* in, cmdstream size */ 145 __u32 size; /* in, cmdstream size */
146 uint32_t pad; 146 __u32 pad;
147 uint32_t nr_relocs; /* in, number of submit_reloc's */ 147 __u32 nr_relocs; /* in, number of submit_reloc's */
148 uint64_t __user relocs; /* in, ptr to array of submit_reloc's */ 148 __u64 __user relocs; /* in, ptr to array of submit_reloc's */
149}; 149};
150 150
151/* Each buffer referenced elsewhere in the cmdstream submit (ie. the 151/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -165,9 +165,9 @@ struct drm_msm_gem_submit_cmd {
165#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE) 165#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
166 166
167struct drm_msm_gem_submit_bo { 167struct drm_msm_gem_submit_bo {
168 uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */ 168 __u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
169 uint32_t handle; /* in, GEM handle */ 169 __u32 handle; /* in, GEM handle */
170 uint64_t presumed; /* in/out, presumed buffer address */ 170 __u64 presumed; /* in/out, presumed buffer address */
171}; 171};
172 172
173/* Each cmdstream submit consists of a table of buffers involved, and 173/* Each cmdstream submit consists of a table of buffers involved, and
@@ -175,12 +175,12 @@ struct drm_msm_gem_submit_bo {
175 * (context-restore), and IB buffers needed for per tile/bin draw cmds. 175 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
176 */ 176 */
177struct drm_msm_gem_submit { 177struct drm_msm_gem_submit {
178 uint32_t pipe; /* in, MSM_PIPE_x */ 178 __u32 pipe; /* in, MSM_PIPE_x */
179 uint32_t fence; /* out */ 179 __u32 fence; /* out */
180 uint32_t nr_bos; /* in, number of submit_bo's */ 180 __u32 nr_bos; /* in, number of submit_bo's */
181 uint32_t nr_cmds; /* in, number of submit_cmd's */ 181 __u32 nr_cmds; /* in, number of submit_cmd's */
182 uint64_t __user bos; /* in, ptr to array of submit_bo's */ 182 __u64 __user bos; /* in, ptr to array of submit_bo's */
183 uint64_t __user cmds; /* in, ptr to array of submit_cmd's */ 183 __u64 __user cmds; /* in, ptr to array of submit_cmd's */
184}; 184};
185 185
186/* The normal way to synchronize with the GPU is just to CPU_PREP on 186/* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -191,8 +191,8 @@ struct drm_msm_gem_submit {
191 * APIs without requiring a dummy bo to synchronize on. 191 * APIs without requiring a dummy bo to synchronize on.
192 */ 192 */
193struct drm_msm_wait_fence { 193struct drm_msm_wait_fence {
194 uint32_t fence; /* in */ 194 __u32 fence; /* in */
195 uint32_t pad; 195 __u32 pad;
196 struct drm_msm_timespec timeout; /* in */ 196 struct drm_msm_timespec timeout; /* in */
197}; 197};
198 198
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 94d44ab2fda1..01aa2a8e3f8d 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -33,7 +33,7 @@
33#ifndef __RADEON_DRM_H__ 33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__ 34#define __RADEON_DRM_H__
35 35
36#include <drm/drm.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 39 * defines in the X server file (radeon_sarea.h)
@@ -1039,6 +1039,7 @@ struct drm_radeon_cs {
1039#define RADEON_INFO_CURRENT_GPU_MCLK 0x23 1039#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
1040#define RADEON_INFO_READ_REG 0x24 1040#define RADEON_INFO_READ_REG 0x24
1041#define RADEON_INFO_VA_UNMAP_WORKING 0x25 1041#define RADEON_INFO_VA_UNMAP_WORKING 0x25
1042#define RADEON_INFO_GPU_RESET_COUNTER 0x26
1042 1043
1043struct drm_radeon_info { 1044struct drm_radeon_info {
1044 uint32_t request; 1045 uint32_t request;
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 1a0006a76b00..1ff9942718fe 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -138,6 +138,7 @@ header-y += genetlink.h
138header-y += gen_stats.h 138header-y += gen_stats.h
139header-y += gfs2_ondisk.h 139header-y += gfs2_ondisk.h
140header-y += gigaset_dev.h 140header-y += gigaset_dev.h
141header-y += gsmmux.h
141header-y += hdlcdrv.h 142header-y += hdlcdrv.h
142header-y += hdlc.h 143header-y += hdlc.h
143header-y += hdreg.h 144header-y += hdreg.h
@@ -271,6 +272,7 @@ header-y += ncp_fs.h
271header-y += ncp.h 272header-y += ncp.h
272header-y += ncp_mount.h 273header-y += ncp_mount.h
273header-y += ncp_no.h 274header-y += ncp_no.h
275header-y += ndctl.h
274header-y += neighbour.h 276header-y += neighbour.h
275header-y += netconf.h 277header-y += netconf.h
276header-y += netdevice.h 278header-y += netdevice.h
@@ -352,6 +354,7 @@ header-y += rtc.h
352header-y += rtnetlink.h 354header-y += rtnetlink.h
353header-y += scc.h 355header-y += scc.h
354header-y += sched.h 356header-y += sched.h
357header-y += scif_ioctl.h
355header-y += screen_info.h 358header-y += screen_info.h
356header-y += sctp.h 359header-y += sctp.h
357header-y += sdla.h 360header-y += sdla.h
@@ -430,6 +433,7 @@ header-y += virtio_balloon.h
430header-y += virtio_blk.h 433header-y += virtio_blk.h
431header-y += virtio_config.h 434header-y += virtio_config.h
432header-y += virtio_console.h 435header-y += virtio_console.h
436header-y += virtio_gpu.h
433header-y += virtio_ids.h 437header-y += virtio_ids.h
434header-y += virtio_input.h 438header-y += virtio_input.h
435header-y += virtio_net.h 439header-y += virtio_net.h
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a9ebdf5701e8..29ef6f99e43d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -113,6 +113,7 @@ enum bpf_map_type {
113 BPF_MAP_TYPE_UNSPEC, 113 BPF_MAP_TYPE_UNSPEC,
114 BPF_MAP_TYPE_HASH, 114 BPF_MAP_TYPE_HASH,
115 BPF_MAP_TYPE_ARRAY, 115 BPF_MAP_TYPE_ARRAY,
116 BPF_MAP_TYPE_PROG_ARRAY,
116}; 117};
117 118
118enum bpf_prog_type { 119enum bpf_prog_type {
@@ -210,6 +211,44 @@ enum bpf_func_id {
210 * Return: 0 on success 211 * Return: 0 on success
211 */ 212 */
212 BPF_FUNC_l4_csum_replace, 213 BPF_FUNC_l4_csum_replace,
214
215 /**
216 * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
217 * @ctx: context pointer passed to next program
218 * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
219 * @index: index inside array that selects specific program to run
220 * Return: 0 on success
221 */
222 BPF_FUNC_tail_call,
223
224 /**
225 * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
226 * @skb: pointer to skb
227 * @ifindex: ifindex of the net device
228 * @flags: bit 0 - if set, redirect to ingress instead of egress
229 * other bits - reserved
230 * Return: 0 on success
231 */
232 BPF_FUNC_clone_redirect,
233
234 /**
235 * u64 bpf_get_current_pid_tgid(void)
236 * Return: current->tgid << 32 | current->pid
237 */
238 BPF_FUNC_get_current_pid_tgid,
239
240 /**
241 * u64 bpf_get_current_uid_gid(void)
242 * Return: current_gid << 32 | current_uid
243 */
244 BPF_FUNC_get_current_uid_gid,
245
246 /**
247 * bpf_get_current_comm(char *buf, int size_of_buf)
248 * stores current->comm into buf
249 * Return: 0 on success
250 */
251 BPF_FUNC_get_current_comm,
213 __BPF_FUNC_MAX_ID, 252 __BPF_FUNC_MAX_ID,
214}; 253};
215 254
@@ -226,6 +265,10 @@ struct __sk_buff {
226 __u32 vlan_tci; 265 __u32 vlan_tci;
227 __u32 vlan_proto; 266 __u32 vlan_proto;
228 __u32 priority; 267 __u32 priority;
268 __u32 ingress_ifindex;
269 __u32 ifindex;
270 __u32 tc_index;
271 __u32 cb[5];
229}; 272};
230 273
231#endif /* _UAPI__LINUX_BPF_H__ */ 274#endif /* _UAPI__LINUX_BPF_H__ */
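
As an illustration of the new BPF_MAP_TYPE_PROG_ARRAY (not from this patch): creating such a map with the bpf(2) syscall so that programs can later be installed into it and reached via bpf_tail_call(). The availability of __NR_bpf in the toolchain headers is an assumption.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Create a program array map; tail-called programs are installed into it
 * later with BPF_MAP_UPDATE_ELEM (key = index, value = prog fd). */
static int create_prog_array(uint32_t max_progs)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_PROG_ARRAY;
	attr.key_size = sizeof(uint32_t);
	attr.value_size = sizeof(uint32_t);
	attr.max_entries = max_progs;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
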
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 41892f720057..9692cda5f8fc 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -95,11 +95,17 @@ typedef __u32 can_err_mask_t;
95 * @can_dlc: frame payload length in byte (0 .. 8) aka data length code 95 * @can_dlc: frame payload length in byte (0 .. 8) aka data length code
96 * N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1 96 * N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
97 * mapping of the 'data length code' to the real payload length 97 * mapping of the 'data length code' to the real payload length
98 * @__pad: padding
99 * @__res0: reserved / padding
100 * @__res1: reserved / padding
98 * @data: CAN frame payload (up to 8 byte) 101 * @data: CAN frame payload (up to 8 byte)
99 */ 102 */
100struct can_frame { 103struct can_frame {
101 canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ 104 canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
102 __u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */ 105 __u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */
106 __u8 __pad; /* padding */
107 __u8 __res0; /* reserved / padding */
108 __u8 __res1; /* reserved / padding */
103 __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8))); 109 __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
104}; 110};
105 111
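
Not part of the patch: a short SocketCAN sketch showing that a memset() of the frame also clears the padding bytes documented above; the socket is assumed to be an already-bound CAN_RAW socket.

#include <string.h>
#include <unistd.h>
#include <linux/can.h>

/* Send one classic CAN frame; returns 0 on success. */
static int send_one_frame(int sock)
{
	struct can_frame frame;

	memset(&frame, 0, sizeof(frame));	/* also zeroes __pad/__res0/__res1 */
	frame.can_id = 0x123;
	frame.can_dlc = 2;
	frame.data[0] = 0xde;
	frame.data[1] = 0xad;

	return write(sock, &frame, sizeof(frame)) == sizeof(frame) ? 0 : -1;
}
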
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 3e6184cf2f6d..5079b9d57e31 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -78,6 +78,7 @@ enum {
78 CGW_FILTER, /* specify struct can_filter on source CAN device */ 78 CGW_FILTER, /* specify struct can_filter on source CAN device */
79 CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */ 79 CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */
80 CGW_LIM_HOPS, /* limit the number of hops of this specific rule */ 80 CGW_LIM_HOPS, /* limit the number of hops of this specific rule */
81 CGW_MOD_UID, /* user defined identifier for modification updates */
81 __CGW_MAX 82 __CGW_MAX
82}; 83};
83 84
@@ -162,6 +163,10 @@ enum {
162 * load time of the can-gw module). This value is used to reduce the number of 163 * load time of the can-gw module). This value is used to reduce the number of
 * possible hops for this gateway rule to a value smaller than max_hops. 164 * possible hops for this gateway rule to a value smaller than max_hops.
164 * 165 *
166 * CGW_MOD_UID (length 4 bytes):
167 * Optional non-zero user defined routing job identifier to alter existing
168 * modification settings at runtime.
169 *
165 * CGW_CS_XOR (length 4 bytes): 170 * CGW_CS_XOR (length 4 bytes):
166 * Set a simple XOR checksum starting with an initial value into 171 * Set a simple XOR checksum starting with an initial value into
167 * data[result-idx] using data[start-idx] .. data[end-idx] 172 * data[result-idx] using data[start-idx] .. data[end-idx]
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 6497d7933d5b..3ea470f35e40 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -207,8 +207,7 @@ struct cee_pfc {
207#define IEEE_8021QAZ_APP_SEL_ANY 4 207#define IEEE_8021QAZ_APP_SEL_ANY 4
208 208
209/* This structure contains the IEEE 802.1Qaz APP managed object. This 209/* This structure contains the IEEE 802.1Qaz APP managed object. This
210 * object is also used for the CEE std as well. There is no difference 210 * object is also used for the CEE std as well.
211 * between the objects.
212 * 211 *
213 * @selector: protocol identifier type 212 * @selector: protocol identifier type
214 * @protocol: protocol of type indicated 213 * @protocol: protocol of type indicated
@@ -216,13 +215,18 @@ struct cee_pfc {
216 * 8-bit 802.1p user priority bitmap for CEE 215 * 8-bit 802.1p user priority bitmap for CEE
217 * 216 *
218 * ---- 217 * ----
219 * Selector field values 218 * Selector field values for IEEE 802.1Qaz
220 * 0 Reserved 219 * 0 Reserved
221 * 1 Ethertype 220 * 1 Ethertype
222 * 2 Well known port number over TCP or SCTP 221 * 2 Well known port number over TCP or SCTP
223 * 3 Well known port number over UDP or DCCP 222 * 3 Well known port number over UDP or DCCP
224 * 4 Well known port number over TCP, SCTP, UDP, or DCCP 223 * 4 Well known port number over TCP, SCTP, UDP, or DCCP
225 * 5-7 Reserved 224 * 5-7 Reserved
225 *
226 * Selector field values for CEE
227 * 0 Ethertype
228 * 1 Well known port number over TCP or UDP
229 * 2-3 Reserved
226 */ 230 */
227struct dcb_app { 231struct dcb_app {
228 __u8 selector; 232 __u8 selector;
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index eac8c3641f39..061aca3a962d 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
267#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 267#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
268 268
269#define DM_VERSION_MAJOR 4 269#define DM_VERSION_MAJOR 4
270#define DM_VERSION_MINOR 31 270#define DM_VERSION_MINOR 32
271#define DM_VERSION_PATCHLEVEL 0 271#define DM_VERSION_PATCHLEVEL 0
272#define DM_VERSION_EXTRA "-ioctl (2015-3-12)" 272#define DM_VERSION_EXTRA "-ioctl (2015-6-26)"
273 273
274/* Status bits */ 274/* Status bits */
275#define DM_READONLY_FLAG (1 << 0) /* In/Out */ 275#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h
index b4fb650d9d4f..427e4899ed69 100644
--- a/include/uapi/linux/dvb/dmx.h
+++ b/include/uapi/linux/dvb/dmx.h
@@ -32,7 +32,7 @@
32 32
33#define DMX_FILTER_SIZE 16 33#define DMX_FILTER_SIZE 16
34 34
35typedef enum 35enum dmx_output
36{ 36{
37 DMX_OUT_DECODER, /* Streaming directly to decoder. */ 37 DMX_OUT_DECODER, /* Streaming directly to decoder. */
38 DMX_OUT_TAP, /* Output going to a memory buffer */ 38 DMX_OUT_TAP, /* Output going to a memory buffer */
@@ -41,10 +41,11 @@ typedef enum
41 /* (to be retrieved by reading from the */ 41 /* (to be retrieved by reading from the */
42 /* logical DVR device). */ 42 /* logical DVR device). */
43 DMX_OUT_TSDEMUX_TAP /* Like TS_TAP but retrieved from the DMX device */ 43 DMX_OUT_TSDEMUX_TAP /* Like TS_TAP but retrieved from the DMX device */
44} dmx_output_t; 44};
45 45
46typedef enum dmx_output dmx_output_t;
46 47
47typedef enum 48typedef enum dmx_input
48{ 49{
49 DMX_IN_FRONTEND, /* Input from a front-end device. */ 50 DMX_IN_FRONTEND, /* Input from a front-end device. */
50 DMX_IN_DVR /* Input from the logical DVR device. */ 51 DMX_IN_DVR /* Input from the logical DVR device. */
@@ -122,7 +123,7 @@ typedef struct dmx_caps {
122 int num_decoders; 123 int num_decoders;
123} dmx_caps_t; 124} dmx_caps_t;
124 125
125typedef enum { 126typedef enum dmx_source {
126 DMX_SOURCE_FRONT0 = 0, 127 DMX_SOURCE_FRONT0 = 0,
127 DMX_SOURCE_FRONT1, 128 DMX_SOURCE_FRONT1,
128 DMX_SOURCE_FRONT2, 129 DMX_SOURCE_FRONT2,
@@ -139,7 +140,6 @@ struct dmx_stc {
139 __u64 stc; /* output: stc in 'base'*90 kHz units */ 140 __u64 stc; /* output: stc in 'base'*90 kHz units */
140}; 141};
141 142
142
143#define DMX_START _IO('o', 41) 143#define DMX_START _IO('o', 41)
144#define DMX_STOP _IO('o', 42) 144#define DMX_STOP _IO('o', 42)
145#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params) 145#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params)
diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h
index c56d77c496a5..00a20cd21ee2 100644
--- a/include/uapi/linux/dvb/frontend.h
+++ b/include/uapi/linux/dvb/frontend.h
@@ -28,15 +28,14 @@
28 28
29#include <linux/types.h> 29#include <linux/types.h>
30 30
31typedef enum fe_type { 31enum fe_type {
32 FE_QPSK, 32 FE_QPSK,
33 FE_QAM, 33 FE_QAM,
34 FE_OFDM, 34 FE_OFDM,
35 FE_ATSC 35 FE_ATSC
36} fe_type_t; 36};
37
38 37
39typedef enum fe_caps { 38enum fe_caps {
40 FE_IS_STUPID = 0, 39 FE_IS_STUPID = 0,
41 FE_CAN_INVERSION_AUTO = 0x1, 40 FE_CAN_INVERSION_AUTO = 0x1,
42 FE_CAN_FEC_1_2 = 0x2, 41 FE_CAN_FEC_1_2 = 0x2,
@@ -68,12 +67,11 @@ typedef enum fe_caps {
68 FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */ 67 FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */
69 FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */ 68 FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */
70 FE_CAN_MUTE_TS = 0x80000000 /* frontend can stop spurious TS data output */ 69 FE_CAN_MUTE_TS = 0x80000000 /* frontend can stop spurious TS data output */
71} fe_caps_t; 70};
72
73 71
74struct dvb_frontend_info { 72struct dvb_frontend_info {
75 char name[128]; 73 char name[128];
76 fe_type_t type; /* DEPRECATED. Use DTV_ENUM_DELSYS instead */ 74 enum fe_type type; /* DEPRECATED. Use DTV_ENUM_DELSYS instead */
77 __u32 frequency_min; 75 __u32 frequency_min;
78 __u32 frequency_max; 76 __u32 frequency_max;
79 __u32 frequency_stepsize; 77 __u32 frequency_stepsize;
@@ -82,7 +80,7 @@ struct dvb_frontend_info {
82 __u32 symbol_rate_max; 80 __u32 symbol_rate_max;
83 __u32 symbol_rate_tolerance; /* ppm */ 81 __u32 symbol_rate_tolerance; /* ppm */
84 __u32 notifier_delay; /* DEPRECATED */ 82 __u32 notifier_delay; /* DEPRECATED */
85 fe_caps_t caps; 83 enum fe_caps caps;
86}; 84};
87 85
88 86
@@ -95,32 +93,27 @@ struct dvb_diseqc_master_cmd {
95 __u8 msg_len; /* valid values are 3...6 */ 93 __u8 msg_len; /* valid values are 3...6 */
96}; 94};
97 95
98
99struct dvb_diseqc_slave_reply { 96struct dvb_diseqc_slave_reply {
100 __u8 msg [4]; /* { framing, data [3] } */ 97 __u8 msg [4]; /* { framing, data [3] } */
101 __u8 msg_len; /* valid values are 0...4, 0 means no msg */ 98 __u8 msg_len; /* valid values are 0...4, 0 means no msg */
102 int timeout; /* return from ioctl after timeout ms with */ 99 int timeout; /* return from ioctl after timeout ms with */
103}; /* errorcode when no message was received */ 100}; /* errorcode when no message was received */
104 101
105 102enum fe_sec_voltage {
106typedef enum fe_sec_voltage {
107 SEC_VOLTAGE_13, 103 SEC_VOLTAGE_13,
108 SEC_VOLTAGE_18, 104 SEC_VOLTAGE_18,
109 SEC_VOLTAGE_OFF 105 SEC_VOLTAGE_OFF
110} fe_sec_voltage_t; 106};
111
112 107
113typedef enum fe_sec_tone_mode { 108enum fe_sec_tone_mode {
114 SEC_TONE_ON, 109 SEC_TONE_ON,
115 SEC_TONE_OFF 110 SEC_TONE_OFF
116} fe_sec_tone_mode_t; 111};
117
118 112
119typedef enum fe_sec_mini_cmd { 113enum fe_sec_mini_cmd {
120 SEC_MINI_A, 114 SEC_MINI_A,
121 SEC_MINI_B 115 SEC_MINI_B
122} fe_sec_mini_cmd_t; 116};
123
124 117
125/** 118/**
126 * enum fe_status - enumerates the possible frontend status 119 * enum fe_status - enumerates the possible frontend status
@@ -133,8 +126,7 @@ typedef enum fe_sec_mini_cmd {
133 * @FE_REINIT: frontend was reinitialized, application is recommended 126 * @FE_REINIT: frontend was reinitialized, application is recommended
134 * to reset DiSEqC, tone and parameters 127 * to reset DiSEqC, tone and parameters
135 */ 128 */
136 129enum fe_status {
137typedef enum fe_status {
138 FE_HAS_SIGNAL = 0x01, 130 FE_HAS_SIGNAL = 0x01,
139 FE_HAS_CARRIER = 0x02, 131 FE_HAS_CARRIER = 0x02,
140 FE_HAS_VITERBI = 0x04, 132 FE_HAS_VITERBI = 0x04,
@@ -142,16 +134,15 @@ typedef enum fe_status {
142 FE_HAS_LOCK = 0x10, 134 FE_HAS_LOCK = 0x10,
143 FE_TIMEDOUT = 0x20, 135 FE_TIMEDOUT = 0x20,
144 FE_REINIT = 0x40, 136 FE_REINIT = 0x40,
145} fe_status_t; 137};
146 138
147typedef enum fe_spectral_inversion { 139enum fe_spectral_inversion {
148 INVERSION_OFF, 140 INVERSION_OFF,
149 INVERSION_ON, 141 INVERSION_ON,
150 INVERSION_AUTO 142 INVERSION_AUTO
151} fe_spectral_inversion_t; 143};
152
153 144
154typedef enum fe_code_rate { 145enum fe_code_rate {
155 FEC_NONE = 0, 146 FEC_NONE = 0,
156 FEC_1_2, 147 FEC_1_2,
157 FEC_2_3, 148 FEC_2_3,
@@ -165,10 +156,9 @@ typedef enum fe_code_rate {
165 FEC_3_5, 156 FEC_3_5,
166 FEC_9_10, 157 FEC_9_10,
167 FEC_2_5, 158 FEC_2_5,
168} fe_code_rate_t; 159};
169
170 160
171typedef enum fe_modulation { 161enum fe_modulation {
172 QPSK, 162 QPSK,
173 QAM_16, 163 QAM_16,
174 QAM_32, 164 QAM_32,
@@ -183,9 +173,9 @@ typedef enum fe_modulation {
183 APSK_32, 173 APSK_32,
184 DQPSK, 174 DQPSK,
185 QAM_4_NR, 175 QAM_4_NR,
186} fe_modulation_t; 176};
187 177
188typedef enum fe_transmit_mode { 178enum fe_transmit_mode {
189 TRANSMISSION_MODE_2K, 179 TRANSMISSION_MODE_2K,
190 TRANSMISSION_MODE_8K, 180 TRANSMISSION_MODE_8K,
191 TRANSMISSION_MODE_AUTO, 181 TRANSMISSION_MODE_AUTO,
@@ -195,21 +185,9 @@ typedef enum fe_transmit_mode {
195 TRANSMISSION_MODE_32K, 185 TRANSMISSION_MODE_32K,
196 TRANSMISSION_MODE_C1, 186 TRANSMISSION_MODE_C1,
197 TRANSMISSION_MODE_C3780, 187 TRANSMISSION_MODE_C3780,
198} fe_transmit_mode_t; 188};
199
200#if defined(__DVB_CORE__) || !defined (__KERNEL__)
201typedef enum fe_bandwidth {
202 BANDWIDTH_8_MHZ,
203 BANDWIDTH_7_MHZ,
204 BANDWIDTH_6_MHZ,
205 BANDWIDTH_AUTO,
206 BANDWIDTH_5_MHZ,
207 BANDWIDTH_10_MHZ,
208 BANDWIDTH_1_712_MHZ,
209} fe_bandwidth_t;
210#endif
211 189
212typedef enum fe_guard_interval { 190enum fe_guard_interval {
213 GUARD_INTERVAL_1_32, 191 GUARD_INTERVAL_1_32,
214 GUARD_INTERVAL_1_16, 192 GUARD_INTERVAL_1_16,
215 GUARD_INTERVAL_1_8, 193 GUARD_INTERVAL_1_8,
@@ -221,16 +199,15 @@ typedef enum fe_guard_interval {
221 GUARD_INTERVAL_PN420, 199 GUARD_INTERVAL_PN420,
222 GUARD_INTERVAL_PN595, 200 GUARD_INTERVAL_PN595,
223 GUARD_INTERVAL_PN945, 201 GUARD_INTERVAL_PN945,
224} fe_guard_interval_t; 202};
225
226 203
227typedef enum fe_hierarchy { 204enum fe_hierarchy {
228 HIERARCHY_NONE, 205 HIERARCHY_NONE,
229 HIERARCHY_1, 206 HIERARCHY_1,
230 HIERARCHY_2, 207 HIERARCHY_2,
231 HIERARCHY_4, 208 HIERARCHY_4,
232 HIERARCHY_AUTO 209 HIERARCHY_AUTO
233} fe_hierarchy_t; 210};
234 211
235enum fe_interleaving { 212enum fe_interleaving {
236 INTERLEAVING_NONE, 213 INTERLEAVING_NONE,
@@ -239,51 +216,6 @@ enum fe_interleaving {
239 INTERLEAVING_720, 216 INTERLEAVING_720,
240}; 217};
241 218
242#if defined(__DVB_CORE__) || !defined (__KERNEL__)
243struct dvb_qpsk_parameters {
244 __u32 symbol_rate; /* symbol rate in Symbols per second */
245 fe_code_rate_t fec_inner; /* forward error correction (see above) */
246};
247
248struct dvb_qam_parameters {
249 __u32 symbol_rate; /* symbol rate in Symbols per second */
250 fe_code_rate_t fec_inner; /* forward error correction (see above) */
251 fe_modulation_t modulation; /* modulation type (see above) */
252};
253
254struct dvb_vsb_parameters {
255 fe_modulation_t modulation; /* modulation type (see above) */
256};
257
258struct dvb_ofdm_parameters {
259 fe_bandwidth_t bandwidth;
260 fe_code_rate_t code_rate_HP; /* high priority stream code rate */
261 fe_code_rate_t code_rate_LP; /* low priority stream code rate */
262 fe_modulation_t constellation; /* modulation type (see above) */
263 fe_transmit_mode_t transmission_mode;
264 fe_guard_interval_t guard_interval;
265 fe_hierarchy_t hierarchy_information;
266};
267
268
269struct dvb_frontend_parameters {
270 __u32 frequency; /* (absolute) frequency in Hz for QAM/OFDM/ATSC */
271 /* intermediate frequency in kHz for QPSK */
272 fe_spectral_inversion_t inversion;
273 union {
274 struct dvb_qpsk_parameters qpsk;
275 struct dvb_qam_parameters qam;
276 struct dvb_ofdm_parameters ofdm;
277 struct dvb_vsb_parameters vsb;
278 } u;
279};
280
281struct dvb_frontend_event {
282 fe_status_t status;
283 struct dvb_frontend_parameters parameters;
284};
285#endif
286
287/* S2API Commands */ 219/* S2API Commands */
288#define DTV_UNDEFINED 0 220#define DTV_UNDEFINED 0
289#define DTV_TUNE 1 221#define DTV_TUNE 1
@@ -377,20 +309,20 @@ struct dvb_frontend_event {
377 309
378#define DTV_MAX_COMMAND DTV_STAT_TOTAL_BLOCK_COUNT 310#define DTV_MAX_COMMAND DTV_STAT_TOTAL_BLOCK_COUNT
379 311
380typedef enum fe_pilot { 312enum fe_pilot {
381 PILOT_ON, 313 PILOT_ON,
382 PILOT_OFF, 314 PILOT_OFF,
383 PILOT_AUTO, 315 PILOT_AUTO,
384} fe_pilot_t; 316};
385 317
386typedef enum fe_rolloff { 318enum fe_rolloff {
387 ROLLOFF_35, /* Implied value in DVB-S, default for DVB-S2 */ 319 ROLLOFF_35, /* Implied value in DVB-S, default for DVB-S2 */
388 ROLLOFF_20, 320 ROLLOFF_20,
389 ROLLOFF_25, 321 ROLLOFF_25,
390 ROLLOFF_AUTO, 322 ROLLOFF_AUTO,
391} fe_rolloff_t; 323};
392 324
393typedef enum fe_delivery_system { 325enum fe_delivery_system {
394 SYS_UNDEFINED, 326 SYS_UNDEFINED,
395 SYS_DVBC_ANNEX_A, 327 SYS_DVBC_ANNEX_A,
396 SYS_DVBC_ANNEX_B, 328 SYS_DVBC_ANNEX_B,
@@ -410,7 +342,7 @@ typedef enum fe_delivery_system {
410 SYS_DVBT2, 342 SYS_DVBT2,
411 SYS_TURBO, 343 SYS_TURBO,
412 SYS_DVBC_ANNEX_C, 344 SYS_DVBC_ANNEX_C,
413} fe_delivery_system_t; 345};
414 346
415/* backward compatibility */ 347/* backward compatibility */
416#define SYS_DVBC_ANNEX_AC SYS_DVBC_ANNEX_A 348#define SYS_DVBC_ANNEX_AC SYS_DVBC_ANNEX_A
@@ -467,7 +399,7 @@ struct dtv_cmds_h {
467 * @FE_SCALE_NOT_AVAILABLE: That QoS measure is not available. That 399 * @FE_SCALE_NOT_AVAILABLE: That QoS measure is not available. That
468 * could indicate a temporary or a permanent 400 * could indicate a temporary or a permanent
469 * condition. 401 * condition.
470 * @FE_SCALE_DECIBEL: The scale is measured in 0.0001 dB steps, typically 402 * @FE_SCALE_DECIBEL: The scale is measured in 0.001 dB steps, typically
471 * used on signal measures. 403 * used on signal measures.
472 * @FE_SCALE_RELATIVE: The scale is a relative percentual measure, 404 * @FE_SCALE_RELATIVE: The scale is a relative percentual measure,
473 * ranging from 0 (0%) to 0xffff (100%). 405 * ranging from 0 (0%) to 0xffff (100%).
@@ -503,20 +435,20 @@ enum fecap_scale_params {
503 * 435 *
504 * In other words, for ISDB, those values should be filled like: 436 * In other words, for ISDB, those values should be filled like:
505 * u.st.stat.svalue[0] = global statistics; 437 * u.st.stat.svalue[0] = global statistics;
506 * u.st.stat.scale[0] = FE_SCALE_DECIBELS; 438 * u.st.stat.scale[0] = FE_SCALE_DECIBEL;
507 * u.st.stat.value[1] = layer A statistics; 439 * u.st.stat.value[1] = layer A statistics;
508 * u.st.stat.scale[1] = FE_SCALE_NOT_AVAILABLE (if not available); 440 * u.st.stat.scale[1] = FE_SCALE_NOT_AVAILABLE (if not available);
509 * u.st.stat.svalue[2] = layer B statistics; 441 * u.st.stat.svalue[2] = layer B statistics;
510 * u.st.stat.scale[2] = FE_SCALE_DECIBELS; 442 * u.st.stat.scale[2] = FE_SCALE_DECIBEL;
511 * u.st.stat.svalue[3] = layer C statistics; 443 * u.st.stat.svalue[3] = layer C statistics;
512 * u.st.stat.scale[3] = FE_SCALE_DECIBELS; 444 * u.st.stat.scale[3] = FE_SCALE_DECIBEL;
513 * u.st.len = 4; 445 * u.st.len = 4;
514 */ 446 */
515struct dtv_stats { 447struct dtv_stats {
516 __u8 scale; /* enum fecap_scale_params type */ 448 __u8 scale; /* enum fecap_scale_params type */
517 union { 449 union {
518 __u64 uvalue; /* for counters and relative scales */ 450 __u64 uvalue; /* for counters and relative scales */
519 __s64 svalue; /* for 0.0001 dB measures */ 451 __s64 svalue; /* for 0.001 dB measures */
520 }; 452 };
521} __attribute__ ((packed)); 453} __attribute__ ((packed));
522 454
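
As a hedged aside on the per-layer statistics layout documented above: a DVBv5 application could read one of the DTV_STAT_* properties and honour the scale field roughly as follows. This is a sketch only; DTV_STAT_CNR, the frontend fd handling and the error paths are assumptions, not part of this hunk.

#include <sys/ioctl.h>
#include <stdio.h>
#include <linux/dvb/frontend.h>

/* Sketch: fetch one statistics property and print each layer's value,
 * interpreting svalue as 0.001 dB steps when the scale says decibel. */
static void print_cnr(int fe_fd)
{
	struct dtv_property p = { .cmd = DTV_STAT_CNR };
	struct dtv_properties props = { .num = 1, .props = &p };
	int i;

	if (ioctl(fe_fd, FE_GET_PROPERTY, &props) < 0)
		return;

	for (i = 0; i < p.u.st.len; i++) {
		if (p.u.st.stat[i].scale == FE_SCALE_DECIBEL)
			printf("layer %d: %.3f dB\n", i,
			       p.u.st.stat[i].svalue / 1000.0);
		else if (p.u.st.stat[i].scale == FE_SCALE_NOT_AVAILABLE)
			printf("layer %d: not available\n", i);
	}
}
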
@@ -552,10 +484,88 @@ struct dtv_properties {
552 struct dtv_property *props; 484 struct dtv_property *props;
553}; 485};
554 486
487#if defined(__DVB_CORE__) || !defined (__KERNEL__)
488
489/*
 490 * DEPRECATED: The DVBv3 ioctls, structs and enums should not be used in
 491 * newer programs, as they support neither the second generation of digital
 492 * TV standards nor newer delivery systems.
493 */
494
495enum fe_bandwidth {
496 BANDWIDTH_8_MHZ,
497 BANDWIDTH_7_MHZ,
498 BANDWIDTH_6_MHZ,
499 BANDWIDTH_AUTO,
500 BANDWIDTH_5_MHZ,
501 BANDWIDTH_10_MHZ,
502 BANDWIDTH_1_712_MHZ,
503};
504
505/* This is needed for legacy userspace support */
506typedef enum fe_sec_voltage fe_sec_voltage_t;
507typedef enum fe_caps fe_caps_t;
508typedef enum fe_type fe_type_t;
509typedef enum fe_sec_tone_mode fe_sec_tone_mode_t;
510typedef enum fe_sec_mini_cmd fe_sec_mini_cmd_t;
511typedef enum fe_status fe_status_t;
512typedef enum fe_spectral_inversion fe_spectral_inversion_t;
513typedef enum fe_code_rate fe_code_rate_t;
514typedef enum fe_modulation fe_modulation_t;
515typedef enum fe_transmit_mode fe_transmit_mode_t;
516typedef enum fe_bandwidth fe_bandwidth_t;
517typedef enum fe_guard_interval fe_guard_interval_t;
518typedef enum fe_hierarchy fe_hierarchy_t;
519typedef enum fe_pilot fe_pilot_t;
520typedef enum fe_rolloff fe_rolloff_t;
521typedef enum fe_delivery_system fe_delivery_system_t;
522
523struct dvb_qpsk_parameters {
524 __u32 symbol_rate; /* symbol rate in Symbols per second */
525 fe_code_rate_t fec_inner; /* forward error correction (see above) */
526};
527
528struct dvb_qam_parameters {
529 __u32 symbol_rate; /* symbol rate in Symbols per second */
530 fe_code_rate_t fec_inner; /* forward error correction (see above) */
531 fe_modulation_t modulation; /* modulation type (see above) */
532};
533
534struct dvb_vsb_parameters {
535 fe_modulation_t modulation; /* modulation type (see above) */
536};
537
538struct dvb_ofdm_parameters {
539 fe_bandwidth_t bandwidth;
540 fe_code_rate_t code_rate_HP; /* high priority stream code rate */
541 fe_code_rate_t code_rate_LP; /* low priority stream code rate */
542 fe_modulation_t constellation; /* modulation type (see above) */
543 fe_transmit_mode_t transmission_mode;
544 fe_guard_interval_t guard_interval;
545 fe_hierarchy_t hierarchy_information;
546};
547
548struct dvb_frontend_parameters {
549 __u32 frequency; /* (absolute) frequency in Hz for DVB-C/DVB-T/ATSC */
550 /* intermediate frequency in kHz for DVB-S */
551 fe_spectral_inversion_t inversion;
552 union {
553 struct dvb_qpsk_parameters qpsk; /* DVB-S */
554 struct dvb_qam_parameters qam; /* DVB-C */
555 struct dvb_ofdm_parameters ofdm; /* DVB-T */
556 struct dvb_vsb_parameters vsb; /* ATSC */
557 } u;
558};
559
560struct dvb_frontend_event {
561 fe_status_t status;
562 struct dvb_frontend_parameters parameters;
563};
564#endif
565
555#define FE_SET_PROPERTY _IOW('o', 82, struct dtv_properties) 566#define FE_SET_PROPERTY _IOW('o', 82, struct dtv_properties)
556#define FE_GET_PROPERTY _IOR('o', 83, struct dtv_properties) 567#define FE_GET_PROPERTY _IOR('o', 83, struct dtv_properties)
557 568
558
559/** 569/**
560 * When set, this flag will disable any zigzagging or other "normal" tuning 570 * When set, this flag will disable any zigzagging or other "normal" tuning
561 * behaviour. Additionally, there will be no automatic monitoring of the lock 571 * behaviour. Additionally, there will be no automatic monitoring of the lock
@@ -565,7 +575,6 @@ struct dtv_properties {
565 */ 575 */
566#define FE_TUNE_MODE_ONESHOT 0x01 576#define FE_TUNE_MODE_ONESHOT 0x01
567 577
568
569#define FE_GET_INFO _IOR('o', 61, struct dvb_frontend_info) 578#define FE_GET_INFO _IOR('o', 61, struct dvb_frontend_info)
570 579
571#define FE_DISEQC_RESET_OVERLOAD _IO('o', 62) 580#define FE_DISEQC_RESET_OVERLOAD _IO('o', 62)
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index ae99f7743cf4..b08829667ed7 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -25,6 +25,7 @@
25#define EM_ARM 40 /* ARM 32 bit */ 25#define EM_ARM 40 /* ARM 32 bit */
26#define EM_SH 42 /* SuperH */ 26#define EM_SH 42 /* SuperH */
27#define EM_SPARCV9 43 /* SPARC v9 64-bit */ 27#define EM_SPARCV9 43 /* SPARC v9 64-bit */
28#define EM_H8_300 46 /* Renesas H8/300 */
28#define EM_IA_64 50 /* HP/Intel IA-64 */ 29#define EM_IA_64 50 /* HP/Intel IA-64 */
29#define EM_X86_64 62 /* AMD x86-64 */ 30#define EM_X86_64 62 /* AMD x86-64 */
30#define EM_S390 22 /* IBM S/390 */ 31#define EM_S390 22 /* IBM S/390 */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 2e49fc880d29..cd67aec187d9 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -215,6 +215,11 @@ enum tunable_id {
215 ETHTOOL_ID_UNSPEC, 215 ETHTOOL_ID_UNSPEC,
216 ETHTOOL_RX_COPYBREAK, 216 ETHTOOL_RX_COPYBREAK,
217 ETHTOOL_TX_COPYBREAK, 217 ETHTOOL_TX_COPYBREAK,
218 /*
 219 * Add your fresh new tunable attribute above and remember to update
220 * tunable_strings[] in net/core/ethtool.c
221 */
222 __ETHTOOL_TUNABLE_COUNT,
218}; 223};
219 224
220enum tunable_type_id { 225enum tunable_type_id {
@@ -545,6 +550,7 @@ enum ethtool_stringset {
545 ETH_SS_NTUPLE_FILTERS, 550 ETH_SS_NTUPLE_FILTERS,
546 ETH_SS_FEATURES, 551 ETH_SS_FEATURES,
547 ETH_SS_RSS_HASH_FUNCS, 552 ETH_SS_RSS_HASH_FUNCS,
553 ETH_SS_TUNABLES,
548}; 554};
549 555
550/** 556/**
@@ -796,6 +802,31 @@ struct ethtool_rx_flow_spec {
796 __u32 location; 802 __u32 location;
797}; 803};
798 804
 805/* How rings are laid out when accessing virtual functions or
 806 * offloaded queues is device specific. To allow users to do flow
 807 * steering and specify these queues, the ring cookie is partitioned
 808 * into a 32-bit queue index and an 8-bit virtual function id.
 809 * This also leaves 3 bytes for further specifiers. It is possible
 810 * that future devices may support more than 256 virtual functions if
 811 * devices start supporting PCIe w/ARI. However, at the moment I
 812 * do not know of any devices that support this, so I do not reserve
 813 * space for this at this time. If a future patch consumes the next
 814 * byte it should be aware of this possibility.
 815 */
816#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL
817#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL
818#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
819static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
820{
821 return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
822};
823
824static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
825{
826 return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
827 ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
828};
829
799/** 830/**
800 * struct ethtool_rxnfc - command to get or set RX flow classification rules 831 * struct ethtool_rxnfc - command to get or set RX flow classification rules
801 * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH, 832 * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,
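
To make the partitioning above concrete, here is a hedged sketch pairing the two decode helpers with an illustrative encoder; make_ring_cookie is not part of the header and only mirrors the mask/offset definitions added in this hunk.

#include <stdio.h>
#include <linux/ethtool.h>

/* Illustrative only: pack a VF id and a queue index into a ring_cookie
 * using the same layout the masks above describe. */
static __u64 make_ring_cookie(__u8 vf, __u32 queue)
{
	return ((__u64)vf << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | queue;
}

int main(void)
{
	__u64 cookie = make_ring_cookie(3, 7);

	/* Decode with the helpers added in this hunk. */
	printf("vf=%llu queue=%llu\n",
	       (unsigned long long)ethtool_get_flow_spec_ring_vf(cookie),
	       (unsigned long long)ethtool_get_flow_spec_ring(cookie));
	return 0;
}
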
@@ -1264,15 +1295,19 @@ enum ethtool_sfeatures_retval_bits {
1264 * it was forced up into this mode or autonegotiated. 1295 * it was forced up into this mode or autonegotiated.
1265 */ 1296 */
1266 1297
1267/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */ 1298/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|5|10|20|25|40|50|56|100]GbE. */
1268#define SPEED_10 10 1299#define SPEED_10 10
1269#define SPEED_100 100 1300#define SPEED_100 100
1270#define SPEED_1000 1000 1301#define SPEED_1000 1000
1271#define SPEED_2500 2500 1302#define SPEED_2500 2500
1303#define SPEED_5000 5000
1272#define SPEED_10000 10000 1304#define SPEED_10000 10000
1273#define SPEED_20000 20000 1305#define SPEED_20000 20000
1306#define SPEED_25000 25000
1274#define SPEED_40000 40000 1307#define SPEED_40000 40000
1308#define SPEED_50000 50000
1275#define SPEED_56000 56000 1309#define SPEED_56000 56000
1310#define SPEED_100000 100000
1276 1311
1277#define SPEED_UNKNOWN -1 1312#define SPEED_UNKNOWN -1
1278 1313
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 25084a052a1e..c9aca042e61d 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -755,4 +755,7 @@ struct fuse_notify_retrieve_in {
755 uint64_t dummy4; 755 uint64_t dummy4;
756}; 756};
757 757
758/* Device ioctls: */
759#define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t)
760
758#endif /* _LINUX_FUSE_H */ 761#endif /* _LINUX_FUSE_H */
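
For orientation, the new ioctl clones an existing /dev/fuse session onto a second descriptor. A hedged sketch of the expected call sequence follows; the surrounding session handling is an assumption, not something this header spells out.

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <linux/fuse.h>

/* Sketch: open a fresh /dev/fuse fd and attach it to the session that
 * already lives on session_fd, so requests can be read on either fd. */
static int clone_fuse_fd(int session_fd)
{
	uint32_t src = session_fd;
	int clonefd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (clonefd < 0)
		return -1;
	if (ioctl(clonefd, FUSE_DEV_IOC_CLONE, &src) < 0) {
		close(clonefd);
		return -1;
	}
	return clonefd;
}
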
diff --git a/include/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index c25e9477f7c3..c06742d52856 100644
--- a/include/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -1,6 +1,9 @@
1#ifndef _LINUX_GSMMUX_H 1#ifndef _LINUX_GSMMUX_H
2#define _LINUX_GSMMUX_H 2#define _LINUX_GSMMUX_H
3 3
4#include <linux/if.h>
5#include <linux/ioctl.h>
6
4struct gsm_config 7struct gsm_config
5{ 8{
6 unsigned int adaption; 9 unsigned int adaption;
diff --git a/include/uapi/linux/hsi/cs-protocol.h b/include/uapi/linux/hsi/cs-protocol.h
index 4957bba57cbe..f153d6ea7c62 100644
--- a/include/uapi/linux/hsi/cs-protocol.h
+++ b/include/uapi/linux/hsi/cs-protocol.h
@@ -76,6 +76,15 @@ struct cs_buffer_config {
76}; 76};
77 77
78/* 78/*
79 * struct for monotonic timestamp taken when the
80 * last control command was received
81 */
82struct cs_timestamp {
83 __u32 tv_sec; /* seconds */
84 __u32 tv_nsec; /* nanoseconds */
85};
86
87/*
79 * Struct describing the layout and contents of the driver mmap area. 88 * Struct describing the layout and contents of the driver mmap area.
80 * This information is meant as read-only information for the application. 89 * This information is meant as read-only information for the application.
81 */ 90 */
@@ -91,11 +100,8 @@ struct cs_mmap_config_block {
91 __u32 rx_ptr; 100 __u32 rx_ptr;
92 __u32 rx_ptr_boundary; 101 __u32 rx_ptr_boundary;
93 __u32 reserved3[2]; 102 __u32 reserved3[2];
94 /* 103 /* enabled with CS_FEAT_TSTAMP_RX_CTRL */
95 * if enabled with CS_FEAT_TSTAMP_RX_CTRL, monotonic 104 struct cs_timestamp tstamp_rx_ctrl;
96 * timestamp taken when the last control command was received
97 */
98 struct timespec tstamp_rx_ctrl;
99}; 105};
100 106
101#define CS_IO_MAGIC 'C' 107#define CS_IO_MAGIC 'C'
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index bb1cb73c927a..e4c0a35d6417 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -45,6 +45,11 @@
45 45
46#define VSS_OP_REGISTER 128 46#define VSS_OP_REGISTER 128
47 47
48/*
49 Daemon code with full handshake support.
50 */
51#define VSS_OP_REGISTER1 129
52
48enum hv_vss_op { 53enum hv_vss_op {
49 VSS_OP_CREATE = 0, 54 VSS_OP_CREATE = 0,
50 VSS_OP_DELETE, 55 VSS_OP_DELETE,
@@ -100,7 +105,8 @@ struct hv_vss_msg {
100 */ 105 */
101 106
102#define FCOPY_VERSION_0 0 107#define FCOPY_VERSION_0 0
103#define FCOPY_CURRENT_VERSION FCOPY_VERSION_0 108#define FCOPY_VERSION_1 1
109#define FCOPY_CURRENT_VERSION FCOPY_VERSION_1
104#define W_MAX_PATH 260 110#define W_MAX_PATH 260
105 111
106enum hv_fcopy_op { 112enum hv_fcopy_op {
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index 0e949cbfd333..b0a7dd61eb35 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -87,6 +87,7 @@ struct i2c_msg {
87#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_IGNORE_NAK etc. */ 87#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_IGNORE_NAK etc. */
88#define I2C_FUNC_SMBUS_PEC 0x00000008 88#define I2C_FUNC_SMBUS_PEC 0x00000008
89#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */ 89#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */
90#define I2C_FUNC_SLAVE 0x00000020
90#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */ 91#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
91#define I2C_FUNC_SMBUS_QUICK 0x00010000 92#define I2C_FUNC_SMBUS_QUICK 0x00010000
92#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000 93#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index d9cd19214b98..2c7e8e3d3981 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -390,6 +390,17 @@ struct ifla_vxlan_port_range {
390 __be16 high; 390 __be16 high;
391}; 391};
392 392
393/* GENEVE section */
394enum {
395 IFLA_GENEVE_UNSPEC,
396 IFLA_GENEVE_ID,
397 IFLA_GENEVE_REMOTE,
398 IFLA_GENEVE_TTL,
399 IFLA_GENEVE_TOS,
400 __IFLA_GENEVE_MAX
401};
402#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
403
393/* Bonding section */ 404/* Bonding section */
394 405
395enum { 406enum {
@@ -417,6 +428,9 @@ enum {
417 IFLA_BOND_AD_LACP_RATE, 428 IFLA_BOND_AD_LACP_RATE,
418 IFLA_BOND_AD_SELECT, 429 IFLA_BOND_AD_SELECT,
419 IFLA_BOND_AD_INFO, 430 IFLA_BOND_AD_INFO,
431 IFLA_BOND_AD_ACTOR_SYS_PRIO,
432 IFLA_BOND_AD_USER_PORT_KEY,
433 IFLA_BOND_AD_ACTOR_SYSTEM,
420 __IFLA_BOND_MAX, 434 __IFLA_BOND_MAX,
421}; 435};
422 436
@@ -442,6 +456,8 @@ enum {
442 IFLA_BOND_SLAVE_PERM_HWADDR, 456 IFLA_BOND_SLAVE_PERM_HWADDR,
443 IFLA_BOND_SLAVE_QUEUE_ID, 457 IFLA_BOND_SLAVE_QUEUE_ID,
444 IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, 458 IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
459 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
460 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
445 __IFLA_BOND_SLAVE_MAX, 461 __IFLA_BOND_SLAVE_MAX,
446}; 462};
447 463
@@ -468,6 +484,7 @@ enum {
468 IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query 484 IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query
469 * on/off switch 485 * on/off switch
470 */ 486 */
487 IFLA_VF_STATS, /* network device statistics */
471 __IFLA_VF_MAX, 488 __IFLA_VF_MAX,
472}; 489};
473 490
@@ -517,6 +534,18 @@ struct ifla_vf_rss_query_en {
517 __u32 setting; 534 __u32 setting;
518}; 535};
519 536
537enum {
538 IFLA_VF_STATS_RX_PACKETS,
539 IFLA_VF_STATS_TX_PACKETS,
540 IFLA_VF_STATS_RX_BYTES,
541 IFLA_VF_STATS_TX_BYTES,
542 IFLA_VF_STATS_BROADCAST,
543 IFLA_VF_STATS_MULTICAST,
544 __IFLA_VF_STATS_MAX,
545};
546
547#define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1)
548
520/* VF ports management section 549/* VF ports management section
521 * 550 *
522 * Nested layout of set/get msg is: 551 * Nested layout of set/get msg is:
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 053bd102fbe0..d3d715f8c88f 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -54,6 +54,7 @@ struct sockaddr_ll {
54#define PACKET_FANOUT 18 54#define PACKET_FANOUT 18
55#define PACKET_TX_HAS_OFF 19 55#define PACKET_TX_HAS_OFF 19
56#define PACKET_QDISC_BYPASS 20 56#define PACKET_QDISC_BYPASS 20
57#define PACKET_ROLLOVER_STATS 21
57 58
58#define PACKET_FANOUT_HASH 0 59#define PACKET_FANOUT_HASH 0
59#define PACKET_FANOUT_LB 1 60#define PACKET_FANOUT_LB 1
@@ -75,6 +76,12 @@ struct tpacket_stats_v3 {
75 unsigned int tp_freeze_q_cnt; 76 unsigned int tp_freeze_q_cnt;
76}; 77};
77 78
79struct tpacket_rollover_stats {
80 __aligned_u64 tp_all;
81 __aligned_u64 tp_huge;
82 __aligned_u64 tp_failed;
83};
84
78union tpacket_stats_u { 85union tpacket_stats_u {
79 struct tpacket_stats stats1; 86 struct tpacket_stats stats1;
80 struct tpacket_stats_v3 stats3; 87 struct tpacket_stats_v3 stats3;
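
A hedged sketch of reading the new counters back from a packet socket; SOL_PACKET and the getsockopt convention are assumed from the existing PACKET_STATISTICS usage, not shown in this hunk.

#include <sys/socket.h>
#include <stdio.h>
#include <linux/if_packet.h>

/* Sketch: fetch the rollover statistics added above. */
static void print_rollover_stats(int pkt_fd)
{
	struct tpacket_rollover_stats rstats;
	socklen_t len = sizeof(rstats);

	if (getsockopt(pkt_fd, SOL_PACKET, PACKET_ROLLOVER_STATS,
		       &rstats, &len) < 0)
		return;

	printf("all=%llu huge=%llu failed=%llu\n",
	       (unsigned long long)rstats.tp_all,
	       (unsigned long long)rstats.tp_huge,
	       (unsigned long long)rstats.tp_failed);
}
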
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 50ae24335444..3cb5e1d85ddd 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -50,6 +50,12 @@
50#define TUNGETFILTER _IOR('T', 219, struct sock_fprog) 50#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
51#define TUNSETVNETLE _IOW('T', 220, int) 51#define TUNSETVNETLE _IOW('T', 220, int)
52#define TUNGETVNETLE _IOR('T', 221, int) 52#define TUNGETVNETLE _IOR('T', 221, int)
53/* The TUNSETVNETBE and TUNGETVNETBE ioctls are for cross-endian support on
54 * little-endian hosts. Not all kernel configurations support them, but all
55 * configurations that support SET also support GET.
56 */
57#define TUNSETVNETBE _IOW('T', 222, int)
58#define TUNGETVNETBE _IOR('T', 223, int)
53 59
54/* TUNSETIFF ifr flags */ 60/* TUNSETIFF ifr flags */
55#define IFF_TUN 0x0001 61#define IFF_TUN 0x0001
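
A minimal sketch of how a userspace virtio backend might drive the new ioctls; the int-sized argument follows the existing TUNSETVNETLE pattern, and whether the call succeeds depends on the kernel configuration, as the comment notes.

#include <sys/ioctl.h>
#include <linux/if_tun.h>

/* Sketch: toggle big-endian vnet headers on a tap fd. Kernels built
 * without cross-endian support are expected to reject the ioctl. */
static int tap_set_vnet_be(int tap_fd, int enable)
{
	return ioctl(tap_fd, TUNSETVNETBE, &enable);
}

static int tap_get_vnet_be(int tap_fd, int *enabled)
{
	return ioctl(tap_fd, TUNGETVNETBE, enabled);
}
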
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 5c4601935005..2f8b11722204 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -70,6 +70,8 @@ enum iio_modifier {
70 IIO_MOD_WALKING, 70 IIO_MOD_WALKING,
71 IIO_MOD_STILL, 71 IIO_MOD_STILL,
72 IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z, 72 IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z,
73 IIO_MOD_I,
74 IIO_MOD_Q,
73}; 75};
74 76
75enum iio_event_type { 77enum iio_event_type {
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 589ced069e8a..eaf94919291a 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -19,8 +19,10 @@
19#define _UAPI_LINUX_IN_H 19#define _UAPI_LINUX_IN_H
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/libc-compat.h>
22#include <linux/socket.h> 23#include <linux/socket.h>
23 24
25#if __UAPI_DEF_IN_IPPROTO
24/* Standard well-defined IP protocols. */ 26/* Standard well-defined IP protocols. */
25enum { 27enum {
26 IPPROTO_IP = 0, /* Dummy protocol for TCP */ 28 IPPROTO_IP = 0, /* Dummy protocol for TCP */
@@ -69,16 +71,20 @@ enum {
69#define IPPROTO_SCTP IPPROTO_SCTP 71#define IPPROTO_SCTP IPPROTO_SCTP
70 IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */ 72 IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
71#define IPPROTO_UDPLITE IPPROTO_UDPLITE 73#define IPPROTO_UDPLITE IPPROTO_UDPLITE
74 IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */
75#define IPPROTO_MPLS IPPROTO_MPLS
72 IPPROTO_RAW = 255, /* Raw IP packets */ 76 IPPROTO_RAW = 255, /* Raw IP packets */
73#define IPPROTO_RAW IPPROTO_RAW 77#define IPPROTO_RAW IPPROTO_RAW
74 IPPROTO_MAX 78 IPPROTO_MAX
75}; 79};
80#endif
76 81
77 82#if __UAPI_DEF_IN_ADDR
78/* Internet address. */ 83/* Internet address. */
79struct in_addr { 84struct in_addr {
80 __be32 s_addr; 85 __be32 s_addr;
81}; 86};
87#endif
82 88
83#define IP_TOS 1 89#define IP_TOS 1
84#define IP_TTL 2 90#define IP_TTL 2
@@ -110,6 +116,7 @@ struct in_addr {
110#define IP_MINTTL 21 116#define IP_MINTTL 21
111#define IP_NODEFRAG 22 117#define IP_NODEFRAG 22
112#define IP_CHECKSUM 23 118#define IP_CHECKSUM 23
119#define IP_BIND_ADDRESS_NO_PORT 24
113 120
114/* IP_MTU_DISCOVER values */ 121/* IP_MTU_DISCOVER values */
115#define IP_PMTUDISC_DONT 0 /* Never send DF frames */ 122#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
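
For context, IP_BIND_ADDRESS_NO_PORT lets a program bind to a source address without consuming an ephemeral port until connect(). A hedged usage sketch; the IPPROTO_IP level is an assumption based on the neighbouring IP_* options.

#include <sys/socket.h>
#include <linux/in.h>

/* Sketch: pin the source address now, let connect() pick the port. */
static int bind_addr_no_port(int fd, const struct sockaddr_in *src)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT,
		       &one, sizeof(one)) < 0)
		return -1;
	return bind(fd, (const struct sockaddr *)src, sizeof(*src));
}
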
@@ -155,6 +162,7 @@ struct in_addr {
155 162
156/* Request struct for multicast socket ops */ 163/* Request struct for multicast socket ops */
157 164
165#if __UAPI_DEF_IP_MREQ
158struct ip_mreq { 166struct ip_mreq {
159 struct in_addr imr_multiaddr; /* IP multicast address of group */ 167 struct in_addr imr_multiaddr; /* IP multicast address of group */
160 struct in_addr imr_interface; /* local IP address of interface */ 168 struct in_addr imr_interface; /* local IP address of interface */
@@ -206,14 +214,18 @@ struct group_filter {
206#define GROUP_FILTER_SIZE(numsrc) \ 214#define GROUP_FILTER_SIZE(numsrc) \
207 (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \ 215 (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \
208 + (numsrc) * sizeof(struct __kernel_sockaddr_storage)) 216 + (numsrc) * sizeof(struct __kernel_sockaddr_storage))
217#endif
209 218
219#if __UAPI_DEF_IN_PKTINFO
210struct in_pktinfo { 220struct in_pktinfo {
211 int ipi_ifindex; 221 int ipi_ifindex;
212 struct in_addr ipi_spec_dst; 222 struct in_addr ipi_spec_dst;
213 struct in_addr ipi_addr; 223 struct in_addr ipi_addr;
214}; 224};
225#endif
215 226
216/* Structure describing an Internet (IP) socket address. */ 227/* Structure describing an Internet (IP) socket address. */
228#if __UAPI_DEF_SOCKADDR_IN
217#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */ 229#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
218struct sockaddr_in { 230struct sockaddr_in {
219 __kernel_sa_family_t sin_family; /* Address family */ 231 __kernel_sa_family_t sin_family; /* Address family */
@@ -225,8 +237,9 @@ struct sockaddr_in {
225 sizeof(unsigned short int) - sizeof(struct in_addr)]; 237 sizeof(unsigned short int) - sizeof(struct in_addr)];
226}; 238};
227#define sin_zero __pad /* for BSD UNIX comp. -FvK */ 239#define sin_zero __pad /* for BSD UNIX comp. -FvK */
240#endif
228 241
229 242#if __UAPI_DEF_IN_CLASS
230/* 243/*
231 * Definitions of the bits in an Internet address integer. 244 * Definitions of the bits in an Internet address integer.
232 * On subnets, host and network parts are found according 245 * On subnets, host and network parts are found according
@@ -277,7 +290,7 @@ struct sockaddr_in {
277#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */ 290#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
278#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */ 291#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
279#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */ 292#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
280 293#endif
281 294
282/* <asm/byteorder.h> contains the htonl type stuff.. */ 295/* <asm/byteorder.h> contains the htonl type stuff.. */
283#include <asm/byteorder.h> 296#include <asm/byteorder.h>
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index c7093c75bdd6..68a1f71fde9f 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -111,9 +111,11 @@ enum {
111 INET_DIAG_SKMEMINFO, 111 INET_DIAG_SKMEMINFO,
112 INET_DIAG_SHUTDOWN, 112 INET_DIAG_SHUTDOWN,
113 INET_DIAG_DCTCPINFO, 113 INET_DIAG_DCTCPINFO,
114 INET_DIAG_PROTOCOL, /* response attribute only */
115 INET_DIAG_SKV6ONLY,
114}; 116};
115 117
116#define INET_DIAG_MAX INET_DIAG_DCTCPINFO 118#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
117 119
118/* INET_DIAG_MEM */ 120/* INET_DIAG_MEM */
119 121
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 411959405ab6..08f894d2ddbd 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -164,6 +164,7 @@ enum
164 IPV4_DEVCONF_ROUTE_LOCALNET, 164 IPV4_DEVCONF_ROUTE_LOCALNET,
165 IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL, 165 IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
166 IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL, 166 IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
167 IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
167 __IPV4_DEVCONF_MAX 168 __IPV4_DEVCONF_MAX
168}; 169};
169 170
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index 2be7bd174751..f6598d1c886e 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -34,6 +34,7 @@
34#define RTF_PREF(pref) ((pref) << 27) 34#define RTF_PREF(pref) ((pref) << 27)
35#define RTF_PREF_MASK 0x18000000 35#define RTF_PREF_MASK 0x18000000
36 36
37#define RTF_PCPU 0x40000000
37#define RTF_LOCAL 0x80000000 38#define RTF_LOCAL 0x80000000
38 39
39 40
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index af94f31e33ac..d6833426fdef 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -27,7 +27,7 @@
27#include <linux/ioctl.h> 27#include <linux/ioctl.h>
28 28
29#define KFD_IOCTL_MAJOR_VERSION 1 29#define KFD_IOCTL_MAJOR_VERSION 1
30#define KFD_IOCTL_MINOR_VERSION 0 30#define KFD_IOCTL_MINOR_VERSION 1
31 31
32struct kfd_ioctl_get_version_args { 32struct kfd_ioctl_get_version_args {
33 uint32_t major_version; /* from KFD */ 33 uint32_t major_version; /* from KFD */
@@ -128,6 +128,110 @@ struct kfd_ioctl_get_process_apertures_args {
128 uint32_t pad; 128 uint32_t pad;
129}; 129};
130 130
131#define MAX_ALLOWED_NUM_POINTS 100
132#define MAX_ALLOWED_AW_BUFF_SIZE 4096
133#define MAX_ALLOWED_WAC_BUFF_SIZE 128
134
135struct kfd_ioctl_dbg_register_args {
136 uint32_t gpu_id; /* to KFD */
137 uint32_t pad;
138};
139
140struct kfd_ioctl_dbg_unregister_args {
141 uint32_t gpu_id; /* to KFD */
142 uint32_t pad;
143};
144
145struct kfd_ioctl_dbg_address_watch_args {
146 uint64_t content_ptr; /* a pointer to the actual content */
147 uint32_t gpu_id; /* to KFD */
148 uint32_t buf_size_in_bytes; /*including gpu_id and buf_size */
149};
150
151struct kfd_ioctl_dbg_wave_control_args {
152 uint64_t content_ptr; /* a pointer to the actual content */
153 uint32_t gpu_id; /* to KFD */
154 uint32_t buf_size_in_bytes; /*including gpu_id and buf_size */
155};
156
157/* Matching HSA_EVENTTYPE */
158#define KFD_IOC_EVENT_SIGNAL 0
159#define KFD_IOC_EVENT_NODECHANGE 1
160#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
161#define KFD_IOC_EVENT_HW_EXCEPTION 3
162#define KFD_IOC_EVENT_SYSTEM_EVENT 4
163#define KFD_IOC_EVENT_DEBUG_EVENT 5
164#define KFD_IOC_EVENT_PROFILE_EVENT 6
165#define KFD_IOC_EVENT_QUEUE_EVENT 7
166#define KFD_IOC_EVENT_MEMORY 8
167
168#define KFD_IOC_WAIT_RESULT_COMPLETE 0
169#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
170#define KFD_IOC_WAIT_RESULT_FAIL 2
171
172#define KFD_SIGNAL_EVENT_LIMIT 256
173
174struct kfd_ioctl_create_event_args {
175 uint64_t event_page_offset; /* from KFD */
176 uint32_t event_trigger_data; /* from KFD - signal events only */
177 uint32_t event_type; /* to KFD */
178 uint32_t auto_reset; /* to KFD */
179 uint32_t node_id; /* to KFD - only valid for certain
180 event types */
181 uint32_t event_id; /* from KFD */
182 uint32_t event_slot_index; /* from KFD */
183};
184
185struct kfd_ioctl_destroy_event_args {
186 uint32_t event_id; /* to KFD */
187 uint32_t pad;
188};
189
190struct kfd_ioctl_set_event_args {
191 uint32_t event_id; /* to KFD */
192 uint32_t pad;
193};
194
195struct kfd_ioctl_reset_event_args {
196 uint32_t event_id; /* to KFD */
197 uint32_t pad;
198};
199
200struct kfd_memory_exception_failure {
201 uint32_t NotPresent; /* Page not present or supervisor privilege */
202 uint32_t ReadOnly; /* Write access to a read-only page */
203 uint32_t NoExecute; /* Execute access to a page marked NX */
204 uint32_t pad;
205};
206
207/* memory exception data*/
208struct kfd_hsa_memory_exception_data {
209 struct kfd_memory_exception_failure failure;
210 uint64_t va;
211 uint32_t gpu_id;
212 uint32_t pad;
213};
214
215/* Event data*/
216struct kfd_event_data {
217 union {
218 struct kfd_hsa_memory_exception_data memory_exception_data;
219 }; /* From KFD */
220 uint64_t kfd_event_data_ext; /* pointer to an extension structure
221 for future exception types */
222 uint32_t event_id; /* to KFD */
223 uint32_t pad;
224};
225
226struct kfd_ioctl_wait_events_args {
227 uint64_t events_ptr; /* pointed to struct
228 kfd_event_data array, to KFD */
229 uint32_t num_events; /* to KFD */
230 uint32_t wait_for_all; /* to KFD */
231 uint32_t timeout; /* to KFD */
232 uint32_t wait_result; /* from KFD */
233};
234
131#define AMDKFD_IOCTL_BASE 'K' 235#define AMDKFD_IOCTL_BASE 'K'
132#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) 236#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
133#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) 237#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -155,7 +259,34 @@ struct kfd_ioctl_get_process_apertures_args {
155#define AMDKFD_IOC_UPDATE_QUEUE \ 259#define AMDKFD_IOC_UPDATE_QUEUE \
156 AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args) 260 AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
157 261
262#define AMDKFD_IOC_CREATE_EVENT \
263 AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
264
265#define AMDKFD_IOC_DESTROY_EVENT \
266 AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
267
268#define AMDKFD_IOC_SET_EVENT \
269 AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
270
271#define AMDKFD_IOC_RESET_EVENT \
272 AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
273
274#define AMDKFD_IOC_WAIT_EVENTS \
275 AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
276
277#define AMDKFD_IOC_DBG_REGISTER \
278 AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
279
280#define AMDKFD_IOC_DBG_UNREGISTER \
281 AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
282
283#define AMDKFD_IOC_DBG_ADDRESS_WATCH \
284 AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
285
286#define AMDKFD_IOC_DBG_WAVE_CONTROL \
287 AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
288
158#define AMDKFD_COMMAND_START 0x01 289#define AMDKFD_COMMAND_START 0x01
159#define AMDKFD_COMMAND_END 0x08 290#define AMDKFD_COMMAND_END 0x11
160 291
161#endif 292#endif
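
To sketch how the new event interface fits together (hedged: the /dev/kfd fd handling lives outside this header, the timeout unit is assumed to be milliseconds, and error paths are trimmed):

#include <sys/ioctl.h>
#include <string.h>
#include <stdint.h>
#include <linux/kfd_ioctl.h>

/* Sketch: create an auto-reset signal event, then block until it fires
 * or the timeout expires. kfd_fd is an already opened /dev/kfd fd. */
static int wait_for_signal_event(int kfd_fd)
{
	struct kfd_ioctl_create_event_args create;
	struct kfd_event_data data;
	struct kfd_ioctl_wait_events_args wait;

	memset(&create, 0, sizeof(create));
	create.event_type = KFD_IOC_EVENT_SIGNAL;
	create.auto_reset = 1;
	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &create) < 0)
		return -1;

	memset(&data, 0, sizeof(data));
	data.event_id = create.event_id;

	memset(&wait, 0, sizeof(wait));
	wait.events_ptr = (uint64_t)(uintptr_t)&data;
	wait.num_events = 1;
	wait.wait_for_all = 1;
	wait.timeout = 1000;		/* assumed: milliseconds */

	if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait) < 0)
		return -1;
	return wait.wait_result;	/* one of KFD_IOC_WAIT_RESULT_* */
}
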
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4b60056776d1..716ad4ae4d4b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -202,7 +202,7 @@ struct kvm_run {
202 __u32 exit_reason; 202 __u32 exit_reason;
203 __u8 ready_for_interrupt_injection; 203 __u8 ready_for_interrupt_injection;
204 __u8 if_flag; 204 __u8 if_flag;
205 __u8 padding2[2]; 205 __u16 flags;
206 206
207 /* in (pre_kvm_run), out (post_kvm_run) */ 207 /* in (pre_kvm_run), out (post_kvm_run) */
208 __u64 cr8; 208 __u64 cr8;
@@ -814,6 +814,9 @@ struct kvm_ppc_smmu_info {
814#define KVM_CAP_S390_INJECT_IRQ 113 814#define KVM_CAP_S390_INJECT_IRQ 113
815#define KVM_CAP_S390_IRQ_STATE 114 815#define KVM_CAP_S390_IRQ_STATE 114
816#define KVM_CAP_PPC_HWRNG 115 816#define KVM_CAP_PPC_HWRNG 115
817#define KVM_CAP_DISABLE_QUIRKS 116
818#define KVM_CAP_X86_SMM 117
819#define KVM_CAP_MULTI_ADDRESS_SPACE 118
817 820
818#ifdef KVM_CAP_IRQ_ROUTING 821#ifdef KVM_CAP_IRQ_ROUTING
819 822
@@ -894,7 +897,7 @@ struct kvm_xen_hvm_config {
894 * 897 *
895 * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies 898 * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
896 * the irqfd to operate in resampling mode for level triggered interrupt 899 * the irqfd to operate in resampling mode for level triggered interrupt
897 * emlation. See Documentation/virtual/kvm/api.txt. 900 * emulation. See Documentation/virtual/kvm/api.txt.
898 */ 901 */
899#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1) 902#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
900 903
@@ -1199,6 +1202,8 @@ struct kvm_s390_ucas_mapping {
1199/* Available with KVM_CAP_S390_IRQ_STATE */ 1202/* Available with KVM_CAP_S390_IRQ_STATE */
1200#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state) 1203#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
1201#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state) 1204#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
1205/* Available with KVM_CAP_X86_SMM */
1206#define KVM_SMI _IO(KVMIO, 0xb7)
1202 1207
1203#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) 1208#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
1204#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) 1209#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
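
A hedged sketch of how a VMM could combine the new capability with the new vcpu ioctl; the fd plumbing is assumed, and KVM_CHECK_EXTENSION itself predates this patch.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: inject a system management interrupt into a vcpu, but only
 * after the kernel has advertised SMM emulation support. kvm_fd is the
 * /dev/kvm (or VM) fd, vcpu_fd the target vcpu. */
static int inject_smi(int kvm_fd, int vcpu_fd)
{
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM) <= 0)
		return -1;
	return ioctl(vcpu_fd, KVM_SMI, 0);
}
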
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index fa673e9cc040..7d024ceb075d 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -56,6 +56,13 @@
56 56
57/* GLIBC headers included first so don't define anything 57/* GLIBC headers included first so don't define anything
58 * that would already be defined. */ 58 * that would already be defined. */
59#define __UAPI_DEF_IN_ADDR 0
60#define __UAPI_DEF_IN_IPPROTO 0
61#define __UAPI_DEF_IN_PKTINFO 0
62#define __UAPI_DEF_IP_MREQ 0
63#define __UAPI_DEF_SOCKADDR_IN 0
64#define __UAPI_DEF_IN_CLASS 0
65
59#define __UAPI_DEF_IN6_ADDR 0 66#define __UAPI_DEF_IN6_ADDR 0
60/* The exception is the in6_addr macros which must be defined 67/* The exception is the in6_addr macros which must be defined
61 * if the glibc code didn't define them. This guard matches 68 * if the glibc code didn't define them. This guard matches
@@ -78,6 +85,13 @@
78/* Linux headers included first, and we must define everything 85/* Linux headers included first, and we must define everything
79 * we need. The expectation is that glibc will check the 86 * we need. The expectation is that glibc will check the
80 * __UAPI_DEF_* defines and adjust appropriately. */ 87 * __UAPI_DEF_* defines and adjust appropriately. */
88#define __UAPI_DEF_IN_ADDR 1
89#define __UAPI_DEF_IN_IPPROTO 1
90#define __UAPI_DEF_IN_PKTINFO 1
91#define __UAPI_DEF_IP_MREQ 1
92#define __UAPI_DEF_SOCKADDR_IN 1
93#define __UAPI_DEF_IN_CLASS 1
94
81#define __UAPI_DEF_IN6_ADDR 1 95#define __UAPI_DEF_IN6_ADDR 1
82/* We unconditionally define the in6_addr macros and glibc must 96/* We unconditionally define the in6_addr macros and glibc must
83 * coordinate. */ 97 * coordinate. */
@@ -103,6 +117,14 @@
103 * that we need. */ 117 * that we need. */
104#else /* !defined(__GLIBC__) */ 118#else /* !defined(__GLIBC__) */
105 119
120/* Definitions for in.h */
121#define __UAPI_DEF_IN_ADDR 1
122#define __UAPI_DEF_IN_IPPROTO 1
123#define __UAPI_DEF_IN_PKTINFO 1
124#define __UAPI_DEF_IP_MREQ 1
125#define __UAPI_DEF_SOCKADDR_IN 1
126#define __UAPI_DEF_IN_CLASS 1
127
106/* Definitions for in6.h */ 128/* Definitions for in6.h */
107#define __UAPI_DEF_IN6_ADDR 1 129#define __UAPI_DEF_IN6_ADDR 1
108#define __UAPI_DEF_IN6_ADDR_ALT 1 130#define __UAPI_DEF_IN6_ADDR_ALT 1
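
The intent of the new guards, as a hedged sketch: when glibc has already provided the IPv4 types, <linux/in.h> now backs off instead of redefining them, so a translation unit like the one below is expected to compile. How cleanly it does in practice still depends on the libc honouring the same protocol.

#include <sys/socket.h>
#include <netinet/in.h>		/* glibc definitions come first */
#include <linux/in.h>		/* kernel additions, e.g. IP_BIND_ADDRESS_NO_PORT */

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };	/* glibc's struct */
	int opt = IP_BIND_ADDRESS_NO_PORT;			/* kernel-only macro */

	return sin.sin_family == AF_INET && opt ? 0 : 1;
}
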
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
index 6eb40244e019..302a2ced373c 100644
--- a/include/uapi/linux/mic_common.h
+++ b/include/uapi/linux/mic_common.h
@@ -80,6 +80,12 @@ struct mic_device_ctrl {
80 * @h2c_config_db: Host to Card Virtio config doorbell set by card 80 * @h2c_config_db: Host to Card Virtio config doorbell set by card
81 * @shutdown_status: Card shutdown status set by card 81 * @shutdown_status: Card shutdown status set by card
82 * @shutdown_card: Set to 1 by the host when a card shutdown is initiated 82 * @shutdown_card: Set to 1 by the host when a card shutdown is initiated
83 * @tot_nodes: Total number of nodes in the SCIF network
84 * @node_id: Unique id of the node
85 * @h2c_scif_db - Host to card SCIF doorbell set by card
86 * @c2h_scif_db - Card to host SCIF doorbell set by host
87 * @scif_host_dma_addr - SCIF host queue pair DMA address
88 * @scif_card_dma_addr - SCIF card queue pair DMA address
83 */ 89 */
84struct mic_bootparam { 90struct mic_bootparam {
85 __le32 magic; 91 __le32 magic;
@@ -88,6 +94,12 @@ struct mic_bootparam {
88 __s8 h2c_config_db; 94 __s8 h2c_config_db;
89 __u8 shutdown_status; 95 __u8 shutdown_status;
90 __u8 shutdown_card; 96 __u8 shutdown_card;
97 __u8 tot_nodes;
98 __u8 node_id;
99 __u8 h2c_scif_db;
100 __u8 c2h_scif_db;
101 __u64 scif_host_dma_addr;
102 __u64 scif_card_dma_addr;
91} __attribute__ ((aligned(8))); 103} __attribute__ ((aligned(8)));
92 104
93/** 105/**
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 4f52549b23ff..e08e413d5f71 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -44,8 +44,6 @@ enum {
44/* there is a gap here to match userspace */ 44/* there is a gap here to match userspace */
45#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */ 45#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
46 46
47#define nbd_cmd(req) ((req)->cmd[0])
48
49/* userspace doesn't need the nbd_device structure */ 47/* userspace doesn't need the nbd_device structure */
50 48
51/* These are sent over the network in the request/reply magic fields */ 49/* These are sent over the network in the request/reply magic fields */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
new file mode 100644
index 000000000000..2b94ea2287bb
--- /dev/null
+++ b/include/uapi/linux/ndctl.h
@@ -0,0 +1,197 @@
1/*
2 * Copyright (c) 2014-2015, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU Lesser General Public License,
6 * version 2.1, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT ANY
9 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
10 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
11 * more details.
12 */
13#ifndef __NDCTL_H__
14#define __NDCTL_H__
15
16#include <linux/types.h>
17
18struct nd_cmd_smart {
19 __u32 status;
20 __u8 data[128];
21} __packed;
22
23struct nd_cmd_smart_threshold {
24 __u32 status;
25 __u8 data[8];
26} __packed;
27
28struct nd_cmd_dimm_flags {
29 __u32 status;
30 __u32 flags;
31} __packed;
32
33struct nd_cmd_get_config_size {
34 __u32 status;
35 __u32 config_size;
36 __u32 max_xfer;
37} __packed;
38
39struct nd_cmd_get_config_data_hdr {
40 __u32 in_offset;
41 __u32 in_length;
42 __u32 status;
43 __u8 out_buf[0];
44} __packed;
45
46struct nd_cmd_set_config_hdr {
47 __u32 in_offset;
48 __u32 in_length;
49 __u8 in_buf[0];
50} __packed;
51
52struct nd_cmd_vendor_hdr {
53 __u32 opcode;
54 __u32 in_length;
55 __u8 in_buf[0];
56} __packed;
57
58struct nd_cmd_vendor_tail {
59 __u32 status;
60 __u32 out_length;
61 __u8 out_buf[0];
62} __packed;
63
64struct nd_cmd_ars_cap {
65 __u64 address;
66 __u64 length;
67 __u32 status;
68 __u32 max_ars_out;
69} __packed;
70
71struct nd_cmd_ars_start {
72 __u64 address;
73 __u64 length;
74 __u16 type;
75 __u8 reserved[6];
76 __u32 status;
77} __packed;
78
79struct nd_cmd_ars_status {
80 __u32 status;
81 __u32 out_length;
82 __u64 address;
83 __u64 length;
84 __u16 type;
85 __u32 num_records;
86 struct nd_ars_record {
87 __u32 handle;
88 __u32 flags;
89 __u64 err_address;
90 __u64 mask;
91 } __packed records[0];
92} __packed;
93
94enum {
95 ND_CMD_IMPLEMENTED = 0,
96
97 /* bus commands */
98 ND_CMD_ARS_CAP = 1,
99 ND_CMD_ARS_START = 2,
100 ND_CMD_ARS_STATUS = 3,
101
102 /* per-dimm commands */
103 ND_CMD_SMART = 1,
104 ND_CMD_SMART_THRESHOLD = 2,
105 ND_CMD_DIMM_FLAGS = 3,
106 ND_CMD_GET_CONFIG_SIZE = 4,
107 ND_CMD_GET_CONFIG_DATA = 5,
108 ND_CMD_SET_CONFIG_DATA = 6,
109 ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7,
110 ND_CMD_VENDOR_EFFECT_LOG = 8,
111 ND_CMD_VENDOR = 9,
112};
113
114static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
115{
116 static const char * const names[] = {
117 [ND_CMD_ARS_CAP] = "ars_cap",
118 [ND_CMD_ARS_START] = "ars_start",
119 [ND_CMD_ARS_STATUS] = "ars_status",
120 };
121
122 if (cmd < ARRAY_SIZE(names) && names[cmd])
123 return names[cmd];
124 return "unknown";
125}
126
127static inline const char *nvdimm_cmd_name(unsigned cmd)
128{
129 static const char * const names[] = {
130 [ND_CMD_SMART] = "smart",
131 [ND_CMD_SMART_THRESHOLD] = "smart_thresh",
132 [ND_CMD_DIMM_FLAGS] = "flags",
133 [ND_CMD_GET_CONFIG_SIZE] = "get_size",
134 [ND_CMD_GET_CONFIG_DATA] = "get_data",
135 [ND_CMD_SET_CONFIG_DATA] = "set_data",
136 [ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size",
137 [ND_CMD_VENDOR_EFFECT_LOG] = "effect_log",
138 [ND_CMD_VENDOR] = "vendor",
139 };
140
141 if (cmd < ARRAY_SIZE(names) && names[cmd])
142 return names[cmd];
143 return "unknown";
144}
145
146#define ND_IOCTL 'N'
147
148#define ND_IOCTL_SMART _IOWR(ND_IOCTL, ND_CMD_SMART,\
149 struct nd_cmd_smart)
150
151#define ND_IOCTL_SMART_THRESHOLD _IOWR(ND_IOCTL, ND_CMD_SMART_THRESHOLD,\
152 struct nd_cmd_smart_threshold)
153
154#define ND_IOCTL_DIMM_FLAGS _IOWR(ND_IOCTL, ND_CMD_DIMM_FLAGS,\
155 struct nd_cmd_dimm_flags)
156
157#define ND_IOCTL_GET_CONFIG_SIZE _IOWR(ND_IOCTL, ND_CMD_GET_CONFIG_SIZE,\
158 struct nd_cmd_get_config_size)
159
160#define ND_IOCTL_GET_CONFIG_DATA _IOWR(ND_IOCTL, ND_CMD_GET_CONFIG_DATA,\
161 struct nd_cmd_get_config_data_hdr)
162
163#define ND_IOCTL_SET_CONFIG_DATA _IOWR(ND_IOCTL, ND_CMD_SET_CONFIG_DATA,\
164 struct nd_cmd_set_config_hdr)
165
166#define ND_IOCTL_VENDOR _IOWR(ND_IOCTL, ND_CMD_VENDOR,\
167 struct nd_cmd_vendor_hdr)
168
169#define ND_IOCTL_ARS_CAP _IOWR(ND_IOCTL, ND_CMD_ARS_CAP,\
170 struct nd_cmd_ars_cap)
171
172#define ND_IOCTL_ARS_START _IOWR(ND_IOCTL, ND_CMD_ARS_START,\
173 struct nd_cmd_ars_start)
174
175#define ND_IOCTL_ARS_STATUS _IOWR(ND_IOCTL, ND_CMD_ARS_STATUS,\
176 struct nd_cmd_ars_status)
177
178#define ND_DEVICE_DIMM 1 /* nd_dimm: container for "config data" */
179#define ND_DEVICE_REGION_PMEM 2 /* nd_region: (parent of PMEM namespaces) */
180#define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */
181#define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */
182#define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */
183#define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */
184
185enum nd_driver_flags {
186 ND_DRIVER_DIMM = 1 << ND_DEVICE_DIMM,
187 ND_DRIVER_REGION_PMEM = 1 << ND_DEVICE_REGION_PMEM,
188 ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK,
189 ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO,
190 ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM,
191 ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK,
192};
193
194enum {
195 ND_MIN_NAMESPACE_SIZE = 0x00400000,
196};
197#endif /* __NDCTL_H__ */
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index 669a1f0b1d97..23cbd34e4ac7 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -15,6 +15,7 @@ enum {
15 NETCONFA_RP_FILTER, 15 NETCONFA_RP_FILTER,
16 NETCONFA_MC_FORWARDING, 16 NETCONFA_MC_FORWARDING,
17 NETCONFA_PROXY_NEIGH, 17 NETCONFA_PROXY_NEIGH,
18 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
18 __NETCONFA_MAX 19 __NETCONFA_MAX
19}; 20};
20#define NETCONFA_MAX (__NETCONFA_MAX - 1) 21#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ef1b1f88ca18..d93f949d1d9a 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -4,7 +4,8 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
6#include <linux/sysctl.h> 6#include <linux/sysctl.h>
7 7#include <linux/in.h>
8#include <linux/in6.h>
8 9
9/* Responses from hook functions. */ 10/* Responses from hook functions. */
10#define NF_DROP 0 11#define NF_DROP 0
@@ -51,11 +52,17 @@ enum nf_inet_hooks {
51 NF_INET_NUMHOOKS 52 NF_INET_NUMHOOKS
52}; 53};
53 54
55enum nf_dev_hooks {
56 NF_NETDEV_INGRESS,
57 NF_NETDEV_NUMHOOKS
58};
59
54enum { 60enum {
55 NFPROTO_UNSPEC = 0, 61 NFPROTO_UNSPEC = 0,
56 NFPROTO_INET = 1, 62 NFPROTO_INET = 1,
57 NFPROTO_IPV4 = 2, 63 NFPROTO_IPV4 = 2,
58 NFPROTO_ARP = 3, 64 NFPROTO_ARP = 3,
65 NFPROTO_NETDEV = 5,
59 NFPROTO_BRIDGE = 7, 66 NFPROTO_BRIDGE = 7,
60 NFPROTO_IPV6 = 10, 67 NFPROTO_IPV6 = 10,
61 NFPROTO_DECNET = 12, 68 NFPROTO_DECNET = 12,
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 5ab4e60894cf..63b2e34f1b60 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -15,12 +15,12 @@
15/* The protocol version */ 15/* The protocol version */
16#define IPSET_PROTOCOL 6 16#define IPSET_PROTOCOL 6
17 17
18/* The maximum permissible comment length we will accept over netlink */
19#define IPSET_MAX_COMMENT_SIZE 255
20
21/* The max length of strings including NUL: set and type identifiers */ 18/* The max length of strings including NUL: set and type identifiers */
22#define IPSET_MAXNAMELEN 32 19#define IPSET_MAXNAMELEN 32
23 20
21/* The maximum permissible comment length we will accept over netlink */
22#define IPSET_MAX_COMMENT_SIZE 255
23
24/* Message types and commands */ 24/* Message types and commands */
25enum ipset_cmd { 25enum ipset_cmd {
26 IPSET_CMD_NONE, 26 IPSET_CMD_NONE,
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 5fa1cd04762e..a99e6a997140 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -122,11 +122,13 @@ enum nft_list_attributes {
122 * 122 *
123 * @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32) 123 * @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
124 * @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32) 124 * @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
125 * @NFTA_HOOK_DEV: netdevice name (NLA_STRING)
125 */ 126 */
126enum nft_hook_attributes { 127enum nft_hook_attributes {
127 NFTA_HOOK_UNSPEC, 128 NFTA_HOOK_UNSPEC,
128 NFTA_HOOK_HOOKNUM, 129 NFTA_HOOK_HOOKNUM,
129 NFTA_HOOK_PRIORITY, 130 NFTA_HOOK_PRIORITY,
131 NFTA_HOOK_DEV,
130 __NFTA_HOOK_MAX 132 __NFTA_HOOK_MAX
131}; 133};
132#define NFTA_HOOK_MAX (__NFTA_HOOK_MAX - 1) 134#define NFTA_HOOK_MAX (__NFTA_HOOK_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index 8dd819e2b5fe..b67a853638ff 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -49,6 +49,7 @@ enum nfqnl_attr_type {
49 NFQA_EXP, /* nf_conntrack_netlink.h */ 49 NFQA_EXP, /* nf_conntrack_netlink.h */
50 NFQA_UID, /* __u32 sk uid */ 50 NFQA_UID, /* __u32 sk uid */
51 NFQA_GID, /* __u32 sk gid */ 51 NFQA_GID, /* __u32 sk gid */
52 NFQA_SECCTX, /* security context string */
52 53
53 __NFQA_MAX 54 __NFQA_MAX
54}; 55};
@@ -102,7 +103,8 @@ enum nfqnl_attr_config {
102#define NFQA_CFG_F_CONNTRACK (1 << 1) 103#define NFQA_CFG_F_CONNTRACK (1 << 1)
103#define NFQA_CFG_F_GSO (1 << 2) 104#define NFQA_CFG_F_GSO (1 << 2)
104#define NFQA_CFG_F_UID_GID (1 << 3) 105#define NFQA_CFG_F_UID_GID (1 << 3)
105#define NFQA_CFG_F_MAX (1 << 4) 106#define NFQA_CFG_F_SECCTX (1 << 4)
107#define NFQA_CFG_F_MAX (1 << 5)
106 108
107/* flags for NFQA_SKB_INFO */ 109/* flags for NFQA_SKB_INFO */
108/* packet appears to have wrong checksums, but they are ok */ 110/* packet appears to have wrong checksums, but they are ok */
diff --git a/include/uapi/linux/netfilter/xt_socket.h b/include/uapi/linux/netfilter/xt_socket.h
index 6315e2ac3474..87644f832494 100644
--- a/include/uapi/linux/netfilter/xt_socket.h
+++ b/include/uapi/linux/netfilter/xt_socket.h
@@ -6,6 +6,7 @@
6enum { 6enum {
7 XT_SOCKET_TRANSPARENT = 1 << 0, 7 XT_SOCKET_TRANSPARENT = 1 << 0,
8 XT_SOCKET_NOWILDCARD = 1 << 1, 8 XT_SOCKET_NOWILDCARD = 1 << 1,
9 XT_SOCKET_RESTORESKMARK = 1 << 2,
9}; 10};
10 11
11struct xt_socket_mtinfo1 { 12struct xt_socket_mtinfo1 {
@@ -18,4 +19,11 @@ struct xt_socket_mtinfo2 {
18}; 19};
19#define XT_SOCKET_FLAGS_V2 (XT_SOCKET_TRANSPARENT | XT_SOCKET_NOWILDCARD) 20#define XT_SOCKET_FLAGS_V2 (XT_SOCKET_TRANSPARENT | XT_SOCKET_NOWILDCARD)
20 21
22struct xt_socket_mtinfo3 {
23 __u8 flags;
24};
25#define XT_SOCKET_FLAGS_V3 (XT_SOCKET_TRANSPARENT \
26 | XT_SOCKET_NOWILDCARD \
27 | XT_SOCKET_RESTORESKMARK)
28
21#endif /* _XT_SOCKET_H */ 29#endif /* _XT_SOCKET_H */
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index 773dfe8924c7..fd2ee501726d 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -6,7 +6,7 @@
6 * 6 *
7 * ebtables.c,v 2.0, April, 2002 7 * ebtables.c,v 2.0, April, 2002
8 * 8 *
9 * This code is stongly inspired on the iptables code which is 9 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 */ 11 */
12 12
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 1a85940f8ab7..cf6a65cccbdf 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -101,13 +101,15 @@ struct nlmsgerr {
101 struct nlmsghdr msg; 101 struct nlmsghdr msg;
102}; 102};
103 103
104#define NETLINK_ADD_MEMBERSHIP 1 104#define NETLINK_ADD_MEMBERSHIP 1
105#define NETLINK_DROP_MEMBERSHIP 2 105#define NETLINK_DROP_MEMBERSHIP 2
106#define NETLINK_PKTINFO 3 106#define NETLINK_PKTINFO 3
107#define NETLINK_BROADCAST_ERROR 4 107#define NETLINK_BROADCAST_ERROR 4
108#define NETLINK_NO_ENOBUFS 5 108#define NETLINK_NO_ENOBUFS 5
109#define NETLINK_RX_RING 6 109#define NETLINK_RX_RING 6
110#define NETLINK_TX_RING 7 110#define NETLINK_TX_RING 7
111#define NETLINK_LISTEN_ALL_NSID 8
112#define NETLINK_LIST_MEMBERSHIPS 9
111 113
112struct nl_pktinfo { 114struct nl_pktinfo {
113 __u32 group; 115 __u32 group;
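The netlink.h hunk introduces two new SOL_NETLINK socket options, NETLINK_LISTEN_ALL_NSID and NETLINK_LIST_MEMBERSHIPS; the latter lets a process read back the multicast groups its socket has joined as a bitmap. A minimal sketch, assuming SOL_NETLINK is 270 (defined here in case the libc headers lack it):

/* Sketch: join one rtnetlink group, then dump the membership bitmap. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h> /* RTNLGRP_LINK, used only as an example */

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

int main(void)
{
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        unsigned int grp = RTNLGRP_LINK;
        unsigned int groups[4] = { 0 };          /* bitmap, 32 groups/word */
        socklen_t len = sizeof(groups);

        /* Join one group the classic way ... */
        setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));

        /* ... then read the full membership bitmap back; bit N-1 means
         * "member of group N". The kernel reports the size it needs. */
        if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS,
                       groups, &len) == 0) {
                if (len > sizeof(groups))
                        len = sizeof(groups);
                for (unsigned int i = 0; i < 8 * len; i++)
                        if (groups[i / 32] & (1u << (i % 32)))
                                printf("member of group %u\n", i + 1);
        }
        close(fd);
        return 0;
}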
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index c1e2e63cf9b5..dd3f75389076 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -86,6 +86,8 @@
86 * for this event is the application ID (AID). 86 * for this event is the application ID (AID).
87 * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller. 87 * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
88 * @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element. 88 * @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element.
89 * @NFC_CMD_VENDOR: Vendor specific command, to be implemented directly
90 * from the driver in order to support hardware specific operations.
89 */ 91 */
90enum nfc_commands { 92enum nfc_commands {
91 NFC_CMD_UNSPEC, 93 NFC_CMD_UNSPEC,
@@ -117,6 +119,7 @@ enum nfc_commands {
117 NFC_CMD_GET_SE, 119 NFC_CMD_GET_SE,
118 NFC_CMD_SE_IO, 120 NFC_CMD_SE_IO,
119 NFC_CMD_ACTIVATE_TARGET, 121 NFC_CMD_ACTIVATE_TARGET,
122 NFC_CMD_VENDOR,
120/* private: internal use only */ 123/* private: internal use only */
121 __NFC_CMD_AFTER_LAST 124 __NFC_CMD_AFTER_LAST
122}; 125};
@@ -153,6 +156,10 @@ enum nfc_commands {
153 * @NFC_ATTR_APDU: Secure element APDU 156 * @NFC_ATTR_APDU: Secure element APDU
154 * @NFC_ATTR_TARGET_ISO15693_DSFID: ISO 15693 Data Storage Format Identifier 157 * @NFC_ATTR_TARGET_ISO15693_DSFID: ISO 15693 Data Storage Format Identifier
155 * @NFC_ATTR_TARGET_ISO15693_UID: ISO 15693 Unique Identifier 158 * @NFC_ATTR_TARGET_ISO15693_UID: ISO 15693 Unique Identifier
159 * @NFC_ATTR_VENDOR_ID: NFC manufacturer unique ID, typically an OUI
160 * @NFC_ATTR_VENDOR_SUBCMD: Vendor specific sub command
161 * @NFC_ATTR_VENDOR_DATA: Vendor specific data, to be optionally passed
162 * to a vendor specific command implementation
156 */ 163 */
157enum nfc_attrs { 164enum nfc_attrs {
158 NFC_ATTR_UNSPEC, 165 NFC_ATTR_UNSPEC,
@@ -184,6 +191,9 @@ enum nfc_attrs {
184 NFC_ATTR_TARGET_ISO15693_DSFID, 191 NFC_ATTR_TARGET_ISO15693_DSFID,
185 NFC_ATTR_TARGET_ISO15693_UID, 192 NFC_ATTR_TARGET_ISO15693_UID,
186 NFC_ATTR_SE_PARAMS, 193 NFC_ATTR_SE_PARAMS,
194 NFC_ATTR_VENDOR_ID,
195 NFC_ATTR_VENDOR_SUBCMD,
196 NFC_ATTR_VENDOR_DATA,
187/* private: internal use only */ 197/* private: internal use only */
188 __NFC_ATTR_AFTER_LAST 198 __NFC_ATTR_AFTER_LAST
189}; 199};
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index adc0aff83fbb..2119c7c274d7 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -86,6 +86,10 @@
86#define ACL4_SUPPORT_AUDIT_ACL 0x04 86#define ACL4_SUPPORT_AUDIT_ACL 0x04
87#define ACL4_SUPPORT_ALARM_ACL 0x08 87#define ACL4_SUPPORT_ALARM_ACL 0x08
88 88
89#define NFS4_ACL_AUTO_INHERIT 0x00000001
90#define NFS4_ACL_PROTECTED 0x00000002
91#define NFS4_ACL_DEFAULTED 0x00000004
92
89#define NFS4_ACE_FILE_INHERIT_ACE 0x00000001 93#define NFS4_ACE_FILE_INHERIT_ACE 0x00000001
90#define NFS4_ACE_DIRECTORY_INHERIT_ACE 0x00000002 94#define NFS4_ACE_DIRECTORY_INHERIT_ACE 0x00000002
91#define NFS4_ACE_NO_PROPAGATE_INHERIT_ACE 0x00000004 95#define NFS4_ACE_NO_PROPAGATE_INHERIT_ACE 0x00000004
@@ -93,6 +97,7 @@
93#define NFS4_ACE_SUCCESSFUL_ACCESS_ACE_FLAG 0x00000010 97#define NFS4_ACE_SUCCESSFUL_ACCESS_ACE_FLAG 0x00000010
94#define NFS4_ACE_FAILED_ACCESS_ACE_FLAG 0x00000020 98#define NFS4_ACE_FAILED_ACCESS_ACE_FLAG 0x00000020
95#define NFS4_ACE_IDENTIFIER_GROUP 0x00000040 99#define NFS4_ACE_IDENTIFIER_GROUP 0x00000040
100#define NFS4_ACE_INHERITED_ACE 0x00000080
96 101
97#define NFS4_ACE_READ_DATA 0x00000001 102#define NFS4_ACE_READ_DATA 0x00000001
98#define NFS4_ACE_LIST_DIRECTORY 0x00000001 103#define NFS4_ACE_LIST_DIRECTORY 0x00000001
@@ -106,6 +111,8 @@
106#define NFS4_ACE_DELETE_CHILD 0x00000040 111#define NFS4_ACE_DELETE_CHILD 0x00000040
107#define NFS4_ACE_READ_ATTRIBUTES 0x00000080 112#define NFS4_ACE_READ_ATTRIBUTES 0x00000080
108#define NFS4_ACE_WRITE_ATTRIBUTES 0x00000100 113#define NFS4_ACE_WRITE_ATTRIBUTES 0x00000100
114#define NFS4_ACE_WRITE_RETENTION 0x00000200
115#define NFS4_ACE_WRITE_RETENTION_HOLD 0x00000400
109#define NFS4_ACE_DELETE 0x00010000 116#define NFS4_ACE_DELETE 0x00010000
110#define NFS4_ACE_READ_ACL 0x00020000 117#define NFS4_ACE_READ_ACL 0x00020000
111#define NFS4_ACE_WRITE_ACL 0x00040000 118#define NFS4_ACE_WRITE_ACL 0x00040000
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 241220c43e86..c0ab6b0a3919 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2620,16 +2620,17 @@ enum nl80211_band_attr {
2620 * an indoor surroundings, i.e., it is connected to AC power (and not 2620 * an indoor surroundings, i.e., it is connected to AC power (and not
2621 * through portable DC inverters) or is under the control of a master 2621 * through portable DC inverters) or is under the control of a master
2622 * that is acting as an AP and is connected to AC power. 2622 * that is acting as an AP and is connected to AC power.
2623 * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this 2623 * @NL80211_FREQUENCY_ATTR_IR_CONCURRENT: IR operation is allowed on this
2624 * channel if it's connected concurrently to a BSS on the same channel on 2624 * channel if it's connected concurrently to a BSS on the same channel on
2625 * the 2 GHz band or to a channel in the same UNII band (on the 5 GHz 2625 * the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
2626 * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a 2626 * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO or TDLS
2627 * channel that has the GO_CONCURRENT attribute set can be done when there 2627 * off-channel on a channel that has the IR_CONCURRENT attribute set can be
2628 * is a clear assessment that the device is operating under the guidance of 2628 * done when there is a clear assessment that the device is operating under
2629 * an authorized master, i.e., setting up a GO while the device is also 2629 * the guidance of an authorized master, i.e., setting up a GO or TDLS
2630 * connected to an AP with DFS and radar detection on the UNII band (it is 2630 * off-channel while the device is also connected to an AP with DFS and
2631 * up to user-space, i.e., wpa_supplicant to perform the required 2631 * radar detection on the UNII band (it is up to user-space, i.e.,
2632 * verifications) 2632 * wpa_supplicant to perform the required verifications). Using this
2633 * attribute for IR is disallowed for master interfaces (IBSS, AP).
2633 * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed 2634 * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
2634 * on this channel in current regulatory domain. 2635 * on this channel in current regulatory domain.
2635 * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed 2636 * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
@@ -2641,7 +2642,7 @@ enum nl80211_band_attr {
2641 * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122 2642 * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
2642 * for more information on the FCC description of the relaxations allowed 2643 * for more information on the FCC description of the relaxations allowed
2643 * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and 2644 * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
2644 * NL80211_FREQUENCY_ATTR_GO_CONCURRENT. 2645 * NL80211_FREQUENCY_ATTR_IR_CONCURRENT.
2645 */ 2646 */
2646enum nl80211_frequency_attr { 2647enum nl80211_frequency_attr {
2647 __NL80211_FREQUENCY_ATTR_INVALID, 2648 __NL80211_FREQUENCY_ATTR_INVALID,
@@ -2659,7 +2660,7 @@ enum nl80211_frequency_attr {
2659 NL80211_FREQUENCY_ATTR_NO_160MHZ, 2660 NL80211_FREQUENCY_ATTR_NO_160MHZ,
2660 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME, 2661 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
2661 NL80211_FREQUENCY_ATTR_INDOOR_ONLY, 2662 NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
2662 NL80211_FREQUENCY_ATTR_GO_CONCURRENT, 2663 NL80211_FREQUENCY_ATTR_IR_CONCURRENT,
2663 NL80211_FREQUENCY_ATTR_NO_20MHZ, 2664 NL80211_FREQUENCY_ATTR_NO_20MHZ,
2664 NL80211_FREQUENCY_ATTR_NO_10MHZ, 2665 NL80211_FREQUENCY_ATTR_NO_10MHZ,
2665 2666
@@ -2672,6 +2673,8 @@ enum nl80211_frequency_attr {
2672#define NL80211_FREQUENCY_ATTR_PASSIVE_SCAN NL80211_FREQUENCY_ATTR_NO_IR 2673#define NL80211_FREQUENCY_ATTR_PASSIVE_SCAN NL80211_FREQUENCY_ATTR_NO_IR
2673#define NL80211_FREQUENCY_ATTR_NO_IBSS NL80211_FREQUENCY_ATTR_NO_IR 2674#define NL80211_FREQUENCY_ATTR_NO_IBSS NL80211_FREQUENCY_ATTR_NO_IR
2674#define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR 2675#define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR
2676#define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \
2677 NL80211_FREQUENCY_ATTR_IR_CONCURRENT
2675 2678
2676/** 2679/**
2677 * enum nl80211_bitrate_attr - bitrate attributes 2680 * enum nl80211_bitrate_attr - bitrate attributes
@@ -2830,7 +2833,7 @@ enum nl80211_sched_scan_match_attr {
2830 * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated 2833 * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
2831 * base on contiguous rules and wider channels will be allowed to cross 2834 * base on contiguous rules and wider channels will be allowed to cross
2832 * multiple contiguous/overlapping frequency ranges. 2835 * multiple contiguous/overlapping frequency ranges.
2833 * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT 2836 * @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT
2834 * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation 2837 * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
2835 * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation 2838 * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
2836 * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed 2839 * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
@@ -2847,7 +2850,7 @@ enum nl80211_reg_rule_flags {
2847 NL80211_RRF_NO_IR = 1<<7, 2850 NL80211_RRF_NO_IR = 1<<7,
2848 __NL80211_RRF_NO_IBSS = 1<<8, 2851 __NL80211_RRF_NO_IBSS = 1<<8,
2849 NL80211_RRF_AUTO_BW = 1<<11, 2852 NL80211_RRF_AUTO_BW = 1<<11,
2850 NL80211_RRF_GO_CONCURRENT = 1<<12, 2853 NL80211_RRF_IR_CONCURRENT = 1<<12,
2851 NL80211_RRF_NO_HT40MINUS = 1<<13, 2854 NL80211_RRF_NO_HT40MINUS = 1<<13,
2852 NL80211_RRF_NO_HT40PLUS = 1<<14, 2855 NL80211_RRF_NO_HT40PLUS = 1<<14,
2853 NL80211_RRF_NO_80MHZ = 1<<15, 2856 NL80211_RRF_NO_80MHZ = 1<<15,
@@ -2859,6 +2862,7 @@ enum nl80211_reg_rule_flags {
2859#define NL80211_RRF_NO_IR NL80211_RRF_NO_IR 2862#define NL80211_RRF_NO_IR NL80211_RRF_NO_IR
2860#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\ 2863#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\
2861 NL80211_RRF_NO_HT40PLUS) 2864 NL80211_RRF_NO_HT40PLUS)
2865#define NL80211_RRF_GO_CONCURRENT NL80211_RRF_IR_CONCURRENT
2862 2866
2863/* For backport compatibility with older userspace */ 2867/* For backport compatibility with older userspace */
2864#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS) 2868#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index aef9a81b2d75..732b32e92b02 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -179,6 +179,10 @@ enum {
179 NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, 179 NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
180}; 180};
181 181
182enum {
183 NVME_AER_NOTICE_NS_CHANGED = 0x0002,
184};
185
182struct nvme_lba_range_type { 186struct nvme_lba_range_type {
183 __u8 type; 187 __u8 type;
184 __u8 attributes; 188 __u8 attributes;
@@ -579,5 +583,6 @@ struct nvme_passthru_cmd {
579#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd) 583#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd)
580#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io) 584#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
581#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) 585#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
586#define NVME_IOCTL_RESET _IO('N', 0x44)
582 587
583#endif /* _UAPI_LINUX_NVME_H */ 588#endif /* _UAPI_LINUX_NVME_H */
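The nvme.h hunk adds NVME_IOCTL_RESET, a controller reset ioctl that takes no argument. A minimal sketch against the per-controller character device; the device path is illustrative:

/* Sketch: request a controller reset through the new ioctl. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

int main(void)
{
        int fd = open("/dev/nvme0", O_RDWR);

        if (fd < 0) {
                perror("open /dev/nvme0");
                return 1;
        }
        /* NVME_IOCTL_RESET asks the driver to reset the controller that
         * backs this character device; no payload is exchanged. */
        if (ioctl(fd, NVME_IOCTL_RESET) < 0)
                perror("NVME_IOCTL_RESET");
        close(fd);
        return 0;
}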
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index bbd49a0c46c7..1dab77601c21 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -153,6 +153,8 @@ enum ovs_packet_cmd {
153 * flow key against the kernel's. 153 * flow key against the kernel's.
154 * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used 154 * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used
155 * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. 155 * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes.
156 * Also used in upcall when %OVS_ACTION_ATTR_USERSPACE has optional
157 * %OVS_USERSPACE_ATTR_ACTIONS attribute.
156 * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION 158 * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
157 * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an 159 * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
158 * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content 160 * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content
@@ -528,6 +530,7 @@ enum ovs_sample_attr {
528 * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. 530 * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
529 * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get 531 * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get
530 * tunnel info. 532 * tunnel info.
533 * @OVS_USERSPACE_ATTR_ACTIONS: If present, send actions with upcall.
531 */ 534 */
532enum ovs_userspace_attr { 535enum ovs_userspace_attr {
533 OVS_USERSPACE_ATTR_UNSPEC, 536 OVS_USERSPACE_ATTR_UNSPEC,
@@ -535,6 +538,7 @@ enum ovs_userspace_attr {
535 OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */ 538 OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */
536 OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port 539 OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port
537 * to get tunnel info. */ 540 * to get tunnel info. */
541 OVS_USERSPACE_ATTR_ACTIONS, /* Optional flag to get actions. */
538 __OVS_USERSPACE_ATTR_MAX 542 __OVS_USERSPACE_ATTR_MAX
539}; 543};
540 544
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index efe3443572ba..413417f3707b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,6 +319,7 @@
319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */ 319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */ 320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ 321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
322#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
322#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ 323#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
323 324
324/* MSI-X Table entry format */ 325/* MSI-X Table entry format */
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index bf08e76bf505..4f0d1bc3647d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -4,6 +4,7 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/pkt_sched.h> 5#include <linux/pkt_sched.h>
6 6
7#ifdef __KERNEL__
7/* I think i could have done better macros ; for now this is stolen from 8/* I think i could have done better macros ; for now this is stolen from
8 * some arch/mips code - jhs 9 * some arch/mips code - jhs
9*/ 10*/
@@ -35,20 +36,6 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance
35 * 36 *
36 * */ 37 * */
37 38
38#define TC_MUNGED _TC_MAKEMASK1(0)
39#define SET_TC_MUNGED(v) ( TC_MUNGED | (v & ~TC_MUNGED))
40#define CLR_TC_MUNGED(v) ( v & ~TC_MUNGED)
41
42#define TC_OK2MUNGE _TC_MAKEMASK1(1)
43#define SET_TC_OK2MUNGE(v) ( TC_OK2MUNGE | (v & ~TC_OK2MUNGE))
44#define CLR_TC_OK2MUNGE(v) ( v & ~TC_OK2MUNGE)
45
46#define S_TC_VERD _TC_MAKE32(2)
47#define M_TC_VERD _TC_MAKEMASK(4,S_TC_VERD)
48#define G_TC_VERD(x) _TC_GETVALUE(x,S_TC_VERD,M_TC_VERD)
49#define V_TC_VERD(x) _TC_MAKEVALUE(x,S_TC_VERD)
50#define SET_TC_VERD(v,n) ((V_TC_VERD(n)) | (v & ~M_TC_VERD))
51
52#define S_TC_FROM _TC_MAKE32(6) 39#define S_TC_FROM _TC_MAKE32(6)
53#define M_TC_FROM _TC_MAKEMASK(2,S_TC_FROM) 40#define M_TC_FROM _TC_MAKEMASK(2,S_TC_FROM)
54#define G_TC_FROM(x) _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM) 41#define G_TC_FROM(x) _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM)
@@ -62,18 +49,16 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance
62#define SET_TC_NCLS(v) ( TC_NCLS | (v & ~TC_NCLS)) 49#define SET_TC_NCLS(v) ( TC_NCLS | (v & ~TC_NCLS))
63#define CLR_TC_NCLS(v) ( v & ~TC_NCLS) 50#define CLR_TC_NCLS(v) ( v & ~TC_NCLS)
64 51
65#define S_TC_RTTL _TC_MAKE32(9)
66#define M_TC_RTTL _TC_MAKEMASK(3,S_TC_RTTL)
67#define G_TC_RTTL(x) _TC_GETVALUE(x,S_TC_RTTL,M_TC_RTTL)
68#define V_TC_RTTL(x) _TC_MAKEVALUE(x,S_TC_RTTL)
69#define SET_TC_RTTL(v,n) ((V_TC_RTTL(n)) | (v & ~M_TC_RTTL))
70
71#define S_TC_AT _TC_MAKE32(12) 52#define S_TC_AT _TC_MAKE32(12)
72#define M_TC_AT _TC_MAKEMASK(2,S_TC_AT) 53#define M_TC_AT _TC_MAKEMASK(2,S_TC_AT)
73#define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT) 54#define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT)
74#define V_TC_AT(x) _TC_MAKEVALUE(x,S_TC_AT) 55#define V_TC_AT(x) _TC_MAKEVALUE(x,S_TC_AT)
75#define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT)) 56#define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT))
76 57
58#define MAX_REC_LOOP 4
59#define MAX_RED_LOOP 4
60#endif
61
77/* Action attributes */ 62/* Action attributes */
78enum { 63enum {
79 TCA_ACT_UNSPEC, 64 TCA_ACT_UNSPEC,
@@ -93,8 +78,6 @@ enum {
93#define TCA_ACT_NOUNBIND 0 78#define TCA_ACT_NOUNBIND 0
94#define TCA_ACT_REPLACE 1 79#define TCA_ACT_REPLACE 1
95#define TCA_ACT_NOREPLACE 0 80#define TCA_ACT_NOREPLACE 0
96#define MAX_REC_LOOP 4
97#define MAX_RED_LOOP 4
98 81
99#define TC_ACT_UNSPEC (-1) 82#define TC_ACT_UNSPEC (-1)
100#define TC_ACT_OK 0 83#define TC_ACT_OK 0
@@ -404,6 +387,36 @@ enum {
404 387
405#define TCA_BPF_MAX (__TCA_BPF_MAX - 1) 388#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
406 389
390/* Flower classifier */
391
392enum {
393 TCA_FLOWER_UNSPEC,
394 TCA_FLOWER_CLASSID,
395 TCA_FLOWER_INDEV,
396 TCA_FLOWER_ACT,
397 TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
398 TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
399 TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
400 TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
401 TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
402 TCA_FLOWER_KEY_IP_PROTO, /* u8 */
403 TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
404 TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
405 TCA_FLOWER_KEY_IPV4_DST, /* be32 */
406 TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
407 TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
408 TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
409 TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
410 TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
411 TCA_FLOWER_KEY_TCP_SRC, /* be16 */
412 TCA_FLOWER_KEY_TCP_DST, /* be16 */
413 TCA_FLOWER_KEY_UDP_SRC, /* be16 */
414 TCA_FLOWER_KEY_UDP_DST, /* be16 */
415 __TCA_FLOWER_MAX,
416};
417
418#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
419
407/* Extended Matches */ 420/* Extended Matches */
408 421
409struct tcf_ematch_tree_hdr { 422struct tcf_ematch_tree_hdr {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 534b84710745..8d2530daca9f 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -268,7 +268,8 @@ enum {
268 TCA_GRED_STAB, 268 TCA_GRED_STAB,
269 TCA_GRED_DPS, 269 TCA_GRED_DPS,
270 TCA_GRED_MAX_P, 270 TCA_GRED_MAX_P,
271 __TCA_GRED_MAX, 271 TCA_GRED_LIMIT,
272 __TCA_GRED_MAX,
272}; 273};
273 274
274#define TCA_GRED_MAX (__TCA_GRED_MAX - 1) 275#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
@@ -679,6 +680,7 @@ enum {
679 TCA_CODEL_LIMIT, 680 TCA_CODEL_LIMIT,
680 TCA_CODEL_INTERVAL, 681 TCA_CODEL_INTERVAL,
681 TCA_CODEL_ECN, 682 TCA_CODEL_ECN,
683 TCA_CODEL_CE_THRESHOLD,
682 __TCA_CODEL_MAX 684 __TCA_CODEL_MAX
683}; 685};
684 686
@@ -695,6 +697,7 @@ struct tc_codel_xstats {
695 __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */ 697 __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */
696 __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */ 698 __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
697 __u32 dropping; /* are we in dropping state ? */ 699 __u32 dropping; /* are we in dropping state ? */
700 __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
698}; 701};
699 702
700/* FQ_CODEL */ 703/* FQ_CODEL */
@@ -707,6 +710,7 @@ enum {
707 TCA_FQ_CODEL_ECN, 710 TCA_FQ_CODEL_ECN,
708 TCA_FQ_CODEL_FLOWS, 711 TCA_FQ_CODEL_FLOWS,
709 TCA_FQ_CODEL_QUANTUM, 712 TCA_FQ_CODEL_QUANTUM,
713 TCA_FQ_CODEL_CE_THRESHOLD,
710 __TCA_FQ_CODEL_MAX 714 __TCA_FQ_CODEL_MAX
711}; 715};
712 716
@@ -730,6 +734,7 @@ struct tc_fq_codel_qd_stats {
730 */ 734 */
731 __u32 new_flows_len; /* count of flows in new list */ 735 __u32 new_flows_len; /* count of flows in new list */
732 __u32 old_flows_len; /* count of flows in old list */ 736 __u32 old_flows_len; /* count of flows in old list */
737 __u32 ce_mark; /* packets above ce_threshold */
733}; 738};
734 739
735struct tc_fq_codel_cl_stats { 740struct tc_fq_codel_cl_stats {
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 91950950aa59..0f9265cb2a96 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -38,6 +38,8 @@
38 38
39#define RDS_IB_ABI_VERSION 0x301 39#define RDS_IB_ABI_VERSION 0x301
40 40
41#define SOL_RDS 276
42
41/* 43/*
42 * setsockopt/getsockopt for SOL_RDS 44 * setsockopt/getsockopt for SOL_RDS
43 */ 45 */
@@ -48,6 +50,14 @@
48#define RDS_RECVERR 5 50#define RDS_RECVERR 5
49#define RDS_CONG_MONITOR 6 51#define RDS_CONG_MONITOR 6
50#define RDS_GET_MR_FOR_DEST 7 52#define RDS_GET_MR_FOR_DEST 7
53#define SO_RDS_TRANSPORT 8
54
55/* supported values for SO_RDS_TRANSPORT */
56#define RDS_TRANS_IB 0
57#define RDS_TRANS_IWARP 1
58#define RDS_TRANS_TCP 2
59#define RDS_TRANS_COUNT 3
60#define RDS_TRANS_NONE (~0)
51 61
52/* 62/*
53 * Control message types for SOL_RDS. 63 * Control message types for SOL_RDS.
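The rds.h hunk defines SOL_RDS and a new SO_RDS_TRANSPORT option with RDS_TRANS_* values, so an application can pin a socket to one transport before binding. A minimal sketch, assuming AF_RDS is available from the libc headers (it is protocol family 21) and using an arbitrary loopback address and port purely for illustration:

/* Sketch: force the TCP transport on an RDS socket before bind(). */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/rds.h>

#ifndef AF_RDS
#define AF_RDS 21
#endif

int main(void)
{
        int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
        int trans = RDS_TRANS_TCP;
        struct sockaddr_in sin = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
                .sin_port = htons(18634),         /* example port */
        };

        if (fd < 0) {
                perror("socket(AF_RDS)");
                return 1;
        }
        /* Transport selection is only allowed on an unbound socket; the
         * choice can later be read back with getsockopt(). */
        if (setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT, &trans, sizeof(trans)) < 0)
                perror("SO_RDS_TRANSPORT");
        if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
                perror("bind");
        close(fd);
        return 0;
}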
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 17fb02f488da..fdd8f07f1d34 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -338,6 +338,9 @@ struct rtnexthop {
338#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */ 338#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
339#define RTNH_F_ONLINK 4 /* Gateway is forced on link */ 339#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
340#define RTNH_F_OFFLOAD 8 /* offloaded route */ 340#define RTNH_F_OFFLOAD 8 /* offloaded route */
341#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
342
343#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
341 344
342/* Macros to handle hexthops */ 345/* Macros to handle hexthops */
343 346
diff --git a/include/uapi/linux/scif_ioctl.h b/include/uapi/linux/scif_ioctl.h
new file mode 100644
index 000000000000..4a94d917cf99
--- /dev/null
+++ b/include/uapi/linux/scif_ioctl.h
@@ -0,0 +1,130 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * BSD LICENSE
21 *
22 * Copyright(c) 2014 Intel Corporation.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name of Intel Corporation nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 *
50 * Intel SCIF driver.
51 *
52 */
53/*
54 * -----------------------------------------
55 * SCIF IOCTL interface information
56 * -----------------------------------------
57 */
58#ifndef SCIF_IOCTL_H
59#define SCIF_IOCTL_H
60
61#include <linux/types.h>
62
63/**
64 * struct scif_port_id - SCIF port information
65 * @node: node on which port resides
66 * @port: local port number
67 */
68struct scif_port_id {
69 __u16 node;
70 __u16 port;
71};
72
73/**
74 * struct scifioctl_connect - used for SCIF_CONNECT IOCTL
75 * @self: used to read back the assigned port_id
76 * @peer: destination node and port to connect to
77 */
78struct scifioctl_connect {
79 struct scif_port_id self;
80 struct scif_port_id peer;
81};
82
83/**
84 * struct scifioctl_accept - used for SCIF_ACCEPTREQ IOCTL
85 * @flags: flags
86 * @peer: global id of peer endpoint
87 * @endpt: new connected endpoint descriptor
88 */
89struct scifioctl_accept {
90 __s32 flags;
91 struct scif_port_id peer;
92 __u64 endpt;
93};
94
95/**
96 * struct scifioctl_msg - used for SCIF_SEND/SCIF_RECV IOCTL
97 * @msg: message buffer address
98 * @len: message length
99 * @flags: flags
100 * @out_len: number of bytes sent/received
101 */
102struct scifioctl_msg {
103 __u64 msg;
104 __s32 len;
105 __s32 flags;
106 __s32 out_len;
107};
108
109/**
110 * struct scifioctl_node_ids - used for SCIF_GET_NODEIDS IOCTL
111 * @nodes: pointer to an array of node_ids
112 * @self: ID of the current node
113 * @len: length of array
114 */
115struct scifioctl_node_ids {
116 __u64 nodes;
117 __u64 self;
118 __s32 len;
119};
120
121#define SCIF_BIND _IOWR('s', 1, __u64)
122#define SCIF_LISTEN _IOW('s', 2, __s32)
123#define SCIF_CONNECT _IOWR('s', 3, struct scifioctl_connect)
124#define SCIF_ACCEPTREQ _IOWR('s', 4, struct scifioctl_accept)
125#define SCIF_ACCEPTREG _IOWR('s', 5, __u64)
126#define SCIF_SEND _IOWR('s', 6, struct scifioctl_msg)
127#define SCIF_RECV _IOWR('s', 7, struct scifioctl_msg)
128#define SCIF_GET_NODEIDS _IOWR('s', 14, struct scifioctl_node_ids)
129
130#endif /* SCIF_IOCTL_H */
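The new scif_ioctl.h describes the SCIF connection-oriented ioctl flow: bind a local port, then connect to a peer node/port. A hedged sketch of that flow; the /dev/scif node name, the peer node id and the port numbers are assumptions for illustration only:

/* Sketch: bind a SCIF endpoint and connect it to a peer. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/scif_ioctl.h>

int main(void)
{
        int fd = open("/dev/scif", O_RDWR);
        __u64 port = 2000;                           /* requested local port */
        struct scifioctl_connect conn = {
                .peer = { .node = 1, .port = 2000 }, /* example peer */
        };

        if (fd < 0) {
                perror("open /dev/scif");
                return 1;
        }
        /* SCIF_BIND takes the requested port number and returns the one
         * actually granted in the same __u64. */
        if (ioctl(fd, SCIF_BIND, &port) < 0)
                perror("SCIF_BIND");
        /* SCIF_CONNECT fills conn.self with the locally assigned port id. */
        if (ioctl(fd, SCIF_CONNECT, &conn) < 0)
                perror("SCIF_CONNECT");
        else
                printf("connected from node %u port %u\n",
                       conn.self.node, conn.self.port);
        close(fd);
        return 0;
}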
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index b2122813f18a..93ba148f923e 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -258,4 +258,7 @@
258/* Cris v10 / v32 SoC */ 258/* Cris v10 / v32 SoC */
259#define PORT_CRIS 112 259#define PORT_CRIS 112
260 260
261/* STM32 USART */
262#define PORT_STM32 113
263
261#endif /* _UAPILINUX_SERIAL_CORE_H */ 264#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index e9b4cb0cd7ed..1e5ac4e776da 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -331,6 +331,9 @@
331 * Extra serial register definitions for the internal UARTs 331 * Extra serial register definitions for the internal UARTs
332 * in TI OMAP processors. 332 * in TI OMAP processors.
333 */ 333 */
334#define OMAP1_UART1_BASE 0xfffb0000
335#define OMAP1_UART2_BASE 0xfffb0800
336#define OMAP1_UART3_BASE 0xfffb9800
334#define UART_OMAP_MDR1 0x08 /* Mode definition register */ 337#define UART_OMAP_MDR1 0x08 /* Mode definition register */
335#define UART_OMAP_MDR2 0x09 /* Mode definition register 2 */ 338#define UART_OMAP_MDR2 0x09 /* Mode definition register 2 */
336#define UART_OMAP_SCR 0x10 /* Supplementary control register */ 339#define UART_OMAP_SCR 0x10 /* Supplementary control register */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 6a6fb747c78d..eee8968407f0 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -276,6 +276,8 @@ enum
276 LINUX_MIB_TCPACKSKIPPEDFINWAIT2, /* TCPACKSkippedFinWait2 */ 276 LINUX_MIB_TCPACKSKIPPEDFINWAIT2, /* TCPACKSkippedFinWait2 */
277 LINUX_MIB_TCPACKSKIPPEDTIMEWAIT, /* TCPACKSkippedTimeWait */ 277 LINUX_MIB_TCPACKSKIPPEDTIMEWAIT, /* TCPACKSkippedTimeWait */
278 LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */ 278 LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
279 LINUX_MIB_TCPWINPROBE, /* TCPWinProbe */
280 LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
279 __LINUX_MIB_MAX 281 __LINUX_MIB_MAX
280}; 282};
281 283
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index b00e29efb161..49230d36f9ce 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -23,4 +23,14 @@ enum {
23 SK_MEMINFO_VARS, 23 SK_MEMINFO_VARS,
24}; 24};
25 25
26enum sknetlink_groups {
27 SKNLGRP_NONE,
28 SKNLGRP_INET_TCP_DESTROY,
29 SKNLGRP_INET_UDP_DESTROY,
30 SKNLGRP_INET6_TCP_DESTROY,
31 SKNLGRP_INET6_UDP_DESTROY,
32 __SKNLGRP_MAX,
33};
34#define SKNLGRP_MAX (__SKNLGRP_MAX - 1)
35
26#endif /* _UAPI__SOCK_DIAG_H__ */ 36#endif /* _UAPI__SOCK_DIAG_H__ */
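The sock_diag.h hunk adds the sknetlink_groups enumeration, multicast groups on which the kernel broadcasts notifications when TCP/UDP sockets are destroyed. A minimal subscription sketch over NETLINK_SOCK_DIAG; parsing of the returned diag messages is omitted:

/* Sketch: listen for IPv4 TCP socket destroy notifications. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

int main(void)
{
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
        int grp = SKNLGRP_INET_TCP_DESTROY;
        char buf[8192];
        ssize_t n;

        if (fd < 0) {
                perror("socket(NETLINK_SOCK_DIAG)");
                return 1;
        }
        /* Each destroyed IPv4 TCP socket is reported to this group as a
         * sock_diag message; parse it like an ordinary diag dump reply. */
        if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                       &grp, sizeof(grp)) < 0)
                perror("NETLINK_ADD_MEMBERSHIP");
        while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
                printf("got %zd bytes of destroy notifications\n", n);
        close(fd);
        return 0;
}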
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index faa72f4fa547..65a77b071e22 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -113,6 +113,8 @@ enum {
113#define TCP_TIMESTAMP 24 113#define TCP_TIMESTAMP 24
114#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */ 114#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */
115#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */ 115#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */
116#define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */
117#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */
116 118
117struct tcp_repair_opt { 119struct tcp_repair_opt {
118 __u32 opt_code; 120 __u32 opt_code;
@@ -190,8 +192,10 @@ struct tcp_info {
190 192
191 __u64 tcpi_pacing_rate; 193 __u64 tcpi_pacing_rate;
192 __u64 tcpi_max_pacing_rate; 194 __u64 tcpi_max_pacing_rate;
193 __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */ 195 __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
194 __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ 196 __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
197 __u32 tcpi_segs_out; /* RFC4898 tcpEStatsPerfSegsOut */
198 __u32 tcpi_segs_in; /* RFC4898 tcpEStatsPerfSegsIn */
195}; 199};
196 200
197/* for TCP_MD5SIG socket option */ 201/* for TCP_MD5SIG socket option */
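The tcp.h hunk adds TCP_SAVE_SYN and TCP_SAVED_SYN: set the former on a listener and each accepted socket keeps a copy of the headers of the SYN that created it, retrievable once via the latter. A minimal sketch; the listen address and port are examples, and the option values are defined locally in case the libc headers predate them:

/* Sketch: record and read back SYN headers for accepted connections. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_SAVE_SYN
#define TCP_SAVE_SYN  27
#define TCP_SAVED_SYN 28
#endif

int main(void)
{
        int lfd = socket(AF_INET, SOCK_STREAM, 0), one = 1, cfd;
        struct sockaddr_in sin = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_ANY),
                .sin_port = htons(8080),
        };
        unsigned char syn[512];
        socklen_t len = sizeof(syn);

        /* Must be enabled on the listener before connections arrive. */
        setsockopt(lfd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));
        bind(lfd, (struct sockaddr *)&sin, sizeof(sin));
        listen(lfd, 16);

        cfd = accept(lfd, NULL, NULL);
        /* Returns the network and TCP headers (including options) of the
         * SYN for this connection; a second call returns nothing. */
        if (getsockopt(cfd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len) == 0)
                printf("saved SYN is %u bytes\n", (unsigned)len);
        close(cfd);
        close(lfd);
        return 0;
}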
diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h
index dac199a2dba5..01c4410352ff 100644
--- a/include/uapi/linux/tty.h
+++ b/include/uapi/linux/tty.h
@@ -34,5 +34,6 @@
34#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */ 34#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
35#define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */ 35#define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */
36#define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */ 36#define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */
37#define N_NCI 25 /* NFC NCI UART */
37 38
38#endif /* _UAPI_LINUX_TTY_H */ 39#endif /* _UAPI_LINUX_TTY_H */
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index fae4864737fa..072e41e45ee2 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -15,7 +15,7 @@
15#define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */ 15#define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */
16#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */ 16#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */
17#define ASYNCB_SPLIT_TERMIOS 3 /* [x] Separate termios for dialin/callout */ 17#define ASYNCB_SPLIT_TERMIOS 3 /* [x] Separate termios for dialin/callout */
18#define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */ 18#define ASYNCB_SPD_HI 4 /* Use 57600 instead of 38400 bps */
19#define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */ 19#define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */
20#define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */ 20#define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */
21#define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during 21#define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 26db20647e6f..9cac6325cc7e 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -24,6 +24,7 @@
24 * @colorspace: colorspace of the data (from enum v4l2_colorspace) 24 * @colorspace: colorspace of the data (from enum v4l2_colorspace)
25 * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding) 25 * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding)
26 * @quantization: quantization of the data (from enum v4l2_quantization) 26 * @quantization: quantization of the data (from enum v4l2_quantization)
27 * @xfer_func: transfer function of the data (from enum v4l2_xfer_func)
27 */ 28 */
28struct v4l2_mbus_framefmt { 29struct v4l2_mbus_framefmt {
29 __u32 width; 30 __u32 width;
@@ -33,7 +34,8 @@ struct v4l2_mbus_framefmt {
33 __u32 colorspace; 34 __u32 colorspace;
34 __u16 ycbcr_enc; 35 __u16 ycbcr_enc;
35 __u16 quantization; 36 __u16 quantization;
36 __u32 reserved[6]; 37 __u16 xfer_func;
38 __u16 reserved[11];
37}; 39};
38 40
39#ifndef __KERNEL__ 41#ifndef __KERNEL__
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index b57b750c222f..9fd7b5d8df2f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -36,6 +36,8 @@
36/* Two-stage IOMMU */ 36/* Two-stage IOMMU */
37#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */ 37#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
38 38
39#define VFIO_SPAPR_TCE_v2_IOMMU 7
40
39/* 41/*
40 * The IOCTL interface is designed for extensibility by embedding the 42 * The IOCTL interface is designed for extensibility by embedding the
41 * structure length (argsz) and flags into structures passed between 43 * structure length (argsz) and flags into structures passed between
@@ -443,6 +445,23 @@ struct vfio_iommu_type1_dma_unmap {
443/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */ 445/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
444 446
445/* 447/*
448 * The SPAPR TCE DDW info struct provides the information about
449 * the details of Dynamic DMA window capability.
450 *
451 * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
452 * @max_dynamic_windows_supported tells the maximum number of windows
453 * which the platform can create.
454 * @levels tells the maximum number of levels in multi-level IOMMU tables;
455 * this allows splitting a table into smaller chunks which reduces
456 * the amount of physically contiguous memory required for the table.
457 */
458struct vfio_iommu_spapr_tce_ddw_info {
459 __u64 pgsizes; /* Bitmap of supported page sizes */
460 __u32 max_dynamic_windows_supported;
461 __u32 levels;
462};
463
464/*
446 * The SPAPR TCE info struct provides the information about the PCI bus 465 * The SPAPR TCE info struct provides the information about the PCI bus
447 * address ranges available for DMA, these values are programmed into 466 * address ranges available for DMA, these values are programmed into
448 * the hardware so the guest has to know that information. 467 * the hardware so the guest has to know that information.
@@ -452,14 +471,17 @@ struct vfio_iommu_type1_dma_unmap {
452 * addresses too so the window works as a filter rather than an offset 471 * addresses too so the window works as a filter rather than an offset
453 * for IOVA addresses. 472 * for IOVA addresses.
454 * 473 *
455 * A flag will need to be added if other page sizes are supported, 474 * Flags supported:
456 * so as defined here, it is always 4k. 475 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
476 * (DDW) support is present. @ddw is only supported when DDW is present.
457 */ 477 */
458struct vfio_iommu_spapr_tce_info { 478struct vfio_iommu_spapr_tce_info {
459 __u32 argsz; 479 __u32 argsz;
460 __u32 flags; /* reserved for future use */ 480 __u32 flags;
481#define VFIO_IOMMU_SPAPR_INFO_DDW (1 << 0) /* DDW supported */
461 __u32 dma32_window_start; /* 32 bit window start (bytes) */ 482 __u32 dma32_window_start; /* 32 bit window start (bytes) */
462 __u32 dma32_window_size; /* 32 bit window size (bytes) */ 483 __u32 dma32_window_size; /* 32 bit window size (bytes) */
484 struct vfio_iommu_spapr_tce_ddw_info ddw;
463}; 485};
464 486
465#define VFIO_IOMMU_SPAPR_TCE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12) 487#define VFIO_IOMMU_SPAPR_TCE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
@@ -470,12 +492,23 @@ struct vfio_iommu_spapr_tce_info {
470 * - unfreeze IO/DMA for frozen PE; 492 * - unfreeze IO/DMA for frozen PE;
471 * - read PE state; 493 * - read PE state;
472 * - reset PE; 494 * - reset PE;
473 * - configure PE. 495 * - configure PE;
496 * - inject EEH error.
474 */ 497 */
498struct vfio_eeh_pe_err {
499 __u32 type;
500 __u32 func;
501 __u64 addr;
502 __u64 mask;
503};
504
475struct vfio_eeh_pe_op { 505struct vfio_eeh_pe_op {
476 __u32 argsz; 506 __u32 argsz;
477 __u32 flags; 507 __u32 flags;
478 __u32 op; 508 __u32 op;
509 union {
510 struct vfio_eeh_pe_err err;
511 };
479}; 512};
480 513
481#define VFIO_EEH_PE_DISABLE 0 /* Disable EEH functionality */ 514#define VFIO_EEH_PE_DISABLE 0 /* Disable EEH functionality */
@@ -492,9 +525,70 @@ struct vfio_eeh_pe_op {
492#define VFIO_EEH_PE_RESET_HOT 6 /* Assert hot reset */ 525#define VFIO_EEH_PE_RESET_HOT 6 /* Assert hot reset */
493#define VFIO_EEH_PE_RESET_FUNDAMENTAL 7 /* Assert fundamental reset */ 526#define VFIO_EEH_PE_RESET_FUNDAMENTAL 7 /* Assert fundamental reset */
494#define VFIO_EEH_PE_CONFIGURE 8 /* PE configuration */ 527#define VFIO_EEH_PE_CONFIGURE 8 /* PE configuration */
528#define VFIO_EEH_PE_INJECT_ERR 9 /* Inject EEH error */
495 529
496#define VFIO_EEH_PE_OP _IO(VFIO_TYPE, VFIO_BASE + 21) 530#define VFIO_EEH_PE_OP _IO(VFIO_TYPE, VFIO_BASE + 21)
497 531
532/**
533 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
534 *
535 * Registers user space memory where DMA is allowed. It pins
536 * user pages and does the locked memory accounting so
537 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
538 * get faster.
539 */
540struct vfio_iommu_spapr_register_memory {
541 __u32 argsz;
542 __u32 flags;
543 __u64 vaddr; /* Process virtual address */
544 __u64 size; /* Size of mapping (bytes) */
545};
546#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
547
548/**
549 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
550 *
551 * Unregisters user space memory registered with
552 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
553 * Uses vfio_iommu_spapr_register_memory for parameters.
554 */
555#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 18)
556
557/**
558 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
559 *
560 * Creates an additional TCE table and programs it (sets a new DMA window)
561 * to every IOMMU group in the container. It receives page shift, window
562 * size and number of levels in the TCE table being created.
563 *
564 * It allocates and returns an offset on a PCI bus of the new DMA window.
565 */
566struct vfio_iommu_spapr_tce_create {
567 __u32 argsz;
568 __u32 flags;
569 /* in */
570 __u32 page_shift;
571 __u64 window_size;
572 __u32 levels;
573 /* out */
574 __u64 start_addr;
575};
576#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
577
578/**
579 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
580 *
581 * Unprograms a TCE table from all groups in the container and destroys it.
582 * It receives a PCI bus offset as a window id.
583 */
584struct vfio_iommu_spapr_tce_remove {
585 __u32 argsz;
586 __u32 flags;
587 /* in */
588 __u64 start_addr;
589};
590#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
591
498/* ***************************************************************** */ 592/* ***************************************************************** */
499 593
500#endif /* _UAPIVFIO_H */ 594#endif /* _UAPIVFIO_H */
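The vfio.h hunk extends the SPAPR TCE interface with memory pre-registration and dynamic DMA window (DDW) creation/removal. A hedged sketch of the two new calls as a helper operating on an already-configured container fd (group attach and VFIO_SET_IOMMU with VFIO_SPAPR_TCE_v2_IOMMU are assumed done elsewhere); the sizes and page shift are examples:

/* Sketch: pre-register user memory and create an extra DMA window. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

static int spapr_setup_dma(int container_fd)
{
        size_t size = 16UL << 20;                    /* 16 MiB example */
        void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct vfio_iommu_spapr_register_memory reg = {
                .argsz = sizeof(reg),
                .vaddr = (__u64)(unsigned long)buf,
                .size  = size,
        };
        struct vfio_iommu_spapr_tce_create create = {
                .argsz = sizeof(create),
                .page_shift = 16,                    /* 64K IOMMU pages */
                .window_size = 1UL << 30,            /* 1 GiB window */
                .levels = 1,
        };

        /* Pin and account the memory once, so later MAP_DMA/UNMAP_DMA on
         * this range stay fast. */
        if (ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg) < 0)
                perror("VFIO_IOMMU_SPAPR_REGISTER_MEMORY");

        /* Request a second window; its bus offset comes back in start_addr. */
        if (ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create) < 0)
                perror("VFIO_IOMMU_SPAPR_TCE_CREATE");
        else
                printf("new DMA window at bus offset 0x%llx\n",
                       (unsigned long long)create.start_addr);
        return 0;
}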
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index bb6a5b4cb3c5..ab3731917bac 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -103,6 +103,20 @@ struct vhost_memory {
103/* Get accessor: reads index, writes value in num */ 103/* Get accessor: reads index, writes value in num */
104#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state) 104#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
105 105
106/* Set the vring byte order in num. Valid values are VHOST_VRING_LITTLE_ENDIAN
107 * or VHOST_VRING_BIG_ENDIAN (other values return -EINVAL).
108 * The byte order cannot be changed while the device is active: trying to do so
109 * returns -EBUSY.
110 * This is a legacy only API that is simply ignored when VIRTIO_F_VERSION_1 is
111 * set.
112 * Not all kernel configurations support this ioctl, but all configurations that
113 * support SET also support GET.
114 */
115#define VHOST_VRING_LITTLE_ENDIAN 0
116#define VHOST_VRING_BIG_ENDIAN 1
117#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
118#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
119
106/* The following ioctls use eventfd file descriptors to signal and poll 120/* The following ioctls use eventfd file descriptors to signal and poll
107 * for events. */ 121 * for events. */
108 122
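The vhost.h hunk adds VHOST_SET_VRING_ENDIAN/VHOST_GET_VRING_ENDIAN so a legacy (pre VIRTIO_F_VERSION_1) guest's vring byte order can be fixed per ring before the device starts. A minimal sketch against vhost-net; the device node is the usual one but still illustrative:

/* Sketch: force little-endian vring layout for ring 0 of vhost-net. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
        int fd = open("/dev/vhost-net", O_RDWR);
        struct vhost_vring_state state = {
                .index = 0,                          /* which virtqueue */
                .num   = VHOST_VRING_LITTLE_ENDIAN,  /* requested byte order */
        };

        if (fd < 0) {
                perror("open /dev/vhost-net");
                return 1;
        }
        if (ioctl(fd, VHOST_SET_OWNER) < 0)
                perror("VHOST_SET_OWNER");
        /* Ignored once VIRTIO_F_VERSION_1 is negotiated; fails with EBUSY
         * if the ring is already active, and may be unsupported entirely
         * on kernels built without the cross-endian option. */
        if (ioctl(fd, VHOST_SET_VRING_ENDIAN, &state) < 0)
                perror("VHOST_SET_VRING_ENDIAN");
        close(fd);
        return 0;
}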
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index fa376f7666ba..3228fbebcd63 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -178,6 +178,12 @@ enum v4l2_memory {
178 178
179/* see also http://vektor.theorem.ca/graphics/ycbcr/ */ 179/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
180enum v4l2_colorspace { 180enum v4l2_colorspace {
181 /*
182 * Default colorspace, i.e. let the driver figure it out.
183 * Can only be used with video capture.
184 */
185 V4L2_COLORSPACE_DEFAULT = 0,
186
181 /* SMPTE 170M: used for broadcast NTSC/PAL SDTV */ 187 /* SMPTE 170M: used for broadcast NTSC/PAL SDTV */
182 V4L2_COLORSPACE_SMPTE170M = 1, 188 V4L2_COLORSPACE_SMPTE170M = 1,
183 189
@@ -220,8 +226,56 @@ enum v4l2_colorspace {
220 226
221 /* BT.2020 colorspace, used for UHDTV. */ 227 /* BT.2020 colorspace, used for UHDTV. */
222 V4L2_COLORSPACE_BT2020 = 10, 228 V4L2_COLORSPACE_BT2020 = 10,
229
230 /* Raw colorspace: for RAW unprocessed images */
231 V4L2_COLORSPACE_RAW = 11,
232};
233
234/*
235 * Determine how COLORSPACE_DEFAULT should map to a proper colorspace.
236 * This depends on whether this is a SDTV image (use SMPTE 170M), an
237 * HDTV image (use Rec. 709), or something else (use sRGB).
238 */
239#define V4L2_MAP_COLORSPACE_DEFAULT(is_sdtv, is_hdtv) \
240 ((is_sdtv) ? V4L2_COLORSPACE_SMPTE170M : \
241 ((is_hdtv) ? V4L2_COLORSPACE_REC709 : V4L2_COLORSPACE_SRGB))
242
243enum v4l2_xfer_func {
244 /*
245 * Mapping of V4L2_XFER_FUNC_DEFAULT to actual transfer functions
246 * for the various colorspaces:
247 *
248 * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
249 * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_REC709 and
250 * V4L2_COLORSPACE_BT2020: V4L2_XFER_FUNC_709
251 *
252 * V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_JPEG: V4L2_XFER_FUNC_SRGB
253 *
254 * V4L2_COLORSPACE_ADOBERGB: V4L2_XFER_FUNC_ADOBERGB
255 *
256 * V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M
257 *
258 * V4L2_COLORSPACE_RAW: V4L2_XFER_FUNC_NONE
259 */
260 V4L2_XFER_FUNC_DEFAULT = 0,
261 V4L2_XFER_FUNC_709 = 1,
262 V4L2_XFER_FUNC_SRGB = 2,
263 V4L2_XFER_FUNC_ADOBERGB = 3,
264 V4L2_XFER_FUNC_SMPTE240M = 4,
265 V4L2_XFER_FUNC_NONE = 5,
223}; 266};
224 267
268/*
269 * Determine how XFER_FUNC_DEFAULT should map to a proper transfer function.
270 * This depends on the colorspace.
271 */
272#define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \
273 ((colsp) == V4L2_COLORSPACE_ADOBERGB ? V4L2_XFER_FUNC_ADOBERGB : \
274 ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \
275 ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
276 ((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? \
277 V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709))))
278
225enum v4l2_ycbcr_encoding { 279enum v4l2_ycbcr_encoding {
226 /* 280 /*
227 * Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the 281 * Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the
@@ -266,6 +320,16 @@ enum v4l2_ycbcr_encoding {
266 V4L2_YCBCR_ENC_SMPTE240M = 8, 320 V4L2_YCBCR_ENC_SMPTE240M = 8,
267}; 321};
268 322
323/*
324 * Determine how YCBCR_ENC_DEFAULT should map to a proper Y'CbCr encoding.
325 * This depends on the colorspace.
326 */
327#define V4L2_MAP_YCBCR_ENC_DEFAULT(colsp) \
328 ((colsp) == V4L2_COLORSPACE_REC709 ? V4L2_YCBCR_ENC_709 : \
329 ((colsp) == V4L2_COLORSPACE_BT2020 ? V4L2_YCBCR_ENC_BT2020 : \
330 ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_YCBCR_ENC_SMPTE240M : \
331 V4L2_YCBCR_ENC_601)))
332
269enum v4l2_quantization { 333enum v4l2_quantization {
270 /* 334 /*
271 * The default for R'G'B' quantization is always full range, except 335 * The default for R'G'B' quantization is always full range, except
@@ -278,6 +342,17 @@ enum v4l2_quantization {
278 V4L2_QUANTIZATION_LIM_RANGE = 2, 342 V4L2_QUANTIZATION_LIM_RANGE = 2,
279}; 343};
280 344
345/*
346 * Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
347 * This depends on whether the image is RGB or not, the colorspace and the
348 * Y'CbCr encoding.
349 */
350#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, colsp, ycbcr_enc) \
351 (((is_rgb) && (colsp) == V4L2_COLORSPACE_BT2020) ? V4L2_QUANTIZATION_LIM_RANGE : \
352 (((is_rgb) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
353 (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
354 V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
355
281enum v4l2_priority { 356enum v4l2_priority {
282 V4L2_PRIORITY_UNSET = 0, /* not initialized */ 357 V4L2_PRIORITY_UNSET = 0, /* not initialized */
283 V4L2_PRIORITY_BACKGROUND = 1, 358 V4L2_PRIORITY_BACKGROUND = 1,
@@ -370,6 +445,7 @@ struct v4l2_pix_format {
370 __u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */ 445 __u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
371 __u32 ycbcr_enc; /* enum v4l2_ycbcr_encoding */ 446 __u32 ycbcr_enc; /* enum v4l2_ycbcr_encoding */
372 __u32 quantization; /* enum v4l2_quantization */ 447 __u32 quantization; /* enum v4l2_quantization */
448 __u32 xfer_func; /* enum v4l2_xfer_func */
373}; 449};
374 450
375/* Pixel format FOURCC depth Description */ 451/* Pixel format FOURCC depth Description */
@@ -404,6 +480,7 @@ struct v4l2_pix_format {
404#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ 480#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
405#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */ 481#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
406#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ 482#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
483#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
407 484
408/* Grey bit-packed formats */ 485/* Grey bit-packed formats */
409#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */ 486#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
@@ -810,6 +887,8 @@ struct v4l2_buffer {
810#define V4L2_BUF_FLAG_TSTAMP_SRC_MASK 0x00070000 887#define V4L2_BUF_FLAG_TSTAMP_SRC_MASK 0x00070000
811#define V4L2_BUF_FLAG_TSTAMP_SRC_EOF 0x00000000 888#define V4L2_BUF_FLAG_TSTAMP_SRC_EOF 0x00000000
812#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000 889#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000
890/* mem2mem encoder/decoder */
891#define V4L2_BUF_FLAG_LAST 0x00100000
813 892
814/** 893/**
815 * struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor 894 * struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
@@ -1865,6 +1944,7 @@ struct v4l2_plane_pix_format {
1865 * @flags: format flags (V4L2_PIX_FMT_FLAG_*) 1944 * @flags: format flags (V4L2_PIX_FMT_FLAG_*)
1866 * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding 1945 * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
1867 * @quantization: enum v4l2_quantization, colorspace quantization 1946 * @quantization: enum v4l2_quantization, colorspace quantization
1947 * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
1868 */ 1948 */
1869struct v4l2_pix_format_mplane { 1949struct v4l2_pix_format_mplane {
1870 __u32 width; 1950 __u32 width;
@@ -1878,7 +1958,8 @@ struct v4l2_pix_format_mplane {
1878 __u8 flags; 1958 __u8 flags;
1879 __u8 ycbcr_enc; 1959 __u8 ycbcr_enc;
1880 __u8 quantization; 1960 __u8 quantization;
1881 __u8 reserved[8]; 1961 __u8 xfer_func;
1962 __u8 reserved[7];
1882} __attribute__ ((packed)); 1963} __attribute__ ((packed));
1883 1964
1884/** 1965/**
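The videodev2.h hunk introduces enum v4l2_xfer_func plus the V4L2_MAP_*_DEFAULT macros that resolve the "default" colorimetry values from the colorspace and image type. A minimal sketch of chaining them; the RGB HDTV input is just an example choice:

/* Sketch: resolve default colorimetry for an RGB HDTV-sized format. */
#include <stdio.h>
#include <stdbool.h>
#include <linux/videodev2.h>

int main(void)
{
        bool is_rgb = true;                      /* e.g. V4L2_PIX_FMT_RGB24 */
        bool is_sdtv = false, is_hdtv = true;    /* e.g. a 1920x1080 source */
        enum v4l2_colorspace colsp;
        enum v4l2_xfer_func xfer;
        enum v4l2_ycbcr_encoding enc;
        enum v4l2_quantization quant;

        colsp = V4L2_MAP_COLORSPACE_DEFAULT(is_sdtv, is_hdtv);
        xfer  = V4L2_MAP_XFER_FUNC_DEFAULT(colsp);
        enc   = V4L2_MAP_YCBCR_ENC_DEFAULT(colsp);
        quant = V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, colsp, enc);

        /* For an RGB HDTV source this resolves to REC709 colorspace, the
         * 709 transfer function and encoding, and full-range quantization. */
        printf("colorspace=%d xfer_func=%d ycbcr_enc=%d quantization=%d\n",
               colsp, xfer, enc, quant);
        return 0;
}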
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
new file mode 100644
index 000000000000..478be5270e26
--- /dev/null
+++ b/include/uapi/linux/virtio_gpu.h
@@ -0,0 +1,206 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef VIRTIO_GPU_HW_H
+#define VIRTIO_GPU_HW_H
+
+#include <linux/types.h>
+
+enum virtio_gpu_ctrl_type {
+ VIRTIO_GPU_UNDEFINED = 0,
+
+ /* 2d commands */
+ VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
+ VIRTIO_GPU_CMD_RESOURCE_UNREF,
+ VIRTIO_GPU_CMD_SET_SCANOUT,
+ VIRTIO_GPU_CMD_RESOURCE_FLUSH,
+ VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
+ VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
+ VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
+
+ /* cursor commands */
+ VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
+ VIRTIO_GPU_CMD_MOVE_CURSOR,
+
+ /* success responses */
+ VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
+ VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
+
+ /* error responses */
+ VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
+ VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
+ VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
+ VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
+ VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
+ VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
+};
+
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+
+struct virtio_gpu_ctrl_hdr {
+ __le32 type;
+ __le32 flags;
+ __le64 fence_id;
+ __le32 ctx_id;
+ __le32 padding;
+};
+
+/* data passed in the cursor vq */
+
+struct virtio_gpu_cursor_pos {
+ __le32 scanout_id;
+ __le32 x;
+ __le32 y;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
+struct virtio_gpu_update_cursor {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_cursor_pos pos; /* update & move */
+ __le32 resource_id; /* update only */
+ __le32 hot_x; /* update only */
+ __le32 hot_y; /* update only */
+ __le32 padding;
+};
+
+/* data passed in the control vq, 2d related */
+
+struct virtio_gpu_rect {
+ __le32 x;
+ __le32 y;
+ __le32 width;
+ __le32 height;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNREF */
+struct virtio_gpu_resource_unref {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */
+struct virtio_gpu_resource_create_2d {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 format;
+ __le32 width;
+ __le32 height;
+};
+
+/* VIRTIO_GPU_CMD_SET_SCANOUT */
+struct virtio_gpu_set_scanout {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le32 scanout_id;
+ __le32 resource_id;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */
+struct virtio_gpu_resource_flush {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */
+struct virtio_gpu_transfer_to_host_2d {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le64 offset;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+struct virtio_gpu_mem_entry {
+ __le64 addr;
+ __le32 length;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */
+struct virtio_gpu_resource_attach_backing {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 nr_entries;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */
+struct virtio_gpu_resource_detach_backing {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */
+#define VIRTIO_GPU_MAX_SCANOUTS 16
+struct virtio_gpu_resp_display_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_display_one {
+ struct virtio_gpu_rect r;
+ __le32 enabled;
+ __le32 flags;
+ } pmodes[VIRTIO_GPU_MAX_SCANOUTS];
+};
+
+#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
+
+struct virtio_gpu_config {
+ __u32 events_read;
+ __u32 events_clear;
+ __u32 num_scanouts;
+ __u32 reserved;
+};
+
+/* simple formats for fbcon/X use */
+enum virtio_gpu_formats {
+ VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1,
+ VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2,
+ VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3,
+ VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4,
+
+ VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67,
+ VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68,
+
+ VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121,
+ VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
+};
+
+#endif
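As a rough illustration of how these definitions fit together (not part of the patch itself), a guest driver might fill a VIRTIO_GPU_CMD_RESOURCE_CREATE_2D request roughly as sketched below before queueing it on the control virtqueue; fill_create_2d() is a hypothetical helper, and memset()/cpu_to_le32() are assumed from the usual kernel headers.

/* Hypothetical sketch: build a 2D resource-create command.  On success
 * the device is expected to answer with VIRTIO_GPU_RESP_OK_NODATA. */
static void fill_create_2d(struct virtio_gpu_resource_create_2d *cmd,
                           __u32 resource_id, __u32 width, __u32 height)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd->resource_id = cpu_to_le32(resource_id);
	cmd->format = cpu_to_le32(VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM);
	cmd->width = cpu_to_le32(width);
	cmd->height = cpu_to_le32(height);
}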
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 5f60aa4be50a..77925f587b15 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -39,6 +39,7 @@
 #define VIRTIO_ID_9P 9 /* 9p virtio console */
 #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
 #define VIRTIO_ID_CAIF 12 /* Virtio caif */
+#define VIRTIO_ID_GPU 16 /* virtio GPU */
 #define VIRTIO_ID_INPUT 18 /* virtio input */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 7bbee79ca293..ec32293a00db 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -34,6 +34,7 @@
 /* The feature bitmap for virtio net */
 #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
 #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
 #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
 #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
 #define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
 
+/*
+ * Control network offloads
+ *
+ * Reconfigures the network offloads that Guest can handle.
+ *
+ * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
+ *
+ * Command data format matches the feature bit mask exactly.
+ *
+ * See VIRTIO_NET_F_GUEST_* for the list of offloads
+ * that can be enabled/disabled.
+ */
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
+
 #endif /* _LINUX_VIRTIO_NET_H */
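To make the comment above concrete, here is a hypothetical sketch (not from the patch) of the payload a guest would build for a VIRTIO_NET_CTRL_GUEST_OFFLOADS / VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET control command; per the comment, the data is simply a 64-bit mask laid out like the feature bits.

/* Hypothetical sketch: ask the host to leave only checksum and TSOv4/v6
 * receive offloads enabled.  The mask is sent as the command data on the
 * control virtqueue with class VIRTIO_NET_CTRL_GUEST_OFFLOADS and
 * command VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET. */
static __u64 guest_offloads_mask(void)
{
	return (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
	       (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
	       (1ULL << VIRTIO_NET_F_GUEST_TSO6);
}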
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 75301468359f..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
  __le32 queue_used_hi; /* read-write */
 };
 
+/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
+struct virtio_pci_cfg_cap {
+ struct virtio_pci_cap cap;
+ __u8 pci_cfg_data[4]; /* Data for BAR access. */
+};
+
 /* Macro versions of offsets for the Old Timers! */
 #define VIRTIO_PCI_CAP_VNDR 0
 #define VIRTIO_PCI_CAP_NEXT 1
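For orientation, a hypothetical sketch (not from the patch) of what the new capability is for: cap.bar/offset/length select a window into a device BAR, and accesses to pci_cfg_data are then forwarded there, so the virtio configuration structures can be reached purely through PCI configuration space. The field names come from struct virtio_pci_cap earlier in this header.

/* Hypothetical sketch, operating on a local copy of the capability just to
 * show the field roles; a real driver would program these fields through
 * PCI config space accessors rather than a plain struct in memory. */
static void select_cfg_window(struct virtio_pci_cfg_cap *cfg, __u8 bar,
                              __le32 offset)
{
	cfg->cap.bar = bar;		/* which BAR the window points into */
	cfg->cap.offset = offset;	/* byte offset inside that BAR */
	cfg->cap.length = cpu_to_le32(sizeof(cfg->pci_cfg_data));
}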
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 915980ac68df..c07295969b7e 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -31,6 +31,9 @@
  * SUCH DAMAGE.
  *
  * Copyright Rusty Russell IBM Corporation 2007. */
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
 #include <linux/types.h>
 #include <linux/virtio_types.h>
 
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
  vr->num = num;
  vr->desc = p;
  vr->avail = p + num*sizeof(struct vring_desc);
- vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16)
+ vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
  + align-1) & ~(align - 1));
 }
 
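The uintptr_t cast, together with the <stdint.h> include added above for non-kernel builds, appears aimed at keeping this pointer arithmetic correct when the header is consumed outside the kernel; a minimal, hypothetical userspace sketch using the helpers from this header:

/* Hypothetical userspace sketch: lay out a 256-entry ring in one
 * page-aligned allocation with vring_size()/vring_init(). */
#include <stdlib.h>
#include <linux/virtio_ring.h>

int main(void)
{
	struct vring vr;
	unsigned int num = 256;
	void *mem = aligned_alloc(4096, vring_size(num, 4096));

	if (!mem)
		return 1;
	vring_init(&vr, num, mem, 4096);
	/* vr.desc, vr.avail and vr.used now all point into 'mem'. */
	free(mem);
	return 0;
}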
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index cd6d789b73ec..99a8ca15fe64 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -32,10 +32,32 @@ struct cxl_ioctl_start_work {
 #define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
  CXL_START_WORK_NUM_IRQS)
 
+
+/* Possible modes that an afu can be in */
+#define CXL_MODE_DEDICATED 0x1
+#define CXL_MODE_DIRECTED 0x2
+
+/* possible flags for the cxl_afu_id flags field */
+#define CXL_AFUID_FLAG_SLAVE 0x1 /* In directed-mode afu is in slave mode */
+
+struct cxl_afu_id {
+ __u64 flags; /* One of CXL_AFUID_FLAG_X */
+ __u32 card_id;
+ __u32 afu_offset;
+ __u32 afu_mode; /* one of the CXL_MODE_X */
+ __u32 reserved1;
+ __u64 reserved2;
+ __u64 reserved3;
+ __u64 reserved4;
+ __u64 reserved5;
+ __u64 reserved6;
+};
+
 /* ioctl numbers */
 #define CXL_MAGIC 0xCA
 #define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
 #define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
+#define CXL_IOCTL_GET_AFU_ID _IOR(CXL_MAGIC, 0x02, struct cxl_afu_id)
 
 #define CXL_READ_MIN_SIZE 0x1000 /* 4K */
 
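A hypothetical userspace sketch (not part of the patch) of the new ioctl; the device node path below is only an example and not defined by this header.

/* Hypothetical sketch: read the AFU descriptor exposed by the new
 * CXL_IOCTL_GET_AFU_ID ioctl and print a few of its fields. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <misc/cxl.h>

int main(void)
{
	struct cxl_afu_id id = { 0 };
	int fd = open("/dev/cxl/afu0.0d", O_RDWR);	/* example node */

	if (fd < 0 || ioctl(fd, CXL_IOCTL_GET_AFU_ID, &id) < 0)
		return 1;
	printf("card %u, afu %u, %s mode\n", id.card_id, id.afu_offset,
	       id.afu_mode == CXL_MODE_DEDICATED ? "dedicated" : "directed");
	return 0;
}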
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index b513e662d8e4..978841eeaff1 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -91,6 +91,7 @@ enum {
 
 enum {
  IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
+ IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
  IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
  IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 };
@@ -222,6 +223,8 @@ struct ib_uverbs_ex_query_device_resp {
  __u32 comp_mask;
  __u32 response_length;
  struct ib_uverbs_odp_caps odp_caps;
+ __u64 timestamp_mask;
+ __u64 hca_core_clock; /* in KHZ */
 };
 
 struct ib_uverbs_query_port {
@@ -353,11 +356,27 @@ struct ib_uverbs_create_cq {
  __u64 driver_data[0];
 };
 
+struct ib_uverbs_ex_create_cq {
+ __u64 user_handle;
+ __u32 cqe;
+ __u32 comp_vector;
+ __s32 comp_channel;
+ __u32 comp_mask;
+ __u32 flags;
+ __u32 reserved;
+};
+
 struct ib_uverbs_create_cq_resp {
  __u32 cq_handle;
  __u32 cqe;
 };
 
+struct ib_uverbs_ex_create_cq_resp {
+ struct ib_uverbs_create_cq_resp base;
+ __u32 comp_mask;
+ __u32 response_length;
+};
+
 struct ib_uverbs_resize_cq {
  __u64 response;
  __u32 cq_handle;
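To show how the extended command relates to the classic one (a hypothetical sketch, not part of the patch): the request mirrors ib_uverbs_create_cq but adds comp_mask/flags, and the response embeds the old ib_uverbs_create_cq_resp as 'base'. 'cq_cookie' below is a placeholder for whatever handle the caller wants echoed back.

/* Hypothetical sketch of what a userspace verbs library might place in the
 * extended create-CQ request. */
struct ib_uverbs_ex_create_cq req = {
	.user_handle = cq_cookie,	/* placeholder caller cookie */
	.cqe = 256,			/* requested CQ depth */
	.comp_vector = 0,
	.comp_channel = -1,		/* no completion channel */
	.comp_mask = 0,
	.flags = 0,			/* extension flags, none requested */
};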
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
new file mode 100644
index 000000000000..51b8066a223b
--- /dev/null
+++ b/include/uapi/sound/asoc.h
@@ -0,0 +1,401 @@
+/*
+ * uapi/sound/asoc.h -- ALSA SoC Firmware Controls and DAPM
+ *
+ * Copyright (C) 2012 Texas Instruments Inc.
+ * Copyright (C) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
+ * algorithms, equalisers, DAIs, widgets etc.
+*/
+
+#ifndef __LINUX_UAPI_SND_ASOC_H
+#define __LINUX_UAPI_SND_ASOC_H
+
+#include <linux/types.h>
+#include <sound/asound.h>
+
+/*
+ * Maximum number of channels topology kcontrol can represent.
+ */
+#define SND_SOC_TPLG_MAX_CHAN 8
+
+/*
+ * Maximum number of PCM formats capability
+ */
+#define SND_SOC_TPLG_MAX_FORMATS 16
+
+/*
+ * Maximum number of PCM stream configs
+ */
+#define SND_SOC_TPLG_STREAM_CONFIG_MAX 8
+
+/* individual kcontrol info types - can be mixed with other types */
+#define SND_SOC_TPLG_CTL_VOLSW 1
+#define SND_SOC_TPLG_CTL_VOLSW_SX 2
+#define SND_SOC_TPLG_CTL_VOLSW_XR_SX 3
+#define SND_SOC_TPLG_CTL_ENUM 4
+#define SND_SOC_TPLG_CTL_BYTES 5
+#define SND_SOC_TPLG_CTL_ENUM_VALUE 6
+#define SND_SOC_TPLG_CTL_RANGE 7
+#define SND_SOC_TPLG_CTL_STROBE 8
+
+
+/* individual widget kcontrol info types - can be mixed with other types */
+#define SND_SOC_TPLG_DAPM_CTL_VOLSW 64
+#define SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE 65
+#define SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT 66
+#define SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE 67
+#define SND_SOC_TPLG_DAPM_CTL_PIN 68
+
+/* DAPM widget types - add new items to the end */
+#define SND_SOC_TPLG_DAPM_INPUT 0
+#define SND_SOC_TPLG_DAPM_OUTPUT 1
+#define SND_SOC_TPLG_DAPM_MUX 2
+#define SND_SOC_TPLG_DAPM_MIXER 3
+#define SND_SOC_TPLG_DAPM_PGA 4
+#define SND_SOC_TPLG_DAPM_OUT_DRV 5
+#define SND_SOC_TPLG_DAPM_ADC 6
+#define SND_SOC_TPLG_DAPM_DAC 7
+#define SND_SOC_TPLG_DAPM_SWITCH 8
+#define SND_SOC_TPLG_DAPM_PRE 9
+#define SND_SOC_TPLG_DAPM_POST 10
+#define SND_SOC_TPLG_DAPM_AIF_IN 11
+#define SND_SOC_TPLG_DAPM_AIF_OUT 12
+#define SND_SOC_TPLG_DAPM_DAI_IN 13
+#define SND_SOC_TPLG_DAPM_DAI_OUT 14
+#define SND_SOC_TPLG_DAPM_DAI_LINK 15
+#define SND_SOC_TPLG_DAPM_LAST SND_SOC_TPLG_DAPM_DAI_LINK
+
+/* Header magic number and string sizes */
+#define SND_SOC_TPLG_MAGIC 0x41536F43 /* ASoC */
+
+/* string sizes */
+#define SND_SOC_TPLG_NUM_TEXTS 16
+
+/* ABI version */
+#define SND_SOC_TPLG_ABI_VERSION 0x3
+
+/* Max size of TLV data */
+#define SND_SOC_TPLG_TLV_SIZE 32
+
+/*
+ * File and Block header data types.
+ * Add new generic and vendor types to end of list.
+ * Generic types are handled by the core whilst vendors types are passed
+ * to the component drivers for handling.
+ */
+#define SND_SOC_TPLG_TYPE_MIXER 1
+#define SND_SOC_TPLG_TYPE_BYTES 2
+#define SND_SOC_TPLG_TYPE_ENUM 3
+#define SND_SOC_TPLG_TYPE_DAPM_GRAPH 4
+#define SND_SOC_TPLG_TYPE_DAPM_WIDGET 5
+#define SND_SOC_TPLG_TYPE_DAI_LINK 6
+#define SND_SOC_TPLG_TYPE_PCM 7
+#define SND_SOC_TPLG_TYPE_MANIFEST 8
+#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
+#define SND_SOC_TPLG_TYPE_PDATA 10
+#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
+
+/* vendor block IDs - please add new vendor types to end */
+#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
+#define SND_SOC_TPLG_TYPE_VENDOR_CONFIG 1001
+#define SND_SOC_TPLG_TYPE_VENDOR_COEFF 1002
+#define SND_SOC_TPLG_TYPEVENDOR_CODEC 1003
+
+#define SND_SOC_TPLG_STREAM_PLAYBACK 0
+#define SND_SOC_TPLG_STREAM_CAPTURE 1
+
+/*
+ * Block Header.
+ * This header precedes all object and object arrays below.
+ */
+struct snd_soc_tplg_hdr {
+ __le32 magic; /* magic number */
+ __le32 abi; /* ABI version */
+ __le32 version; /* optional vendor specific version details */
+ __le32 type; /* SND_SOC_TPLG_TYPE_ */
+ __le32 size; /* size of this structure */
+ __le32 vendor_type; /* optional vendor specific type info */
+ __le32 payload_size; /* data bytes, excluding this header */
+ __le32 index; /* identifier for block */
+ __le32 count; /* number of elements in block */
+} __attribute__((packed));
+
+/*
+ * Private data.
+ * All topology objects may have private data that can be used by the driver or
+ * firmware. Core will ignore this data.
+ */
+struct snd_soc_tplg_private {
+ __le32 size; /* in bytes of private data */
+ char data[0];
+} __attribute__((packed));
+
+/*
+ * Kcontrol TLV data.
+ */
+struct snd_soc_tplg_tlv_dbscale {
+ __le32 min;
+ __le32 step;
+ __le32 mute;
+} __attribute__((packed));
+
+struct snd_soc_tplg_ctl_tlv {
+ __le32 size; /* in bytes of this structure */
+ __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */
+ union {
+ __le32 data[SND_SOC_TPLG_TLV_SIZE];
+ struct snd_soc_tplg_tlv_dbscale scale;
+ };
+} __attribute__((packed));
+
+/*
+ * Kcontrol channel data
+ */
+struct snd_soc_tplg_channel {
+ __le32 size; /* in bytes of this structure */
+ __le32 reg;
+ __le32 shift;
+ __le32 id; /* ID maps to Left, Right, LFE etc */
+} __attribute__((packed));
+
+/*
+ * Genericl Operations IDs, for binding Kcontrol or Bytes ext ops
+ * Kcontrol ops need get/put/info.
+ * Bytes ext ops need get/put.
+ */
+struct snd_soc_tplg_io_ops {
+ __le32 get;
+ __le32 put;
+ __le32 info;
+} __attribute__((packed));
+
+/*
+ * kcontrol header
+ */
+struct snd_soc_tplg_ctl_hdr {
+ __le32 size; /* in bytes of this structure */
+ __le32 type;
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le32 access;
+ struct snd_soc_tplg_io_ops ops;
+ struct snd_soc_tplg_ctl_tlv tlv;
+} __attribute__((packed));
+
+/*
+ * Stream Capabilities
+ */
+struct snd_soc_tplg_stream_caps {
+ __le32 size; /* in bytes of this structure */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le64 formats[SND_SOC_TPLG_MAX_FORMATS]; /* supported formats SNDRV_PCM_FMTBIT_* */
+ __le32 rates; /* supported rates SNDRV_PCM_RATE_* */
+ __le32 rate_min; /* min rate */
+ __le32 rate_max; /* max rate */
+ __le32 channels_min; /* min channels */
+ __le32 channels_max; /* max channels */
+ __le32 periods_min; /* min number of periods */
+ __le32 periods_max; /* max number of periods */
+ __le32 period_size_min; /* min period size bytes */
+ __le32 period_size_max; /* max period size bytes */
+ __le32 buffer_size_min; /* min buffer size bytes */
+ __le32 buffer_size_max; /* max buffer size bytes */
+} __attribute__((packed));
+
+/*
+ * FE or BE Stream configuration supported by SW/FW
+ */
+struct snd_soc_tplg_stream {
+ __le32 size; /* in bytes of this structure */
+ __le64 format; /* SNDRV_PCM_FMTBIT_* */
+ __le32 rate; /* SNDRV_PCM_RATE_* */
+ __le32 period_bytes; /* size of period in bytes */
+ __le32 buffer_bytes; /* size of buffer in bytes */
+ __le32 channels; /* channels */
+ __le32 tdm_slot; /* optional BE bitmask of supported TDM slots */
+ __le32 dai_fmt; /* SND_SOC_DAIFMT_ */
+} __attribute__((packed));
+
+/*
+ * Duplex stream configuration supported by SW/FW.
+ */
+struct snd_soc_tplg_stream_config {
+ __le32 size; /* in bytes of this structure */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ struct snd_soc_tplg_stream playback;
+ struct snd_soc_tplg_stream capture;
+} __attribute__((packed));
+
+/*
+ * Manifest. List totals for each payload type. Not used in parsing, but will
+ * be passed to the component driver before any other objects in order for any
+ * global component resource allocations.
+ *
+ * File block representation for manifest :-
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_hdr           | 1  |
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_manifest      | 1  |
+ * +-----------------------------------+----+
+ */
+struct snd_soc_tplg_manifest {
+ __le32 size; /* in bytes of this structure */
+ __le32 control_elems; /* number of control elements */
+ __le32 widget_elems; /* number of widget elements */
+ __le32 graph_elems; /* number of graph elements */
+ __le32 dai_elems; /* number of DAI elements */
+ __le32 dai_link_elems; /* number of DAI link elements */
+ struct snd_soc_tplg_private priv;
+} __attribute__((packed));
+
+/*
+ * Mixer kcontrol.
+ *
+ * File block representation for mixer kcontrol :-
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_hdr           | 1  |
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_mixer_control | N  |
+ * +-----------------------------------+----+
+ */
+struct snd_soc_tplg_mixer_control {
+ struct snd_soc_tplg_ctl_hdr hdr;
+ __le32 size; /* in bytes of this structure */
+ __le32 min;
+ __le32 max;
+ __le32 platform_max;
+ __le32 invert;
+ __le32 num_channels;
+ struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
+ struct snd_soc_tplg_private priv;
+} __attribute__((packed));
+
+/*
+ * Enumerated kcontrol
+ *
+ * File block representation for enum kcontrol :-
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_hdr           | 1  |
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_enum_control  | N  |
+ * +-----------------------------------+----+
+ */
+struct snd_soc_tplg_enum_control {
+ struct snd_soc_tplg_ctl_hdr hdr;
+ __le32 size; /* in bytes of this structure */
+ __le32 num_channels;
+ struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
+ __le32 items;
+ __le32 mask;
+ __le32 count;
+ char texts[SND_SOC_TPLG_NUM_TEXTS][SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le32 values[SND_SOC_TPLG_NUM_TEXTS * SNDRV_CTL_ELEM_ID_NAME_MAXLEN / 4];
+ struct snd_soc_tplg_private priv;
+} __attribute__((packed));
+
+/*
+ * Bytes kcontrol
+ *
+ * File block representation for bytes kcontrol :-
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_hdr           | 1  |
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_bytes_control | N  |
+ * +-----------------------------------+----+
+ */
+struct snd_soc_tplg_bytes_control {
+ struct snd_soc_tplg_ctl_hdr hdr;
+ __le32 size; /* in bytes of this structure */
+ __le32 max;
+ __le32 mask;
+ __le32 base;
+ __le32 num_regs;
+ struct snd_soc_tplg_io_ops ext_ops;
+ struct snd_soc_tplg_private priv;
+} __attribute__((packed));
+
+/*
+ * DAPM Graph Element
+ *
+ * File block representation for DAPM graph elements :-
+ * +-------------------------------------+----+
+ * | struct snd_soc_tplg_hdr             | 1  |
+ * +-------------------------------------+----+
+ * | struct snd_soc_tplg_dapm_graph_elem | N  |
+ * +-------------------------------------+----+
+ */
+struct snd_soc_tplg_dapm_graph_elem {
+ char sink[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ char control[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ char source[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+} __attribute__((packed));
+
+/*
+ * DAPM Widget.
+ *
+ * File block representation for DAPM widget :-
+ * +-------------------------------------+-----+
+ * | struct snd_soc_tplg_hdr             | 1   |
+ * +-------------------------------------+-----+
+ * | struct snd_soc_tplg_dapm_widget     | N   |
+ * +-------------------------------------+-----+
+ * | struct snd_soc_tplg_enum_control    | 0|1 |
+ * | struct snd_soc_tplg_mixer_control   | 0|N |
+ * +-------------------------------------+-----+
+ *
+ * Optional enum or mixer control can be appended to the end of each widget
+ * in the block.
+ */
+struct snd_soc_tplg_dapm_widget {
+ __le32 size; /* in bytes of this structure */
+ __le32 id; /* SND_SOC_DAPM_CTL */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ char sname[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+
+ __le32 reg; /* negative reg = no direct dapm */
+ __le32 shift; /* bits to shift */
+ __le32 mask; /* non-shifted mask */
+ __le32 subseq; /* sort within widget type */
+ __u32 invert; /* invert the power bit */
+ __u32 ignore_suspend; /* kept enabled over suspend */
+ __u16 event_flags;
+ __u16 event_type;
+ __u16 num_kcontrols;
+ struct snd_soc_tplg_private priv;
+ /*
+ * kcontrols that relate to this widget
+ * follow here after widget private data
+ */
+} __attribute__((packed));
+
+struct snd_soc_tplg_pcm_cfg_caps {
+ struct snd_soc_tplg_stream_caps caps;
+ struct snd_soc_tplg_stream_config configs[SND_SOC_TPLG_STREAM_CONFIG_MAX];
+ __le32 num_configs; /* number of configs */
+} __attribute__((packed));
+
+/*
+ * Describes SW/FW specific features of PCM or DAI link.
+ *
+ * File block representation for PCM/DAI-Link :-
+ * +-----------------------------------+-----+
+ * | struct snd_soc_tplg_hdr           | 1   |
+ * +-----------------------------------+-----+
+ * | struct snd_soc_tplg_dapm_pcm_dai  | N   |
+ * +-----------------------------------+-----+
+ */
+struct snd_soc_tplg_pcm_dai {
+ __le32 size; /* in bytes of this structure */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le32 id; /* unique ID - used to match */
+ __le32 playback; /* supports playback mode */
+ __le32 capture; /* supports capture mode */
+ __le32 compress; /* 1 = compressed; 0 = PCM */
+ struct snd_soc_tplg_pcm_cfg_caps capconf[2]; /* capabilities and configs */
+} __attribute__((packed));
+
+#endif
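As a small orientation aid (hypothetical, not part of the patch): a topology generator emits one snd_soc_tplg_hdr per block, followed by payload_size bytes of objects of the declared type. For example, a header in front of three mixer controls could look like the sketch below, where 'payload_bytes' stands in for the size of the serialized controls that follow.

/* Hypothetical sketch: block header preceding an array of three
 * struct snd_soc_tplg_mixer_control entries in a topology file. */
struct snd_soc_tplg_hdr hdr = {
	.magic = cpu_to_le32(SND_SOC_TPLG_MAGIC),
	.abi = cpu_to_le32(SND_SOC_TPLG_ABI_VERSION),
	.type = cpu_to_le32(SND_SOC_TPLG_TYPE_MIXER),
	.size = cpu_to_le32(sizeof(struct snd_soc_tplg_hdr)),
	.payload_size = cpu_to_le32(payload_bytes),	/* bytes after this header */
	.index = cpu_to_le32(0),			/* block identifier */
	.count = cpu_to_le32(3),			/* three controls follow */
};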
diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h
new file mode 100644
index 000000000000..ffc4f203146c
--- /dev/null
+++ b/include/uapi/sound/tlv.h
@@ -0,0 +1,31 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_SOUND_TLV_H
+#define __UAPI_SOUND_TLV_H
+
+#define SNDRV_CTL_TLVT_CONTAINER 0 /* one level down - group of TLVs */
+#define SNDRV_CTL_TLVT_DB_SCALE 1 /* dB scale */
+#define SNDRV_CTL_TLVT_DB_LINEAR 2 /* linear volume */
+#define SNDRV_CTL_TLVT_DB_RANGE 3 /* dB range container */
+#define SNDRV_CTL_TLVT_DB_MINMAX 4 /* dB scale with min/max */
+#define SNDRV_CTL_TLVT_DB_MINMAX_MUTE 5 /* dB scale with min/max with mute */
+
+/*
+ * channel-mapping TLV items
+ * TLV length must match with num_channels
+ */
+#define SNDRV_CTL_TLVT_CHMAP_FIXED 0x101 /* fixed channel position */
+#define SNDRV_CTL_TLVT_CHMAP_VAR 0x102 /* channels freely swappable */
+#define SNDRV_CTL_TLVT_CHMAP_PAIRED 0x103 /* pair-wise swappable */
+
+#endif
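For reference, a hypothetical example of the on-the-wire shape these type IDs tag: a (type, payload length, payload...) sequence of unsigned ints. The exact payload of each type is defined by the kernel-side sound/tlv.h helpers, so treat this as an assumed illustration of SNDRV_CTL_TLVT_DB_SCALE only.

/* Hypothetical sketch: a dB-scale TLV describing a -50.00 dB minimum with
 * 1.00 dB steps (values in 0.01 dB units), assuming the conventional
 * (type, payload length in bytes, payload...) layout. */
static const unsigned int db_scale_example[] = {
	SNDRV_CTL_TLVT_DB_SCALE,	/* type */
	2 * sizeof(unsigned int),	/* payload length in bytes */
	(unsigned int)-5000,		/* min, 0.01 dB units */
	100,				/* step, 0.01 dB units */
};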
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
new file mode 100644
index 000000000000..3696575b02f2
--- /dev/null
+++ b/include/video/exynos5433_decon.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co.Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundationr
+ */
+
+#ifndef EXYNOS_REGS_DECON_H
+#define EXYNOS_REGS_DECON_H
+
+/* Exynos543X DECON */
+#define DECON_VIDCON0 0x0000
+#define DECON_VIDOUTCON0 0x0010
+#define DECON_WINCONx(n) (0x0020 + ((n) * 4))
+#define DECON_VIDOSDxH(n) (0x0080 + ((n) * 4))
+#define DECON_SHADOWCON 0x00A0
+#define DECON_VIDOSDxA(n) (0x00B0 + ((n) * 0x20))
+#define DECON_VIDOSDxB(n) (0x00B4 + ((n) * 0x20))
+#define DECON_VIDOSDxC(n) (0x00B8 + ((n) * 0x20))
+#define DECON_VIDOSDxD(n) (0x00BC + ((n) * 0x20))
+#define DECON_VIDOSDxE(n) (0x00C0 + ((n) * 0x20))
+#define DECON_VIDW0xADD0B0(n) (0x0150 + ((n) * 0x10))
+#define DECON_VIDW0xADD0B1(n) (0x0154 + ((n) * 0x10))
+#define DECON_VIDW0xADD0B2(n) (0x0158 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B0(n) (0x01A0 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B1(n) (0x01A4 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B2(n) (0x01A8 + ((n) * 0x10))
+#define DECON_VIDW0xADD2(n) (0x0200 + ((n) * 4))
+#define DECON_LOCALxSIZE(n) (0x0214 + ((n) * 4))
+#define DECON_VIDINTCON0 0x0220
+#define DECON_VIDINTCON1 0x0224
+#define DECON_WxKEYCON0(n) (0x0230 + ((n - 1) * 8))
+#define DECON_WxKEYCON1(n) (0x0234 + ((n - 1) * 8))
+#define DECON_WxKEYALPHA(n) (0x0250 + ((n - 1) * 4))
+#define DECON_WINxMAP(n) (0x0270 + ((n) * 4))
+#define DECON_QOSLUT07_00 0x02C0
+#define DECON_QOSLUT15_08 0x02C4
+#define DECON_QOSCTRL 0x02C8
+#define DECON_BLENDERQx(n) (0x0300 + ((n - 1) * 4))
+#define DECON_BLENDCON 0x0310
+#define DECON_OPE_VIDW0xADD0(n) (0x0400 + ((n) * 4))
+#define DECON_OPE_VIDW0xADD1(n) (0x0414 + ((n) * 4))
+#define DECON_FRAMEFIFO_REG7 0x051C
+#define DECON_FRAMEFIFO_REG8 0x0520
+#define DECON_FRAMEFIFO_STATUS 0x0524
+#define DECON_CMU 0x1404
+#define DECON_UPDATE 0x1410
+#define DECON_UPDATE_SCHEME 0x1438
+#define DECON_VIDCON1 0x2000
+#define DECON_VIDCON2 0x2004
+#define DECON_VIDCON3 0x2008
+#define DECON_VIDCON4 0x200C
+#define DECON_VIDTCON2 0x2028
+#define DECON_FRAME_SIZE 0x2038
+#define DECON_LINECNT_OP_THRESHOLD 0x203C
+#define DECON_TRIGCON 0x2040
+#define DECON_TRIGSKIP 0x2050
+#define DECON_CRCRDATA 0x20B0
+#define DECON_CRCCTRL 0x20B4
+
+/* Exynos5430 DECON */
+#define DECON_VIDTCON0 0x2020
+#define DECON_VIDTCON1 0x2024
+
+/* Exynos5433 DECON */
+#define DECON_VIDTCON00 0x2010
+#define DECON_VIDTCON01 0x2014
+#define DECON_VIDTCON10 0x2018
+#define DECON_VIDTCON11 0x201C
+
+/* Exynos543X DECON Internal */
+#define DECON_W013DSTREOCON 0x0320
+#define DECON_W233DSTREOCON 0x0324
+#define DECON_FRAMEFIFO_REG0 0x0500
+#define DECON_ENHANCER_CTRL 0x2100
+
+/* Exynos543X DECON TV */
+#define DECON_VCLKCON0 0x0014
+#define DECON_VIDINTCON2 0x0228
+#define DECON_VIDINTCON3 0x022C
+
+/* VIDCON0 */
+#define VIDCON0_SWRESET (1 << 28)
+#define VIDCON0_STOP_STATUS (1 << 2)
+#define VIDCON0_ENVID (1 << 1)
+#define VIDCON0_ENVID_F (1 << 0)
+
+/* VIDOUTCON0 */
+#define VIDOUT_LCD_ON (1 << 24)
+#define VIDOUT_IF_F_MASK (0x3 << 20)
+#define VIDOUT_RGB_IF (0x0 << 20)
+#define VIDOUT_COMMAND_IF (0x2 << 20)
+
+/* WINCONx */
+#define WINCONx_HAWSWP_F (1 << 16)
+#define WINCONx_WSWP_F (1 << 15)
+#define WINCONx_BURSTLEN_MASK (0x3 << 10)
+#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
+#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
+#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
+#define WINCONx_BLD_PIX_F (1 << 6)
+#define WINCONx_BPPMODE_MASK (0xf << 2)
+#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
+#define WINCONx_BPPMODE_16BPP_A1555 (0x6 << 2)
+#define WINCONx_BPPMODE_16BPP_I1555 (0x7 << 2)
+#define WINCONx_BPPMODE_24BPP_888 (0xb << 2)
+#define WINCONx_BPPMODE_24BPP_A1887 (0xc << 2)
+#define WINCONx_BPPMODE_25BPP_A1888 (0xd << 2)
+#define WINCONx_BPPMODE_32BPP_A8888 (0xd << 2)
+#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
+#define WINCONx_ALPHA_SEL_F (1 << 1)
+#define WINCONx_ENWIN_F (1 << 0)
+
+/* SHADOWCON */
+#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
+
+/* VIDOSDxD */
+#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
+#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
+#define VIDOSD_Wx_ALPHA_B_F(n) (((n) & 0xff) << 0)
+
+/* VIDINTCON0 */
+#define VIDINTCON0_FRAMEDONE (1 << 17)
+#define VIDINTCON0_INTFRMEN (1 << 12)
+#define VIDINTCON0_INTEN (1 << 0)
+
+/* VIDINTCON1 */
+#define VIDINTCON1_INTFRMDONEPEND (1 << 2)
+#define VIDINTCON1_INTFRMPEND (1 << 1)
+#define VIDINTCON1_INTFIFOPEND (1 << 0)
+
+/* DECON_CMU */
+#define CMU_CLKGAGE_MODE_SFR_F (1 << 1)
+#define CMU_CLKGAGE_MODE_MEM_F (1 << 0)
+
+/* DECON_UPDATE */
+#define STANDALONE_UPDATE_F (1 << 0)
+
+/* DECON_VIDTCON00 */
+#define VIDTCON00_VBPD_F(x) (((x) & 0xfff) << 16)
+#define VIDTCON00_VFPD_F(x) ((x) & 0xfff)
+
+/* DECON_VIDTCON01 */
+#define VIDTCON01_VSPW_F(x) (((x) & 0xfff) << 16)
+
+/* DECON_VIDTCON10 */
+#define VIDTCON10_HBPD_F(x) (((x) & 0xfff) << 16)
+#define VIDTCON10_HFPD_F(x) ((x) & 0xfff)
+
+/* DECON_VIDTCON11 */
+#define VIDTCON11_HSPW_F(x) (((x) & 0xfff) << 16)
+
+/* DECON_VIDTCON2 */
+#define VIDTCON2_LINEVAL(x) (((x) & 0xfff) << 16)
+#define VIDTCON2_HOZVAL(x) ((x) & 0xfff)
+
+/* TRIGCON */
+#define TRIGCON_TRIGEN_PER_F (1 << 31)
+#define TRIGCON_TRIGEN_F (1 << 30)
+#define TRIGCON_TE_AUTO_MASK (1 << 29)
+#define TRIGCON_SWTRIGCMD (1 << 1)
+#define TRIGCON_SWTRIGEN (1 << 0)
+
+#endif /* EXYNOS_REGS_DECON_H */
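A hypothetical sketch (not part of the patch) of how a driver composes a window-control value from these field macros; the MMIO write itself is elided.

/* Hypothetical sketch: enable window 0 as a 32bpp ARGB surface with
 * 16-word bursts and per-pixel alpha blending. */
u32 wincon = WINCONx_ENWIN_F | WINCONx_BPPMODE_32BPP_A8888 |
	     WINCONx_BURSTLEN_16WORD | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
/* writel(wincon, regs + DECON_WINCONx(0)); */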
diff --git a/include/video/neomagic.h b/include/video/neomagic.h
index bc5013e8059d..91e225a6107d 100644
--- a/include/video/neomagic.h
+++ b/include/video/neomagic.h
@@ -159,10 +159,7 @@ struct neofb_par {
  unsigned char VCLK3NumeratorHigh;
  unsigned char VCLK3Denominator;
  unsigned char VerticalExt;
-
-#ifdef CONFIG_MTRR
- int mtrr;
-#endif
+ int wc_cookie;
  u8 __iomem *mmio_vbase;
  u8 cursorOff;
  u8 *cursorPad; /* Must die !! */
diff --git a/include/video/tdfx.h b/include/video/tdfx.h
index befbaf0a92d8..69674b94bb68 100644
--- a/include/video/tdfx.h
+++ b/include/video/tdfx.h
@@ -196,7 +196,7 @@ struct tdfx_par {
  u32 palette[16];
  void __iomem *regbase_virt;
  unsigned long iobase;
- int mtrr_handle;
+ int wc_cookie;
 #ifdef CONFIG_FB_3DFX_I2C
  struct tdfxfb_i2c_chan chan[2];
 #endif