aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/acpi/acnames.h4
-rw-r--r--include/acpi/acpi_bus.h6
-rw-r--r--include/acpi/acpixf.h3
-rw-r--r--include/acpi/actbl1.h19
-rw-r--r--include/acpi/actbl3.h9
-rw-r--r--include/asm-generic/atomic.h194
-rw-r--r--include/asm-generic/atomic64.h20
-rw-r--r--include/asm-generic/cputime_jiffies.h2
-rw-r--r--include/asm-generic/cputime_nsecs.h2
-rw-r--r--include/asm-generic/dma-mapping-common.h17
-rw-r--r--include/asm-generic/gpio.h2
-rw-r--r--include/asm-generic/io.h2
-rw-r--r--include/asm-generic/irq_work.h10
-rw-r--r--include/asm-generic/pgtable.h31
-rw-r--r--include/asm-generic/sections.h4
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/crypto/drbg.h32
-rw-r--r--include/crypto/internal/hash.h9
-rw-r--r--include/crypto/mcryptd.h112
-rw-r--r--include/crypto/public_key.h6
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h35
-rw-r--r--include/dt-bindings/clock/imx6sl-clock.h30
-rw-r--r--include/dt-bindings/clock/imx6sx-clock.h25
-rw-r--r--include/dt-bindings/clock/r8a7740-clock.h77
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h80
-rw-r--r--include/dt-bindings/clock/vf610-clock.h5
-rw-r--r--include/dt-bindings/input/ti-drv260x.h36
-rw-r--r--include/dt-bindings/pinctrl/at91.h5
-rw-r--r--include/dt-bindings/pinctrl/rockchip.h2
-rw-r--r--include/dt-bindings/sound/cs35l32.h26
-rw-r--r--include/keys/asymmetric-type.h41
-rw-r--r--include/keys/user-type.h1
-rw-r--r--include/kvm/arm_vgic.h112
-rw-r--r--include/linux/acpi.h2
-rw-r--r--include/linux/aer.h2
-rw-r--r--include/linux/ahci_platform.h13
-rw-r--r--include/linux/amba/bus.h5
-rw-r--r--include/linux/ata_platform.h5
-rw-r--r--include/linux/atmel-mci.h2
-rw-r--r--include/linux/atmel_tc.h13
-rw-r--r--include/linux/atomic.h36
-rw-r--r--include/linux/balloon_compaction.h169
-rw-r--r--include/linux/bcma/bcma.h6
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h8
-rw-r--r--include/linux/bcma/bcma_regs.h5
-rw-r--r--include/linux/bcma/bcma_soc.h1
-rw-r--r--include/linux/bitops.h20
-rw-r--r--include/linux/blk-mq.h8
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/bpf.h136
-rw-r--r--include/linux/brcmphy.h137
-rw-r--r--include/linux/ccp.h12
-rw-r--r--include/linux/cgroup.h26
-rw-r--r--include/linux/clk.h2
-rw-r--r--include/linux/clk/at91_pmc.h1
-rw-r--r--include/linux/com20020.h29
-rw-r--r--include/linux/compaction.h24
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/cpuset.h7
-rw-r--r--include/linux/cycx_x25.h125
-rw-r--r--include/linux/dcache.h10
-rw-r--r--include/linux/devcoredump.h35
-rw-r--r--include/linux/device.h4
-rw-r--r--include/linux/dma-mapping.h26
-rw-r--r--include/linux/dmaengine.h22
-rw-r--r--include/linux/dynamic_debug.h12
-rw-r--r--include/linux/dynamic_queue_limits.h12
-rw-r--r--include/linux/etherdevice.h1
-rw-r--r--include/linux/ethtool.h4
-rw-r--r--include/linux/extcon/extcon-gpio.h4
-rw-r--r--include/linux/extcon/sm5502.h287
-rw-r--r--include/linux/f2fs_fs.h22
-rw-r--r--include/linux/filter.h196
-rw-r--r--include/linux/flex_proportions.h5
-rw-r--r--include/linux/fs.h64
-rw-r--r--include/linux/fs_enet_pd.h1
-rw-r--r--include/linux/fsl_ifc.h6
-rw-r--r--include/linux/ftrace.h10
-rw-r--r--include/linux/genalloc.h7
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/gpio/consumer.h105
-rw-r--r--include/linux/gpio/driver.h12
-rw-r--r--include/linux/hash.h4
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/i2c.h16
-rw-r--r--include/linux/i82593.h229
-rw-r--r--include/linux/ieee80211.h73
-rw-r--r--include/linux/if_macvlan.h1
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/iio/trigger.h4
-rw-r--r--include/linux/ima.h4
-rw-r--r--include/linux/init_task.h12
-rw-r--r--include/linux/interrupt.h5
-rw-r--r--include/linux/iommu.h1
-rw-r--r--include/linux/ioport.h5
-rw-r--r--include/linux/ipack.h24
-rw-r--r--include/linux/irq.h8
-rw-r--r--include/linux/irq_work.h3
-rw-r--r--include/linux/irqchip/arm-gic.h16
-rw-r--r--include/linux/irqchip/irq-omap-intc.h32
-rw-r--r--include/linux/irqdesc.h29
-rw-r--r--include/linux/jbd2.h30
-rw-r--r--include/linux/jiffies.h12
-rw-r--r--include/linux/jump_label.h17
-rw-r--r--include/linux/kernel.h57
-rw-r--r--include/linux/key-type.h34
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/kvm_host.h31
-rw-r--r--include/linux/kvm_types.h14
-rw-r--r--include/linux/leds.h3
-rw-r--r--include/linux/libata.h12
-rw-r--r--include/linux/lockd/lockd.h1
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/mei_cl_bus.h1
-rw-r--r--include/linux/memcontrol.h15
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mempolicy.h7
-rw-r--r--include/linux/mfd/max77693-private.h9
-rw-r--r--include/linux/mfd/samsung/core.h21
-rw-r--r--include/linux/mfd/samsung/s2mpa01.h12
-rw-r--r--include/linux/mfd/samsung/s2mps11.h9
-rw-r--r--include/linux/mfd/samsung/s2mps14.h10
-rw-r--r--include/linux/mfd/tmio.h25
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/migrate.h14
-rw-r--r--include/linux/mlx4/device.h21
-rw-r--r--include/linux/mlx4/qp.h12
-rw-r--r--include/linux/mlx5/device.h152
-rw-r--r--include/linux/mlx5/driver.h118
-rw-r--r--include/linux/mlx5/mlx5_ifc.h349
-rw-r--r--include/linux/mlx5/qp.h3
-rw-r--r--include/linux/mm.h39
-rw-r--r--include/linux/mmc/card.h10
-rw-r--r--include/linux/mmc/dw_mmc.h4
-rw-r--r--include/linux/mmc/host.h12
-rw-r--r--include/linux/mmc/mmc.h7
-rw-r--r--include/linux/mmc/sdhci.h3
-rw-r--r--include/linux/mmc/slot-gpio.h5
-rw-r--r--include/linux/mmdebug.h20
-rw-r--r--include/linux/mmu_notifier.h24
-rw-r--r--include/linux/mmzone.h51
-rw-r--r--include/linux/moduleparam.h50
-rw-r--r--include/linux/msi.h6
-rw-r--r--include/linux/mtd/nand.h2
-rw-r--r--include/linux/mutex.h4
-rw-r--r--include/linux/netdevice.h252
-rw-r--r--include/linux/netfilter.h5
-rw-r--r--include/linux/netfilter/ipset/ip_set.h60
-rw-r--r--include/linux/netfilter/ipset/ip_set_list.h1
-rw-r--r--include/linux/netfilter_bridge.h50
-rw-r--r--include/linux/nfs4.h26
-rw-r--r--include/linux/nfs_fs.h41
-rw-r--r--include/linux/nfs_xdr.h17
-rw-r--r--include/linux/of.h3
-rw-r--r--include/linux/of_address.h27
-rw-r--r--include/linux/of_pci.h13
-rw-r--r--include/linux/omap-dma.h37
-rw-r--r--include/linux/pagemap.h32
-rw-r--r--include/linux/pci.h66
-rw-r--r--include/linux/pci_hotplug.h2
-rw-r--r--include/linux/pci_ids.h18
-rw-r--r--include/linux/percpu-refcount.h123
-rw-r--r--include/linux/percpu.h13
-rw-r--r--include/linux/percpu_counter.h10
-rw-r--r--include/linux/perf_event.h14
-rw-r--r--include/linux/phonedev.h25
-rw-r--r--include/linux/phy.h27
-rw-r--r--include/linux/phy_fixed.h31
-rw-r--r--include/linux/pinctrl/pinconf-generic.h2
-rw-r--r--include/linux/pinctrl/pinmux.h7
-rw-r--r--include/linux/platform_data/drv260x-pdata.h28
-rw-r--r--include/linux/platform_data/gpio-dwapb.h32
-rw-r--r--include/linux/platform_data/isl9305.h30
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h13
-rw-r--r--include/linux/platform_data/samsung-usbphy.h27
-rw-r--r--include/linux/platform_data/tegra_emc.h34
-rw-r--r--include/linux/pm.h5
-rw-r--r--include/linux/pm_domain.h132
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/proportions.h5
-rw-r--r--include/linux/random.h4
-rw-r--r--include/linux/rcupdate.h106
-rw-r--r--include/linux/rcutiny.h2
-rw-r--r--include/linux/reboot.h3
-rw-r--r--include/linux/regulator/consumer.h20
-rw-r--r--include/linux/regulator/da9211.h9
-rw-r--r--include/linux/regulator/driver.h14
-rw-r--r--include/linux/regulator/machine.h1
-rw-r--r--include/linux/regulator/max1586.h2
-rw-r--r--include/linux/rhashtable.h2
-rw-r--r--include/linux/rmap.h2
-rw-r--r--include/linux/rtnetlink.h10
-rw-r--r--include/linux/rwsem.h2
-rw-r--r--include/linux/sched.h99
-rw-r--r--include/linux/screen_info.h8
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/seqlock.h19
-rw-r--r--include/linux/seqno-fence.h1
-rw-r--r--include/linux/serial_8250.h6
-rw-r--r--include/linux/serial_core.h20
-rw-r--r--include/linux/skbuff.h347
-rw-r--r--include/linux/slab.h64
-rw-r--r--include/linux/slab_def.h20
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/soc/ti/knav_dma.h175
-rw-r--r--include/linux/soc/ti/knav_qmss.h90
-rw-r--r--include/linux/spi/mcp23s08.h18
-rw-r--r--include/linux/spi/spi.h7
-rw-r--r--include/linux/spinlock.h8
-rw-r--r--include/linux/sunrpc/svc.h1
-rw-r--r--include/linux/sunrpc/xprt.h1
-rw-r--r--include/linux/suspend.h6
-rw-r--r--include/linux/swap.h22
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/tcp.h10
-rw-r--r--include/linux/ti_wilink_st.h2
-rw-r--r--include/linux/tick.h9
-rw-r--r--include/linux/topology.h17
-rw-r--r--include/linux/torture.h5
-rw-r--r--include/linux/tracepoint.h11
-rw-r--r--include/linux/tty.h15
-rw-r--r--include/linux/tty_driver.h4
-rw-r--r--include/linux/udp.h16
-rw-r--r--include/linux/uio.h5
-rw-r--r--include/linux/usb.h12
-rw-r--r--include/linux/usb/chipidea.h1
-rw-r--r--include/linux/usb/gadget.h26
-rw-r--r--include/linux/usb/hcd.h4
-rw-r--r--include/linux/usb/of.h5
-rw-r--r--include/linux/usb/quirks.h22
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/vga_switcheroo.h2
-rw-r--r--include/linux/vgaarb.h2
-rw-r--r--include/linux/vm_event_item.h7
-rw-r--r--include/linux/wait.h21
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/linux/zsmalloc.h2
-rw-r--r--include/media/davinci/dm644x_ccdc.h2
-rw-r--r--include/media/omap3isp.h3
-rw-r--r--include/media/rc-map.h1
-rw-r--r--include/media/videobuf2-core.h21
-rw-r--r--include/misc/cxl.h48
-rw-r--r--include/net/addrconf.h3
-rw-r--r--include/net/ah.h3
-rw-r--r--include/net/bluetooth/bluetooth.h5
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/net/bluetooth/hci_core.h25
-rw-r--r--include/net/bluetooth/l2cap.h35
-rw-r--r--include/net/cfg80211.h69
-rw-r--r--include/net/checksum.h4
-rw-r--r--include/net/codel.h2
-rw-r--r--include/net/dsa.h95
-rw-r--r--include/net/dst.h16
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/flow_keys.h16
-rw-r--r--include/net/gen_stats.h15
-rw-r--r--include/net/genetlink.h8
-rw-r--r--include/net/geneve.h97
-rw-r--r--include/net/gue.h23
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/inet_connection_sock.h9
-rw-r--r--include/net/inet_frag.h2
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/ip.h29
-rw-r--r--include/net/ip6_checksum.h8
-rw-r--r--include/net/ip6_fib.h25
-rw-r--r--include/net/ip_fib.h5
-rw-r--r--include/net/ip_tunnels.h38
-rw-r--r--include/net/ip_vs.h223
-rw-r--r--include/net/ipv6.h4
-rw-r--r--include/net/mac80211.h34
-rw-r--r--include/net/mld.h5
-rw-r--r--include/net/neighbour.h2
-rw-r--r--include/net/net_namespace.h20
-rw-r--r--include/net/netdma.h32
-rw-r--r--include/net/netfilter/br_netfilter.h6
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h14
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h119
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h10
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h157
-rw-r--r--include/net/netfilter/nf_nat.h10
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h75
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netfilter/nft_masq.h16
-rw-r--r--include/net/netfilter/nft_reject.h9
-rw-r--r--include/net/netns/ieee802154_6lowpan.h1
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h2
-rw-r--r--include/net/netns/xfrm.h14
-rw-r--r--include/net/nfc/nci.h16
-rw-r--r--include/net/nfc/nci_core.h9
-rw-r--r--include/net/pkt_cls.h18
-rw-r--r--include/net/pkt_sched.h8
-rw-r--r--include/net/regulatory.h2
-rw-r--r--include/net/sch_generic.h122
-rw-r--r--include/net/sctp/command.h2
-rw-r--r--include/net/sctp/sctp.h13
-rw-r--r--include/net/snmp.h8
-rw-r--r--include/net/sock.h39
-rw-r--r--include/net/tcp.h94
-rw-r--r--include/net/udp.h21
-rw-r--r--include/net/udp_tunnel.h85
-rw-r--r--include/net/wimax.h2
-rw-r--r--include/net/xfrm.h1
-rw-r--r--include/ras/ras_event.h48
-rw-r--r--include/rdma/ib_umem.h1
-rw-r--r--include/rxrpc/types.h41
-rw-r--r--include/scsi/scsi.h1
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/scsi/scsi_tcq.h2
-rw-r--r--include/sound/pcm.h52
-rw-r--r--include/sound/rt5645.h3
-rw-r--r--include/sound/rt5677.h13
-rw-r--r--include/sound/soc-dapm.h5
-rw-r--r--include/sound/soc.h103
-rw-r--r--include/sound/vx_core.h7
-rw-r--r--include/trace/events/asoc.h6
-rw-r--r--include/trace/events/btrfs.h85
-rw-r--r--include/trace/events/f2fs.h16
-rw-r--r--include/trace/events/filelock.h14
-rw-r--r--include/trace/events/irq.h4
-rw-r--r--include/trace/events/kvm.h36
-rw-r--r--include/trace/events/rcu.h3
-rw-r--r--include/uapi/Kbuild1
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/linux/Kbuild4
-rw-r--r--include/uapi/linux/bpf.h155
-rw-r--r--include/uapi/linux/ethtool.h29
-rw-r--r--include/uapi/linux/fou.h39
-rw-r--r--include/uapi/linux/genwqe/genwqe_card.h2
-rw-r--r--include/uapi/linux/hyperv.h2
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_link.h24
-rw-r--r--include/uapi/linux/if_tunnel.h17
-rw-r--r--include/uapi/linux/inet_diag.h13
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/ip_vs.h3
-rw-r--r--include/uapi/linux/kernel-page-flags.h1
-rw-r--r--include/uapi/linux/kvm.h28
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h12
-rw-r--r--include/uapi/linux/netfilter/nf_nat.h5
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h59
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_acct.h8
-rw-r--r--include/uapi/linux/netfilter/xt_set.h10
-rw-r--r--include/uapi/linux/netfilter_arp/arpt_mangle.h2
-rw-r--r--include/uapi/linux/nfsd/export.h5
-rw-r--r--include/uapi/linux/nl80211.h116
-rw-r--r--include/uapi/linux/openvswitch.h31
-rw-r--r--include/uapi/linux/pci_regs.h3
-rw-r--r--include/uapi/linux/prctl.h27
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/smiapp.h29
-rw-r--r--include/uapi/linux/uhid.h120
-rw-r--r--include/uapi/linux/usb/functionfs.h19
-rw-r--r--include/uapi/linux/usbip.h26
-rw-r--r--include/uapi/linux/v4l2-controls.h6
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h9
-rw-r--r--include/uapi/linux/vfio.h3
-rw-r--r--include/uapi/linux/videodev2.h13
-rw-r--r--include/uapi/linux/wil6210_uapi.h87
-rw-r--r--include/uapi/linux/xattr.h2
-rw-r--r--include/uapi/linux/xfrm.h7
-rw-r--r--include/uapi/misc/Kbuild2
-rw-r--r--include/uapi/misc/cxl.h88
-rw-r--r--include/uapi/sound/asound.h3
-rw-r--r--include/video/imx-ipu-v3.h188
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/interface/elfnote.h48
-rw-r--r--include/xen/interface/features.h3
-rw-r--r--include/xen/interface/io/vscsiif.h229
-rw-r--r--include/xen/interface/xen.h272
-rw-r--r--include/xen/xenbus.h21
377 files changed, 7196 insertions, 3680 deletions
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index c728113374f5..f97804bdf1ff 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -59,6 +59,10 @@
59#define METHOD_NAME__PRS "_PRS" 59#define METHOD_NAME__PRS "_PRS"
60#define METHOD_NAME__PRT "_PRT" 60#define METHOD_NAME__PRT "_PRT"
61#define METHOD_NAME__PRW "_PRW" 61#define METHOD_NAME__PRW "_PRW"
62#define METHOD_NAME__PS0 "_PS0"
63#define METHOD_NAME__PS1 "_PS1"
64#define METHOD_NAME__PS2 "_PS2"
65#define METHOD_NAME__PS3 "_PS3"
62#define METHOD_NAME__REG "_REG" 66#define METHOD_NAME__REG "_REG"
63#define METHOD_NAME__SB_ "_SB_" 67#define METHOD_NAME__SB_ "_SB_"
64#define METHOD_NAME__SEG "_SEG" 68#define METHOD_NAME__SEG "_SEG"
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index bcfd808b1098..57ee0528aacb 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -118,6 +118,7 @@ struct acpi_device;
118struct acpi_hotplug_profile { 118struct acpi_hotplug_profile {
119 struct kobject kobj; 119 struct kobject kobj;
120 int (*scan_dependent)(struct acpi_device *adev); 120 int (*scan_dependent)(struct acpi_device *adev);
121 void (*notify_online)(struct acpi_device *adev);
121 bool enabled:1; 122 bool enabled:1;
122 bool demand_offline:1; 123 bool demand_offline:1;
123}; 124};
@@ -204,10 +205,9 @@ struct acpi_device_flags {
204 u32 match_driver:1; 205 u32 match_driver:1;
205 u32 initialized:1; 206 u32 initialized:1;
206 u32 visited:1; 207 u32 visited:1;
207 u32 no_hotplug:1;
208 u32 hotplug_notify:1; 208 u32 hotplug_notify:1;
209 u32 is_dock_station:1; 209 u32 is_dock_station:1;
210 u32 reserved:22; 210 u32 reserved:23;
211}; 211};
212 212
213/* File System */ 213/* File System */
@@ -246,7 +246,6 @@ struct acpi_device_pnp {
246 acpi_device_name device_name; /* Driver-determined */ 246 acpi_device_name device_name; /* Driver-determined */
247 acpi_device_class device_class; /* " */ 247 acpi_device_class device_class; /* " */
248 union acpi_object *str_obj; /* unicode string for _STR method */ 248 union acpi_object *str_obj; /* unicode string for _STR method */
249 unsigned long sun; /* _SUN */
250}; 249};
251 250
252#define acpi_device_bid(d) ((d)->pnp.bus_id) 251#define acpi_device_bid(d) ((d)->pnp.bus_id)
@@ -412,7 +411,6 @@ void acpi_bus_private_data_handler(acpi_handle, void *);
412int acpi_bus_get_private_data(acpi_handle, void **); 411int acpi_bus_get_private_data(acpi_handle, void **);
413int acpi_bus_attach_private_data(acpi_handle, void *); 412int acpi_bus_attach_private_data(acpi_handle, void *);
414void acpi_bus_detach_private_data(acpi_handle); 413void acpi_bus_detach_private_data(acpi_handle);
415void acpi_bus_no_hotplug(acpi_handle handle);
416extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); 414extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
417extern int register_acpi_notifier(struct notifier_block *); 415extern int register_acpi_notifier(struct notifier_block *);
418extern int unregister_acpi_notifier(struct notifier_block *); 416extern int unregister_acpi_notifier(struct notifier_block *);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index b7c89d47efbe..9fc1d71c82bc 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20140724 49#define ACPI_CA_VERSION 0x20140828
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
@@ -692,6 +692,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
692 *event_status)) 692 *event_status))
693ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) 693ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
694ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) 694ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
695ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
695 696
696ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status 697ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
697 acpi_get_gpe_device(u32 gpe_index, 698 acpi_get_gpe_device(u32 gpe_index,
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7626bfeac2cb..29e79370641d 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -952,7 +952,8 @@ enum acpi_srat_type {
952 ACPI_SRAT_TYPE_CPU_AFFINITY = 0, 952 ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
953 ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, 953 ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
954 ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, 954 ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
955 ACPI_SRAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */ 955 ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
956 ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */
956}; 957};
957 958
958/* 959/*
@@ -968,7 +969,7 @@ struct acpi_srat_cpu_affinity {
968 u32 flags; 969 u32 flags;
969 u8 local_sapic_eid; 970 u8 local_sapic_eid;
970 u8 proximity_domain_hi[3]; 971 u8 proximity_domain_hi[3];
971 u32 reserved; /* Reserved, must be zero */ 972 u32 clock_domain;
972}; 973};
973 974
974/* Flags */ 975/* Flags */
@@ -1010,6 +1011,20 @@ struct acpi_srat_x2apic_cpu_affinity {
1010 1011
1011#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ 1012#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
1012 1013
1014/* 3: GICC Affinity (ACPI 5.1) */
1015
1016struct acpi_srat_gicc_affinity {
1017 struct acpi_subtable_header header;
1018 u32 proximity_domain;
1019 u32 acpi_processor_uid;
1020 u32 flags;
1021 u32 clock_domain;
1022};
1023
1024/* Flags for struct acpi_srat_gicc_affinity */
1025
1026#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
1027
1013/* Reset to default packing */ 1028/* Reset to default packing */
1014 1029
1015#pragma pack() 1030#pragma pack()
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 787bcc814463..5480cb2236bf 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -310,10 +310,15 @@ struct acpi_gtdt_timer_entry {
310 u32 common_flags; 310 u32 common_flags;
311}; 311};
312 312
313/* Flag Definitions: timer_flags and virtual_timer_flags above */
314
315#define ACPI_GTDT_GT_IRQ_MODE (1)
316#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1)
317
313/* Flag Definitions: common_flags above */ 318/* Flag Definitions: common_flags above */
314 319
315#define ACPI_GTDT_GT_IS_SECURE_TIMER (1) 320#define ACPI_GTDT_GT_IS_SECURE_TIMER (1)
316#define ACPI_GTDT_GT_ALWAYS_ON (1<<1) 321#define ACPI_GTDT_GT_ALWAYS_ON (1<<1)
317 322
318/* 1: SBSA Generic Watchdog Structure */ 323/* 1: SBSA Generic Watchdog Structure */
319 324
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 9c79e7603459..1973ad2b13f4 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -18,14 +18,100 @@
18#include <asm/cmpxchg.h> 18#include <asm/cmpxchg.h>
19#include <asm/barrier.h> 19#include <asm/barrier.h>
20 20
21/*
22 * atomic_$op() - $op integer to atomic variable
23 * @i: integer value to $op
24 * @v: pointer to the atomic variable
25 *
26 * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
27 * smp_mb__{before,after}_atomic().
28 */
29
30/*
31 * atomic_$op_return() - $op interer to atomic variable and returns the result
32 * @i: integer value to $op
33 * @v: pointer to the atomic variable
34 *
35 * Atomically $ops @i to @v. Does imply a full memory barrier.
36 */
37
21#ifdef CONFIG_SMP 38#ifdef CONFIG_SMP
22/* Force people to define core atomics */ 39
23# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \ 40/* we can build all atomic primitives from cmpxchg */
24 !defined(atomic_clear_mask) || !defined(atomic_set_mask) 41
25# error "SMP requires a little arch-specific magic" 42#define ATOMIC_OP(op, c_op) \
26# endif 43static inline void atomic_##op(int i, atomic_t *v) \
44{ \
45 int c, old; \
46 \
47 c = v->counter; \
48 while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
49 c = old; \
50}
51
52#define ATOMIC_OP_RETURN(op, c_op) \
53static inline int atomic_##op##_return(int i, atomic_t *v) \
54{ \
55 int c, old; \
56 \
57 c = v->counter; \
58 while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
59 c = old; \
60 \
61 return c c_op i; \
62}
63
64#else
65
66#include <linux/irqflags.h>
67
68#define ATOMIC_OP(op, c_op) \
69static inline void atomic_##op(int i, atomic_t *v) \
70{ \
71 unsigned long flags; \
72 \
73 raw_local_irq_save(flags); \
74 v->counter = v->counter c_op i; \
75 raw_local_irq_restore(flags); \
76}
77
78#define ATOMIC_OP_RETURN(op, c_op) \
79static inline int atomic_##op##_return(int i, atomic_t *v) \
80{ \
81 unsigned long flags; \
82 int ret; \
83 \
84 raw_local_irq_save(flags); \
85 ret = (v->counter = v->counter c_op i); \
86 raw_local_irq_restore(flags); \
87 \
88 return ret; \
89}
90
91#endif /* CONFIG_SMP */
92
93#ifndef atomic_add_return
94ATOMIC_OP_RETURN(add, +)
95#endif
96
97#ifndef atomic_sub_return
98ATOMIC_OP_RETURN(sub, -)
99#endif
100
101#ifndef atomic_clear_mask
102ATOMIC_OP(and, &)
103#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
27#endif 104#endif
28 105
106#ifndef atomic_set_mask
107#define CONFIG_ARCH_HAS_ATOMIC_OR
108ATOMIC_OP(or, |)
109#define atomic_set_mask(i, v) atomic_or((i), (v))
110#endif
111
112#undef ATOMIC_OP_RETURN
113#undef ATOMIC_OP
114
29/* 115/*
30 * Atomic operations that C can't guarantee us. Useful for 116 * Atomic operations that C can't guarantee us. Useful for
31 * resource counting etc.. 117 * resource counting etc..
@@ -33,8 +119,6 @@
33 119
34#define ATOMIC_INIT(i) { (i) } 120#define ATOMIC_INIT(i) { (i) }
35 121
36#ifdef __KERNEL__
37
38/** 122/**
39 * atomic_read - read atomic variable 123 * atomic_read - read atomic variable
40 * @v: pointer of type atomic_t 124 * @v: pointer of type atomic_t
@@ -42,7 +126,7 @@
42 * Atomically reads the value of @v. 126 * Atomically reads the value of @v.
43 */ 127 */
44#ifndef atomic_read 128#ifndef atomic_read
45#define atomic_read(v) (*(volatile int *)&(v)->counter) 129#define atomic_read(v) ACCESS_ONCE((v)->counter)
46#endif 130#endif
47 131
48/** 132/**
@@ -56,52 +140,6 @@
56 140
57#include <linux/irqflags.h> 141#include <linux/irqflags.h>
58 142
59/**
60 * atomic_add_return - add integer to atomic variable
61 * @i: integer value to add
62 * @v: pointer of type atomic_t
63 *
64 * Atomically adds @i to @v and returns the result
65 */
66#ifndef atomic_add_return
67static inline int atomic_add_return(int i, atomic_t *v)
68{
69 unsigned long flags;
70 int temp;
71
72 raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
73 temp = v->counter;
74 temp += i;
75 v->counter = temp;
76 raw_local_irq_restore(flags);
77
78 return temp;
79}
80#endif
81
82/**
83 * atomic_sub_return - subtract integer from atomic variable
84 * @i: integer value to subtract
85 * @v: pointer of type atomic_t
86 *
87 * Atomically subtracts @i from @v and returns the result
88 */
89#ifndef atomic_sub_return
90static inline int atomic_sub_return(int i, atomic_t *v)
91{
92 unsigned long flags;
93 int temp;
94
95 raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
96 temp = v->counter;
97 temp -= i;
98 v->counter = temp;
99 raw_local_irq_restore(flags);
100
101 return temp;
102}
103#endif
104
105static inline int atomic_add_negative(int i, atomic_t *v) 143static inline int atomic_add_negative(int i, atomic_t *v)
106{ 144{
107 return atomic_add_return(i, v) < 0; 145 return atomic_add_return(i, v) < 0;
@@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v)
139 177
140static inline int __atomic_add_unless(atomic_t *v, int a, int u) 178static inline int __atomic_add_unless(atomic_t *v, int a, int u)
141{ 179{
142 int c, old; 180 int c, old;
143 c = atomic_read(v); 181 c = atomic_read(v);
144 while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) 182 while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
145 c = old; 183 c = old;
146 return c; 184 return c;
147}
148
149/**
150 * atomic_clear_mask - Atomically clear bits in atomic variable
151 * @mask: Mask of the bits to be cleared
152 * @v: pointer of type atomic_t
153 *
154 * Atomically clears the bits set in @mask from @v
155 */
156#ifndef atomic_clear_mask
157static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
158{
159 unsigned long flags;
160
161 mask = ~mask;
162 raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
163 v->counter &= mask;
164 raw_local_irq_restore(flags);
165} 185}
166#endif
167
168/**
169 * atomic_set_mask - Atomically set bits in atomic variable
170 * @mask: Mask of the bits to be set
171 * @v: pointer of type atomic_t
172 *
173 * Atomically sets the bits set in @mask in @v
174 */
175#ifndef atomic_set_mask
176static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
177{
178 unsigned long flags;
179
180 raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
181 v->counter |= mask;
182 raw_local_irq_restore(flags);
183}
184#endif
185 186
186#endif /* __KERNEL__ */
187#endif /* __ASM_GENERIC_ATOMIC_H */ 187#endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f9ee3d..30ad9c86cebb 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -20,10 +20,22 @@ typedef struct {
20 20
21extern long long atomic64_read(const atomic64_t *v); 21extern long long atomic64_read(const atomic64_t *v);
22extern void atomic64_set(atomic64_t *v, long long i); 22extern void atomic64_set(atomic64_t *v, long long i);
23extern void atomic64_add(long long a, atomic64_t *v); 23
24extern long long atomic64_add_return(long long a, atomic64_t *v); 24#define ATOMIC64_OP(op) \
25extern void atomic64_sub(long long a, atomic64_t *v); 25extern void atomic64_##op(long long a, atomic64_t *v);
26extern long long atomic64_sub_return(long long a, atomic64_t *v); 26
27#define ATOMIC64_OP_RETURN(op) \
28extern long long atomic64_##op##_return(long long a, atomic64_t *v);
29
30#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
31
32ATOMIC64_OPS(add)
33ATOMIC64_OPS(sub)
34
35#undef ATOMIC64_OPS
36#undef ATOMIC64_OP_RETURN
37#undef ATOMIC64_OP
38
27extern long long atomic64_dec_if_positive(atomic64_t *v); 39extern long long atomic64_dec_if_positive(atomic64_t *v);
28extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); 40extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
29extern long long atomic64_xchg(atomic64_t *v, long long new); 41extern long long atomic64_xchg(atomic64_t *v, long long new);
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
index d5cb78f53986..fe386fc6e85e 100644
--- a/include/asm-generic/cputime_jiffies.h
+++ b/include/asm-generic/cputime_jiffies.h
@@ -3,6 +3,8 @@
3 3
4typedef unsigned long __nocast cputime_t; 4typedef unsigned long __nocast cputime_t;
5 5
6#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
7
6#define cputime_one_jiffy jiffies_to_cputime(1) 8#define cputime_one_jiffy jiffies_to_cputime(1)
7#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) 9#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
8#define cputime_to_scaled(__ct) (__ct) 10#define cputime_to_scaled(__ct) (__ct)
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 4e817606c549..0419485891f2 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -21,6 +21,8 @@
21typedef u64 __nocast cputime_t; 21typedef u64 __nocast cputime_t;
22typedef u64 __nocast cputime64_t; 22typedef u64 __nocast cputime64_t;
23 23
24#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
25
24#define cputime_one_jiffy jiffies_to_cputime(1) 26#define cputime_one_jiffy jiffies_to_cputime(1)
25 27
26#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor) 28#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor)
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index de8bf89940f8..3378dcf4c31e 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -179,6 +179,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
179extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 179extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
180 void *cpu_addr, dma_addr_t dma_addr, size_t size); 180 void *cpu_addr, dma_addr_t dma_addr, size_t size);
181 181
182void *dma_common_contiguous_remap(struct page *page, size_t size,
183 unsigned long vm_flags,
184 pgprot_t prot, const void *caller);
185
186void *dma_common_pages_remap(struct page **pages, size_t size,
187 unsigned long vm_flags, pgprot_t prot,
188 const void *caller);
189void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
190
182/** 191/**
183 * dma_mmap_attrs - map a coherent DMA allocation into user space 192 * dma_mmap_attrs - map a coherent DMA allocation into user space
184 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 193 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -205,14 +214,6 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
205 214
206#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) 215#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
207 216
208static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
209 void *cpu_addr, dma_addr_t dma_addr, size_t size)
210{
211 DEFINE_DMA_ATTRS(attrs);
212 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
213 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
214}
215
216int 217int
217dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 218dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
218 void *cpu_addr, dma_addr_t dma_addr, size_t size); 219 void *cpu_addr, dma_addr_t dma_addr, size_t size);
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index c1d4105e1c1d..383ade1a211b 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -27,7 +27,7 @@
27 */ 27 */
28 28
29#ifndef ARCH_NR_GPIOS 29#ifndef ARCH_NR_GPIOS
30#define ARCH_NR_GPIOS 256 30#define ARCH_NR_GPIOS 512
31#endif 31#endif
32 32
33/* 33/*
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 975e1cc75edb..b8fdc57a7335 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -331,7 +331,7 @@ static inline void iounmap(void __iomem *addr)
331#ifndef CONFIG_GENERIC_IOMAP 331#ifndef CONFIG_GENERIC_IOMAP
332static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) 332static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
333{ 333{
334 return (void __iomem *) port; 334 return PCI_IOBASE + (port & IO_SPACE_LIMIT);
335} 335}
336 336
337static inline void ioport_unmap(void __iomem *p) 337static inline void ioport_unmap(void __iomem *p)
diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h
new file mode 100644
index 000000000000..a44f452c6590
--- /dev/null
+++ b/include/asm-generic/irq_work.h
@@ -0,0 +1,10 @@
1#ifndef __ASM_IRQ_WORK_H
2#define __ASM_IRQ_WORK_H
3
4static inline bool arch_irq_work_has_interrupt(void)
5{
6 return false;
7}
8
9#endif /* __ASM_IRQ_WORK_H */
10
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 53b2acc38213..081ff8826bf6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -249,6 +249,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
249#define pgprot_writecombine pgprot_noncached 249#define pgprot_writecombine pgprot_noncached
250#endif 250#endif
251 251
252#ifndef pgprot_device
253#define pgprot_device pgprot_noncached
254#endif
255
252/* 256/*
253 * When walking page tables, get the address of the next boundary, 257 * When walking page tables, get the address of the next boundary,
254 * or the end address of the range if that comes earlier. Although no 258 * or the end address of the range if that comes earlier. Although no
@@ -660,11 +664,12 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
660} 664}
661 665
662#ifdef CONFIG_NUMA_BALANCING 666#ifdef CONFIG_NUMA_BALANCING
663#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
664/* 667/*
665 * _PAGE_NUMA works identical to _PAGE_PROTNONE (it's actually the 668 * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that
666 * same bit too). It's set only when _PAGE_PRESET is not set and it's 669 * is protected for PROT_NONE and a NUMA hinting fault entry. If the
667 * never set if _PAGE_PRESENT is set. 670 * architecture defines __PAGE_PROTNONE then it should take that into account
671 * but those that do not can rely on the fact that the NUMA hinting scanner
672 * skips inaccessible VMAs.
668 * 673 *
669 * pte/pmd_present() returns true if pte/pmd_numa returns true. Page 674 * pte/pmd_present() returns true if pte/pmd_numa returns true. Page
670 * fault triggers on those regions if pte/pmd_numa returns true 675 * fault triggers on those regions if pte/pmd_numa returns true
@@ -673,16 +678,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
673#ifndef pte_numa 678#ifndef pte_numa
674static inline int pte_numa(pte_t pte) 679static inline int pte_numa(pte_t pte)
675{ 680{
676 return (pte_flags(pte) & 681 return ptenuma_flags(pte) == _PAGE_NUMA;
677 (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
678} 682}
679#endif 683#endif
680 684
681#ifndef pmd_numa 685#ifndef pmd_numa
682static inline int pmd_numa(pmd_t pmd) 686static inline int pmd_numa(pmd_t pmd)
683{ 687{
684 return (pmd_flags(pmd) & 688 return pmdnuma_flags(pmd) == _PAGE_NUMA;
685 (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
686} 689}
687#endif 690#endif
688 691
@@ -722,6 +725,8 @@ static inline pte_t pte_mknuma(pte_t pte)
722{ 725{
723 pteval_t val = pte_val(pte); 726 pteval_t val = pte_val(pte);
724 727
728 VM_BUG_ON(!(val & _PAGE_PRESENT));
729
725 val &= ~_PAGE_PRESENT; 730 val &= ~_PAGE_PRESENT;
726 val |= _PAGE_NUMA; 731 val |= _PAGE_NUMA;
727 732
@@ -765,16 +770,6 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
765} 770}
766#endif 771#endif
767#else 772#else
768extern int pte_numa(pte_t pte);
769extern int pmd_numa(pmd_t pmd);
770extern pte_t pte_mknonnuma(pte_t pte);
771extern pmd_t pmd_mknonnuma(pmd_t pmd);
772extern pte_t pte_mknuma(pte_t pte);
773extern pmd_t pmd_mknuma(pmd_t pmd);
774extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
775extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
776#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
777#else
778static inline int pmd_numa(pmd_t pmd) 773static inline int pmd_numa(pmd_t pmd)
779{ 774{
780 return 0; 775 return 0;
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index f1a24b5c3b90..b58fd667f87b 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -3,6 +3,8 @@
3 3
4/* References to section boundaries */ 4/* References to section boundaries */
5 5
6#include <linux/compiler.h>
7
6/* 8/*
7 * Usage guidelines: 9 * Usage guidelines:
8 * _text, _data: architecture specific, don't use them in arch-independent code 10 * _text, _data: architecture specific, don't use them in arch-independent code
@@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[];
37/* Start and end of .ctors section - used for constructor calls. */ 39/* Start and end of .ctors section - used for constructor calls. */
38extern char __ctors_start[], __ctors_end[]; 40extern char __ctors_start[], __ctors_end[];
39 41
42extern __visible const void __nosave_begin, __nosave_end;
43
40/* function descriptor handling (if any). Override 44/* function descriptor handling (if any). Override
41 * in asm/sections.h */ 45 * in asm/sections.h */
42#ifndef dereference_function_descriptor 46#ifndef dereference_function_descriptor
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 5ba0360663a7..aa70cbda327c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -40,6 +40,8 @@
40 * } 40 * }
41 * 41 *
42 * [__init_begin, __init_end] is the init section that may be freed after init 42 * [__init_begin, __init_end] is the init section that may be freed after init
43 * // __init_begin and __init_end should be page aligned, so that we can
44 * // free the whole .init memory
43 * [_stext, _etext] is the text section 45 * [_stext, _etext] is the text section
44 * [_sdata, _edata] is the data section 46 * [_sdata, _edata] is the data section
45 * 47 *
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 831d786976c5..5186f750c713 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -82,15 +82,6 @@ typedef uint32_t drbg_flag_t;
82struct drbg_core { 82struct drbg_core {
83 drbg_flag_t flags; /* flags for the cipher */ 83 drbg_flag_t flags; /* flags for the cipher */
84 __u8 statelen; /* maximum state length */ 84 __u8 statelen; /* maximum state length */
85 /*
86 * maximum length of personalization string or additional input
87 * string -- exponent for base 2
88 */
89 __u8 max_addtllen;
90 /* maximum bits per RNG request -- exponent for base 2*/
91 __u8 max_bits;
92 /* maximum number of requests -- exponent for base 2 */
93 __u8 max_req;
94 __u8 blocklen_bytes; /* block size of output in bytes */ 85 __u8 blocklen_bytes; /* block size of output in bytes */
95 char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */ 86 char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */
96 /* kernel crypto API backend cipher name */ 87 /* kernel crypto API backend cipher name */
@@ -156,18 +147,33 @@ static inline __u8 drbg_keylen(struct drbg_state *drbg)
156 147
157static inline size_t drbg_max_request_bytes(struct drbg_state *drbg) 148static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
158{ 149{
159 /* max_bits is in bits, but buflen is in bytes */ 150 /* SP800-90A requires the limit 2**19 bits, but we return bytes */
160 return (1 << (drbg->core->max_bits - 3)); 151 return (1 << 16);
161} 152}
162 153
163static inline size_t drbg_max_addtl(struct drbg_state *drbg) 154static inline size_t drbg_max_addtl(struct drbg_state *drbg)
164{ 155{
165 return (1UL<<(drbg->core->max_addtllen)); 156 /* SP800-90A requires 2**35 bytes additional info str / pers str */
157#if (__BITS_PER_LONG == 32)
158 /*
159 * SP800-90A allows smaller maximum numbers to be returned -- we
160 * return SIZE_MAX - 1 to allow the verification of the enforcement
161 * of this value in drbg_healthcheck_sanity.
162 */
163 return (SIZE_MAX - 1);
164#else
165 return (1UL<<35);
166#endif
166} 167}
167 168
168static inline size_t drbg_max_requests(struct drbg_state *drbg) 169static inline size_t drbg_max_requests(struct drbg_state *drbg)
169{ 170{
170 return (1UL<<(drbg->core->max_req)); 171 /* SP800-90A requires 2**48 maximum requests before reseeding */
172#if (__BITS_PER_LONG == 32)
173 return SIZE_MAX;
174#else
175 return (1UL<<48);
176#endif
171} 177}
172 178
173/* 179/*
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 9b6f32a6cad1..3b4af1d7c7e9 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -117,6 +117,15 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
117int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); 117int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
118int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); 118int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
119 119
120int shash_ahash_mcryptd_update(struct ahash_request *req,
121 struct shash_desc *desc);
122int shash_ahash_mcryptd_final(struct ahash_request *req,
123 struct shash_desc *desc);
124int shash_ahash_mcryptd_finup(struct ahash_request *req,
125 struct shash_desc *desc);
126int shash_ahash_mcryptd_digest(struct ahash_request *req,
127 struct shash_desc *desc);
128
120int crypto_init_shash_ops_async(struct crypto_tfm *tfm); 129int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
121 130
122static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) 131static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
new file mode 100644
index 000000000000..c23ee1f7ee80
--- /dev/null
+++ b/include/crypto/mcryptd.h
@@ -0,0 +1,112 @@
1/*
2 * Software async multibuffer crypto daemon headers
3 *
4 * Author:
5 * Tim Chen <tim.c.chen@linux.intel.com>
6 *
7 * Copyright (c) 2014, Intel Corporation.
8 */
9
10#ifndef _CRYPTO_MCRYPT_H
11#define _CRYPTO_MCRYPT_H
12
13#include <linux/crypto.h>
14#include <linux/kernel.h>
15#include <crypto/hash.h>
16
17struct mcryptd_ahash {
18 struct crypto_ahash base;
19};
20
21static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
22 struct crypto_ahash *tfm)
23{
24 return (struct mcryptd_ahash *)tfm;
25}
26
27struct mcryptd_cpu_queue {
28 struct crypto_queue queue;
29 struct work_struct work;
30};
31
32struct mcryptd_queue {
33 struct mcryptd_cpu_queue __percpu *cpu_queue;
34};
35
36struct mcryptd_instance_ctx {
37 struct crypto_spawn spawn;
38 struct mcryptd_queue *queue;
39};
40
41struct mcryptd_hash_ctx {
42 struct crypto_shash *child;
43 struct mcryptd_alg_state *alg_state;
44};
45
46struct mcryptd_tag {
47 /* seq number of request */
48 unsigned seq_num;
49 /* arrival time of request */
50 unsigned long arrival;
51 unsigned long expire;
52 int cpu;
53};
54
55struct mcryptd_hash_request_ctx {
56 struct list_head waiter;
57 crypto_completion_t complete;
58 struct mcryptd_tag tag;
59 struct crypto_hash_walk walk;
60 u8 *out;
61 int flag;
62 struct shash_desc desc;
63};
64
65struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
66 u32 type, u32 mask);
67struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
68struct shash_desc *mcryptd_shash_desc(struct ahash_request *req);
69void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
70void mcryptd_flusher(struct work_struct *work);
71
72enum mcryptd_req_type {
73 MCRYPTD_NONE,
74 MCRYPTD_UPDATE,
75 MCRYPTD_FINUP,
76 MCRYPTD_DIGEST,
77 MCRYPTD_FINAL
78};
79
80struct mcryptd_alg_cstate {
81 unsigned long next_flush;
82 unsigned next_seq_num;
83 bool flusher_engaged;
84 struct delayed_work flush;
85 int cpu;
86 struct mcryptd_alg_state *alg_state;
87 void *mgr;
88 spinlock_t work_lock;
89 struct list_head work_list;
90 struct list_head flush_list;
91};
92
93struct mcryptd_alg_state {
94 struct mcryptd_alg_cstate __percpu *alg_cstate;
95 unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
96};
97
98/* return delay in jiffies from current time */
99static inline unsigned long get_delay(unsigned long t)
100{
101 long delay;
102
103 delay = (long) t - (long) jiffies;
104 if (delay <= 0)
105 return 0;
106 else
107 return (unsigned long) delay;
108}
109
110void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
111
112#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 0d164c6af539..54add2069901 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
15#define _LINUX_PUBLIC_KEY_H 15#define _LINUX_PUBLIC_KEY_H
16 16
17#include <linux/mpi.h> 17#include <linux/mpi.h>
18#include <keys/asymmetric-type.h>
18#include <crypto/hash_info.h> 19#include <crypto/hash_info.h>
19 20
20enum pkey_algo { 21enum pkey_algo {
@@ -98,8 +99,9 @@ struct key;
98extern int verify_signature(const struct key *key, 99extern int verify_signature(const struct key *key,
99 const struct public_key_signature *sig); 100 const struct public_key_signature *sig);
100 101
102struct asymmetric_key_id;
101extern struct key *x509_request_asymmetric_key(struct key *keyring, 103extern struct key *x509_request_asymmetric_key(struct key *keyring,
102 const char *issuer, 104 const struct asymmetric_key_id *kid,
103 const char *key_id); 105 bool partial);
104 106
105#endif /* _LINUX_PUBLIC_KEY_H */ 107#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 654151e24288..ddaef8620b2c 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -128,7 +128,7 @@
128#define IMX6Q_CLK_ECSPI5 116 128#define IMX6Q_CLK_ECSPI5 116
129#define IMX6DL_CLK_I2C4 116 129#define IMX6DL_CLK_I2C4 116
130#define IMX6QDL_CLK_ENET 117 130#define IMX6QDL_CLK_ENET 117
131#define IMX6QDL_CLK_ESAI 118 131#define IMX6QDL_CLK_ESAI_EXTAL 118
132#define IMX6QDL_CLK_GPT_IPG 119 132#define IMX6QDL_CLK_GPT_IPG 119
133#define IMX6QDL_CLK_GPT_IPG_PER 120 133#define IMX6QDL_CLK_GPT_IPG_PER 120
134#define IMX6QDL_CLK_GPU2D_CORE 121 134#define IMX6QDL_CLK_GPU2D_CORE 121
@@ -218,7 +218,36 @@
218#define IMX6QDL_CLK_LVDS2_SEL 205 218#define IMX6QDL_CLK_LVDS2_SEL 205
219#define IMX6QDL_CLK_LVDS1_GATE 206 219#define IMX6QDL_CLK_LVDS1_GATE 206
220#define IMX6QDL_CLK_LVDS2_GATE 207 220#define IMX6QDL_CLK_LVDS2_GATE 207
221#define IMX6QDL_CLK_ESAI_AHB 208 221#define IMX6QDL_CLK_ESAI_IPG 208
222#define IMX6QDL_CLK_END 209 222#define IMX6QDL_CLK_ESAI_MEM 209
223#define IMX6QDL_CLK_ASRC_IPG 210
224#define IMX6QDL_CLK_ASRC_MEM 211
225#define IMX6QDL_CLK_LVDS1_IN 212
226#define IMX6QDL_CLK_LVDS2_IN 213
227#define IMX6QDL_CLK_ANACLK1 214
228#define IMX6QDL_CLK_ANACLK2 215
229#define IMX6QDL_PLL1_BYPASS_SRC 216
230#define IMX6QDL_PLL2_BYPASS_SRC 217
231#define IMX6QDL_PLL3_BYPASS_SRC 218
232#define IMX6QDL_PLL4_BYPASS_SRC 219
233#define IMX6QDL_PLL5_BYPASS_SRC 220
234#define IMX6QDL_PLL6_BYPASS_SRC 221
235#define IMX6QDL_PLL7_BYPASS_SRC 222
236#define IMX6QDL_CLK_PLL1 223
237#define IMX6QDL_CLK_PLL2 224
238#define IMX6QDL_CLK_PLL3 225
239#define IMX6QDL_CLK_PLL4 226
240#define IMX6QDL_CLK_PLL5 227
241#define IMX6QDL_CLK_PLL6 228
242#define IMX6QDL_CLK_PLL7 229
243#define IMX6QDL_PLL1_BYPASS 230
244#define IMX6QDL_PLL2_BYPASS 231
245#define IMX6QDL_PLL3_BYPASS 232
246#define IMX6QDL_PLL4_BYPASS 233
247#define IMX6QDL_PLL5_BYPASS 234
248#define IMX6QDL_PLL6_BYPASS 235
249#define IMX6QDL_PLL7_BYPASS 236
250#define IMX6QDL_CLK_GPT_3M 237
251#define IMX6QDL_CLK_END 238
223 252
224#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ 253#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h
index b91dd462ba85..9ce4e421096f 100644
--- a/include/dt-bindings/clock/imx6sl-clock.h
+++ b/include/dt-bindings/clock/imx6sl-clock.h
@@ -146,6 +146,34 @@
146#define IMX6SL_CLK_PLL4_AUDIO_DIV 133 146#define IMX6SL_CLK_PLL4_AUDIO_DIV 133
147#define IMX6SL_CLK_SPBA 134 147#define IMX6SL_CLK_SPBA 134
148#define IMX6SL_CLK_ENET 135 148#define IMX6SL_CLK_ENET 135
149#define IMX6SL_CLK_END 136 149#define IMX6SL_CLK_LVDS1_SEL 136
150#define IMX6SL_CLK_LVDS1_OUT 137
151#define IMX6SL_CLK_LVDS1_IN 138
152#define IMX6SL_CLK_ANACLK1 139
153#define IMX6SL_PLL1_BYPASS_SRC 140
154#define IMX6SL_PLL2_BYPASS_SRC 141
155#define IMX6SL_PLL3_BYPASS_SRC 142
156#define IMX6SL_PLL4_BYPASS_SRC 143
157#define IMX6SL_PLL5_BYPASS_SRC 144
158#define IMX6SL_PLL6_BYPASS_SRC 145
159#define IMX6SL_PLL7_BYPASS_SRC 146
160#define IMX6SL_CLK_PLL1 147
161#define IMX6SL_CLK_PLL2 148
162#define IMX6SL_CLK_PLL3 149
163#define IMX6SL_CLK_PLL4 150
164#define IMX6SL_CLK_PLL5 151
165#define IMX6SL_CLK_PLL6 152
166#define IMX6SL_CLK_PLL7 153
167#define IMX6SL_PLL1_BYPASS 154
168#define IMX6SL_PLL2_BYPASS 155
169#define IMX6SL_PLL3_BYPASS 156
170#define IMX6SL_PLL4_BYPASS 157
171#define IMX6SL_PLL5_BYPASS 158
172#define IMX6SL_PLL6_BYPASS 159
173#define IMX6SL_PLL7_BYPASS 160
174#define IMX6SL_CLK_SSI1_IPG 161
175#define IMX6SL_CLK_SSI2_IPG 162
176#define IMX6SL_CLK_SSI3_IPG 163
177#define IMX6SL_CLK_END 164
150 178
151#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */ 179#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h
index 421d8bb76f2f..995709119ec5 100644
--- a/include/dt-bindings/clock/imx6sx-clock.h
+++ b/include/dt-bindings/clock/imx6sx-clock.h
@@ -251,6 +251,29 @@
251#define IMX6SX_CLK_SAI2_IPG 238 251#define IMX6SX_CLK_SAI2_IPG 238
252#define IMX6SX_CLK_ESAI_IPG 239 252#define IMX6SX_CLK_ESAI_IPG 239
253#define IMX6SX_CLK_ESAI_MEM 240 253#define IMX6SX_CLK_ESAI_MEM 240
254#define IMX6SX_CLK_CLK_END 241 254#define IMX6SX_CLK_LVDS1_IN 241
255#define IMX6SX_CLK_ANACLK1 242
256#define IMX6SX_PLL1_BYPASS_SRC 243
257#define IMX6SX_PLL2_BYPASS_SRC 244
258#define IMX6SX_PLL3_BYPASS_SRC 245
259#define IMX6SX_PLL4_BYPASS_SRC 246
260#define IMX6SX_PLL5_BYPASS_SRC 247
261#define IMX6SX_PLL6_BYPASS_SRC 248
262#define IMX6SX_PLL7_BYPASS_SRC 249
263#define IMX6SX_CLK_PLL1 250
264#define IMX6SX_CLK_PLL2 251
265#define IMX6SX_CLK_PLL3 252
266#define IMX6SX_CLK_PLL4 253
267#define IMX6SX_CLK_PLL5 254
268#define IMX6SX_CLK_PLL6 255
269#define IMX6SX_CLK_PLL7 256
270#define IMX6SX_PLL1_BYPASS 257
271#define IMX6SX_PLL2_BYPASS 258
272#define IMX6SX_PLL3_BYPASS 259
273#define IMX6SX_PLL4_BYPASS 260
274#define IMX6SX_PLL5_BYPASS 261
275#define IMX6SX_PLL6_BYPASS 262
276#define IMX6SX_PLL7_BYPASS 263
277#define IMX6SX_CLK_CLK_END 264
255 278
256#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */ 279#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */
diff --git a/include/dt-bindings/clock/r8a7740-clock.h b/include/dt-bindings/clock/r8a7740-clock.h
new file mode 100644
index 000000000000..f6b4b0fe7a43
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7740-clock.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2014 Ulrich Hecht
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __DT_BINDINGS_CLOCK_R8A7740_H__
11#define __DT_BINDINGS_CLOCK_R8A7740_H__
12
13/* CPG */
14#define R8A7740_CLK_SYSTEM 0
15#define R8A7740_CLK_PLLC0 1
16#define R8A7740_CLK_PLLC1 2
17#define R8A7740_CLK_PLLC2 3
18#define R8A7740_CLK_R 4
19#define R8A7740_CLK_USB24S 5
20#define R8A7740_CLK_I 6
21#define R8A7740_CLK_ZG 7
22#define R8A7740_CLK_B 8
23#define R8A7740_CLK_M1 9
24#define R8A7740_CLK_HP 10
25#define R8A7740_CLK_HPP 11
26#define R8A7740_CLK_USBP 12
27#define R8A7740_CLK_S 13
28#define R8A7740_CLK_ZB 14
29#define R8A7740_CLK_M3 15
30#define R8A7740_CLK_CP 16
31
32/* MSTP1 */
33#define R8A7740_CLK_CEU21 28
34#define R8A7740_CLK_CEU20 27
35#define R8A7740_CLK_TMU0 25
36#define R8A7740_CLK_LCDC1 17
37#define R8A7740_CLK_IIC0 16
38#define R8A7740_CLK_TMU1 11
39#define R8A7740_CLK_LCDC0 0
40
41/* MSTP2 */
42#define R8A7740_CLK_SCIFA6 30
43#define R8A7740_CLK_SCIFA7 22
44#define R8A7740_CLK_DMAC1 18
45#define R8A7740_CLK_DMAC2 17
46#define R8A7740_CLK_DMAC3 16
47#define R8A7740_CLK_USBDMAC 14
48#define R8A7740_CLK_SCIFA5 7
49#define R8A7740_CLK_SCIFB 6
50#define R8A7740_CLK_SCIFA0 4
51#define R8A7740_CLK_SCIFA1 3
52#define R8A7740_CLK_SCIFA2 2
53#define R8A7740_CLK_SCIFA3 1
54#define R8A7740_CLK_SCIFA4 0
55
56/* MSTP3 */
57#define R8A7740_CLK_CMT1 29
58#define R8A7740_CLK_FSI 28
59#define R8A7740_CLK_IIC1 23
60#define R8A7740_CLK_USBF 20
61#define R8A7740_CLK_SDHI0 14
62#define R8A7740_CLK_SDHI1 13
63#define R8A7740_CLK_MMC 12
64#define R8A7740_CLK_GETHER 9
65#define R8A7740_CLK_TPU0 4
66
67/* MSTP4 */
68#define R8A7740_CLK_USBH 16
69#define R8A7740_CLK_SDHI2 15
70#define R8A7740_CLK_USBFUNC 7
71#define R8A7740_CLK_USBPHY 6
72
73/* SUBCK* */
74#define R8A7740_CLK_SUBCK 9
75#define R8A7740_CLK_SUBCK2 10
76
77#endif /* __DT_BINDINGS_CLOCK_R8A7740_H__ */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index f929a79e6998..8ea7ab0346ad 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -26,6 +26,7 @@
26#define R8A7790_CLK_MSIOF0 0 26#define R8A7790_CLK_MSIOF0 0
27 27
28/* MSTP1 */ 28/* MSTP1 */
29#define R8A7790_CLK_JPU 6
29#define R8A7790_CLK_TMU1 11 30#define R8A7790_CLK_TMU1 11
30#define R8A7790_CLK_TMU3 21 31#define R8A7790_CLK_TMU3 21
31#define R8A7790_CLK_TMU2 22 32#define R8A7790_CLK_TMU2 22
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index f0d4d1049162..58c3f49d068c 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -25,6 +25,7 @@
25#define R8A7791_CLK_MSIOF0 0 25#define R8A7791_CLK_MSIOF0 0
26 26
27/* MSTP1 */ 27/* MSTP1 */
28#define R8A7791_CLK_JPU 6
28#define R8A7791_CLK_TMU1 11 29#define R8A7791_CLK_TMU1 11
29#define R8A7791_CLK_TMU3 21 30#define R8A7791_CLK_TMU3 21
30#define R8A7791_CLK_TMU2 22 31#define R8A7791_CLK_TMU2 22
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
new file mode 100644
index 000000000000..9ac1043e25bc
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -0,0 +1,80 @@
1/*
2 * Copyright (C) 2014 Renesas Electronics Corporation
3 * Copyright 2013 Ideas On Board SPRL
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#ifndef __DT_BINDINGS_CLOCK_R8A7794_H__
12#define __DT_BINDINGS_CLOCK_R8A7794_H__
13
14/* CPG */
15#define R8A7794_CLK_MAIN 0
16#define R8A7794_CLK_PLL0 1
17#define R8A7794_CLK_PLL1 2
18#define R8A7794_CLK_PLL3 3
19#define R8A7794_CLK_LB 4
20#define R8A7794_CLK_QSPI 5
21#define R8A7794_CLK_SDH 6
22#define R8A7794_CLK_SD0 7
23#define R8A7794_CLK_Z 8
24
25/* MSTP0 */
26#define R8A7794_CLK_MSIOF0 0
27
28/* MSTP1 */
29#define R8A7794_CLK_TMU1 11
30#define R8A7794_CLK_TMU3 21
31#define R8A7794_CLK_TMU2 22
32#define R8A7794_CLK_CMT0 24
33#define R8A7794_CLK_TMU0 25
34
35/* MSTP2 */
36#define R8A7794_CLK_SCIFA2 2
37#define R8A7794_CLK_SCIFA1 3
38#define R8A7794_CLK_SCIFA0 4
39#define R8A7794_CLK_MSIOF2 5
40#define R8A7794_CLK_SCIFB0 6
41#define R8A7794_CLK_SCIFB1 7
42#define R8A7794_CLK_MSIOF1 8
43#define R8A7794_CLK_SCIFB2 16
44
45/* MSTP3 */
46#define R8A7794_CLK_CMT1 29
47
48/* MSTP5 */
49#define R8A7794_CLK_THERMAL 22
50#define R8A7794_CLK_PWM 23
51
52/* MSTP7 */
53#define R8A7794_CLK_HSCIF2 13
54#define R8A7794_CLK_SCIF5 14
55#define R8A7794_CLK_SCIF4 15
56#define R8A7794_CLK_HSCIF1 16
57#define R8A7794_CLK_HSCIF0 17
58#define R8A7794_CLK_SCIF3 18
59#define R8A7794_CLK_SCIF2 19
60#define R8A7794_CLK_SCIF1 20
61#define R8A7794_CLK_SCIF0 21
62
63/* MSTP8 */
64#define R8A7794_CLK_ETHER 13
65
66/* MSTP9 */
67#define R8A7794_CLK_GPIO6 5
68#define R8A7794_CLK_GPIO5 7
69#define R8A7794_CLK_GPIO4 8
70#define R8A7794_CLK_GPIO3 9
71#define R8A7794_CLK_GPIO2 10
72#define R8A7794_CLK_GPIO1 11
73#define R8A7794_CLK_GPIO0 12
74
75/* MSTP11 */
76#define R8A7794_CLK_SCIFA3 6
77#define R8A7794_CLK_SCIFA4 7
78#define R8A7794_CLK_SCIFA5 8
79
80#endif /* __DT_BINDINGS_CLOCK_R8A7794_H__ */
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 00953d9484cb..d6b56b21539b 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -166,6 +166,9 @@
166#define VF610_CLK_DMAMUX3 153 166#define VF610_CLK_DMAMUX3 153
167#define VF610_CLK_FLEXCAN0_EN 154 167#define VF610_CLK_FLEXCAN0_EN 154
168#define VF610_CLK_FLEXCAN1_EN 155 168#define VF610_CLK_FLEXCAN1_EN 155
169#define VF610_CLK_END 156 169#define VF610_CLK_PLL7_MAIN 156
170#define VF610_CLK_USBPHY0 157
171#define VF610_CLK_USBPHY1 158
172#define VF610_CLK_END 159
170 173
171#endif /* __DT_BINDINGS_CLOCK_VF610_H */ 174#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/input/ti-drv260x.h b/include/dt-bindings/input/ti-drv260x.h
new file mode 100644
index 000000000000..2626e6d9f707
--- /dev/null
+++ b/include/dt-bindings/input/ti-drv260x.h
@@ -0,0 +1,36 @@
1/*
2 * DRV260X haptics driver family
3 *
4 * Author: Dan Murphy <dmurphy@ti.com>
5 *
6 * Copyright: (C) 2014 Texas Instruments, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef _DT_BINDINGS_TI_DRV260X_H
19#define _DT_BINDINGS_TI_DRV260X_H
20
21/* Calibration Types */
22#define DRV260X_LRA_MODE 0x00
23#define DRV260X_LRA_NO_CAL_MODE 0x01
24#define DRV260X_ERM_MODE 0x02
25
26/* Library Selection */
27#define DRV260X_LIB_EMPTY 0x00
28#define DRV260X_ERM_LIB_A 0x01
29#define DRV260X_ERM_LIB_B 0x02
30#define DRV260X_ERM_LIB_C 0x03
31#define DRV260X_ERM_LIB_D 0x04
32#define DRV260X_ERM_LIB_E 0x05
33#define DRV260X_LIB_LRA 0x06
34#define DRV260X_ERM_LIB_F 0x07
35
36#endif
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index 0fee6ff77ffc..bbca3d038900 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -20,6 +20,11 @@
20 20
21#define AT91_PINCTRL_PULL_UP_DEGLITCH (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DEGLITCH) 21#define AT91_PINCTRL_PULL_UP_DEGLITCH (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DEGLITCH)
22 22
23#define AT91_PINCTRL_DRIVE_STRENGTH_DEFAULT (0x0 << 5)
24#define AT91_PINCTRL_DRIVE_STRENGTH_LOW (0x1 << 5)
25#define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5)
26#define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5)
27
23#define AT91_PIOA 0 28#define AT91_PIOA 0
24#define AT91_PIOB 1 29#define AT91_PIOB 1
25#define AT91_PIOC 2 30#define AT91_PIOC 2
diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h
index cd5788be82ce..743e66a95e13 100644
--- a/include/dt-bindings/pinctrl/rockchip.h
+++ b/include/dt-bindings/pinctrl/rockchip.h
@@ -28,5 +28,7 @@
28#define RK_FUNC_GPIO 0 28#define RK_FUNC_GPIO 0
29#define RK_FUNC_1 1 29#define RK_FUNC_1 1
30#define RK_FUNC_2 2 30#define RK_FUNC_2 2
31#define RK_FUNC_3 3
32#define RK_FUNC_4 4
31 33
32#endif 34#endif
diff --git a/include/dt-bindings/sound/cs35l32.h b/include/dt-bindings/sound/cs35l32.h
new file mode 100644
index 000000000000..0c6d6a3c15a2
--- /dev/null
+++ b/include/dt-bindings/sound/cs35l32.h
@@ -0,0 +1,26 @@
1#ifndef __DT_CS35L32_H
2#define __DT_CS35L32_H
3
4#define CS35L32_BOOST_MGR_AUTO 0
5#define CS35L32_BOOST_MGR_AUTO_AUDIO 1
6#define CS35L32_BOOST_MGR_BYPASS 2
7#define CS35L32_BOOST_MGR_FIXED 3
8
9#define CS35L32_DATA_CFG_LR_VP 0
10#define CS35L32_DATA_CFG_LR_STAT 1
11#define CS35L32_DATA_CFG_LR 2
12#define CS35L32_DATA_CFG_LR_VPSTAT 3
13
14#define CS35L32_BATT_THRESH_3_1V 0
15#define CS35L32_BATT_THRESH_3_2V 1
16#define CS35L32_BATT_THRESH_3_3V 2
17#define CS35L32_BATT_THRESH_3_4V 3
18
19#define CS35L32_BATT_RECOV_3_1V 0
20#define CS35L32_BATT_RECOV_3_2V 1
21#define CS35L32_BATT_RECOV_3_3V 2
22#define CS35L32_BATT_RECOV_3_4V 3
23#define CS35L32_BATT_RECOV_3_5V 4
24#define CS35L32_BATT_RECOV_3_6V 5
25
26#endif /* __DT_CS35L32_H */
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 7dd473496180..c0754abb2f56 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -19,6 +19,47 @@
19extern struct key_type key_type_asymmetric; 19extern struct key_type key_type_asymmetric;
20 20
21/* 21/*
22 * Identifiers for an asymmetric key ID. We have three ways of looking up a
23 * key derived from an X.509 certificate:
24 *
25 * (1) Serial Number & Issuer. Non-optional. This is the only valid way to
26 * map a PKCS#7 signature to an X.509 certificate.
27 *
28 * (2) Issuer & Subject Unique IDs. Optional. These were the original way to
29 * match X.509 certificates, but have fallen into disuse in favour of (3).
30 *
31 * (3) Auth & Subject Key Identifiers. Optional. SKIDs are only provided on
32 * CA keys that are intended to sign other keys, so don't appear in end
33 * user certificates unless forced.
34 *
35 * We could also support a PGP key identifier, which is just a SHA1 sum of the
36 * public key and certain parameters, but since we don't support PGP keys at
37 * the moment, we shall ignore those.
38 *
39 * What we actually do is provide a place where binary identifiers can be
40 * stashed and then compared against when checking for an id match.
41 */
42struct asymmetric_key_id {
43 unsigned short len;
44 unsigned char data[];
45};
46
47struct asymmetric_key_ids {
48 void *id[2];
49};
50
51extern bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
52 const struct asymmetric_key_id *kid2);
53
54extern bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1,
55 const struct asymmetric_key_id *kid2);
56
57extern struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
58 size_t len_1,
59 const void *val_2,
60 size_t len_2);
61
62/*
22 * The payload is at the discretion of the subtype. 63 * The payload is at the discretion of the subtype.
23 */ 64 */
24 65
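The new helpers amount to flattening two binary blobs into a single comparable identifier. As a rough, non-authoritative sketch only (example_match_cert, raw_serial and raw_issuer are hypothetical names, and the serial-then-issuer ordering is an assumption about how a certificate parser would combine the two fields):

#include <linux/err.h>
#include <linux/slab.h>
#include <keys/asymmetric-type.h>

static int example_match_cert(const void *raw_serial, size_t serial_len,
			      const void *raw_issuer, size_t issuer_len,
			      const struct asymmetric_key_id *wanted)
{
	struct asymmetric_key_id *kid;
	bool match;

	/* Concatenate the two blobs into one kmalloc'ed binary identifier. */
	kid = asymmetric_key_generate_id(raw_serial, serial_len,
					 raw_issuer, issuer_len);
	if (IS_ERR(kid))
		return PTR_ERR(kid);

	match = asymmetric_key_id_same(wanted, kid);
	kfree(kid);
	return match ? 0 : -ENOKEY;
}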
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index 3ab1873a4bfa..cebefb069c44 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -40,7 +40,6 @@ struct key_preparsed_payload;
40extern int user_preparse(struct key_preparsed_payload *prep); 40extern int user_preparse(struct key_preparsed_payload *prep);
41extern void user_free_preparse(struct key_preparsed_payload *prep); 41extern void user_free_preparse(struct key_preparsed_payload *prep);
42extern int user_update(struct key *key, struct key_preparsed_payload *prep); 42extern int user_update(struct key *key, struct key_preparsed_payload *prep);
43extern int user_match(const struct key *key, const void *criterion);
44extern void user_revoke(struct key *key); 43extern void user_revoke(struct key *key);
45extern void user_destroy(struct key *key); 44extern void user_destroy(struct key *key);
46extern void user_describe(const struct key *user, struct seq_file *m); 45extern void user_describe(const struct key *user, struct seq_file *m);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 35b0c121bb65..2f2aac8448a4 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -25,26 +25,25 @@
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <linux/types.h> 26#include <linux/types.h>
27 27
28#define VGIC_NR_IRQS 256 28#define VGIC_NR_IRQS_LEGACY 256
29#define VGIC_NR_SGIS 16 29#define VGIC_NR_SGIS 16
30#define VGIC_NR_PPIS 16 30#define VGIC_NR_PPIS 16
31#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) 31#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
32#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
33#define VGIC_MAX_CPUS KVM_MAX_VCPUS
34 32
35#define VGIC_V2_MAX_LRS (1 << 6) 33#define VGIC_V2_MAX_LRS (1 << 6)
36#define VGIC_V3_MAX_LRS 16 34#define VGIC_V3_MAX_LRS 16
35#define VGIC_MAX_IRQS 1024
37 36
38/* Sanity checks... */ 37/* Sanity checks... */
39#if (VGIC_MAX_CPUS > 8) 38#if (KVM_MAX_VCPUS > 8)
40#error Invalid number of CPU interfaces 39#error Invalid number of CPU interfaces
41#endif 40#endif
42 41
43#if (VGIC_NR_IRQS & 31) 42#if (VGIC_NR_IRQS_LEGACY & 31)
44#error "VGIC_NR_IRQS must be a multiple of 32" 43#error "VGIC_NR_IRQS must be a multiple of 32"
45#endif 44#endif
46 45
47#if (VGIC_NR_IRQS > 1024) 46#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
48#error "VGIC_NR_IRQS must be <= 1024" 47#error "VGIC_NR_IRQS must be <= 1024"
49#endif 48#endif
50 49
@@ -54,19 +53,33 @@
54 * - a bunch of shared interrupts (SPI) 53 * - a bunch of shared interrupts (SPI)
55 */ 54 */
56struct vgic_bitmap { 55struct vgic_bitmap {
57 union { 56 /*
58 u32 reg[VGIC_NR_PRIVATE_IRQS / 32]; 57 * - One UL per VCPU for private interrupts (assumes UL is at
59 DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS); 58 * least 32 bits)
60 } percpu[VGIC_MAX_CPUS]; 59 * - As many UL as necessary for shared interrupts.
61 union { 60 *
62 u32 reg[VGIC_NR_SHARED_IRQS / 32]; 61 * The private interrupts are accessed via the "private"
63 DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS); 62 * field, one UL per vcpu (the state for vcpu n is in
64 } shared; 63 * private[n]). The shared interrupts are accessed via the
64 * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
65 */
66 unsigned long *private;
67 unsigned long *shared;
65}; 68};
66 69
67struct vgic_bytemap { 70struct vgic_bytemap {
68 u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4]; 71 /*
69 u32 shared[VGIC_NR_SHARED_IRQS / 4]; 72 * - 8 u32 per VCPU for private interrupts
73 * - As many u32 as necessary for shared interrupts.
74 *
75 * The private interrupts are accessed via the "private"
76 * field, (the state for vcpu n is in private[n*8] to
77 * private[n*8 + 7]). The shared interrupts are accessed via
78 * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
79 * shared[(n-32)/4] word).
80 */
81 u32 *private;
82 u32 *shared;
70}; 83};
71 84
72struct kvm_vcpu; 85struct kvm_vcpu;
@@ -127,6 +140,9 @@ struct vgic_dist {
127 bool in_kernel; 140 bool in_kernel;
128 bool ready; 141 bool ready;
129 142
143 int nr_cpus;
144 int nr_irqs;
145
130 /* Virtual control interface mapping */ 146 /* Virtual control interface mapping */
131 void __iomem *vctrl_base; 147 void __iomem *vctrl_base;
132 148
@@ -140,11 +156,25 @@ struct vgic_dist {
140 /* Interrupt enabled (one bit per IRQ) */ 156 /* Interrupt enabled (one bit per IRQ) */
141 struct vgic_bitmap irq_enabled; 157 struct vgic_bitmap irq_enabled;
142 158
143 /* Interrupt 'pin' level */ 159 /* Level-triggered interrupt external input is asserted */
144 struct vgic_bitmap irq_state; 160 struct vgic_bitmap irq_level;
145 161
146 /* Level-triggered interrupt in progress */ 162 /*
147 struct vgic_bitmap irq_active; 163 * Interrupt state is pending on the distributor
164 */
165 struct vgic_bitmap irq_pending;
166
167 /*
168 * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
169 * interrupts. Essentially holds the state of the flip-flop in
170 * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
171 * Once set, it is only cleared for level-triggered interrupts on
172 * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
173 */
174 struct vgic_bitmap irq_soft_pend;
175
176 /* Level-triggered interrupt queued on VCPU interface */
177 struct vgic_bitmap irq_queued;
148 178
149 /* Interrupt priority. Not used yet. */ 179 /* Interrupt priority. Not used yet. */
150 struct vgic_bytemap irq_priority; 180 struct vgic_bytemap irq_priority;
@@ -152,15 +182,36 @@ struct vgic_dist {
152 /* Level/edge triggered */ 182 /* Level/edge triggered */
153 struct vgic_bitmap irq_cfg; 183 struct vgic_bitmap irq_cfg;
154 184
155 /* Source CPU per SGI and target CPU */ 185 /*
156 u8 irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS]; 186 * Source CPU per SGI and target CPU:
157 187 *
 158	/* Target CPU for each IRQ */	 188	 * Each byte represents an SGI observable on a VCPU, each bit of
159 u8 irq_spi_cpu[VGIC_NR_SHARED_IRQS]; 189 * this byte indicating if the corresponding VCPU has
160 struct vgic_bitmap irq_spi_target[VGIC_MAX_CPUS]; 190 * generated this interrupt. This is a GICv2 feature only.
191 *
192 * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
193 * the SGIs observable on VCPUn.
194 */
195 u8 *irq_sgi_sources;
196
197 /*
198 * Target CPU for each SPI:
199 *
 200	 * Array of available SPIs, each byte indicating the target
 201	 * VCPU for that SPI. IRQn (n >= 32) is at irq_spi_cpu[n-32].
202 */
203 u8 *irq_spi_cpu;
204
205 /*
206 * Reverse lookup of irq_spi_cpu for faster compute pending:
207 *
208 * Array of bitmaps, one per VCPU, describing if IRQn is
209 * routed to a particular VCPU.
210 */
211 struct vgic_bitmap *irq_spi_target;
161 212
162 /* Bitmap indicating which CPU has something pending */ 213 /* Bitmap indicating which CPU has something pending */
163 unsigned long irq_pending_on_cpu; 214 unsigned long *irq_pending_on_cpu;
164#endif 215#endif
165}; 216};
166 217
@@ -190,11 +241,11 @@ struct vgic_v3_cpu_if {
190struct vgic_cpu { 241struct vgic_cpu {
191#ifdef CONFIG_KVM_ARM_VGIC 242#ifdef CONFIG_KVM_ARM_VGIC
192 /* per IRQ to LR mapping */ 243 /* per IRQ to LR mapping */
193 u8 vgic_irq_lr_map[VGIC_NR_IRQS]; 244 u8 *vgic_irq_lr_map;
194 245
195 /* Pending interrupts on this VCPU */ 246 /* Pending interrupts on this VCPU */
196 DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); 247 DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
197 DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS); 248 unsigned long *pending_shared;
198 249
199 /* Bitmap of used/free list registers */ 250 /* Bitmap of used/free list registers */
200 DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); 251 DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
@@ -225,7 +276,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
225int kvm_vgic_hyp_init(void); 276int kvm_vgic_hyp_init(void);
226int kvm_vgic_init(struct kvm *kvm); 277int kvm_vgic_init(struct kvm *kvm);
227int kvm_vgic_create(struct kvm *kvm); 278int kvm_vgic_create(struct kvm *kvm);
228int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); 279void kvm_vgic_destroy(struct kvm *kvm);
280void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
229void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); 281void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
230void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); 282void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
231int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, 283int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
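The private/shared layout documented in the vgic_bitmap comment can be walked with ordinary bitops. A minimal sketch, using a helper name of my own (example_vgic_bit); the real accessors live in virt/kvm/arm/vgic.c:

#include <linux/bitops.h>
#include <kvm/arm_vgic.h>

static bool example_vgic_bit(struct vgic_bitmap *map, int vcpu_id, int irq)
{
	/* SGIs/PPIs: one unsigned long per VCPU, bit index == IRQ number. */
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, &map->private[vcpu_id]);

	/* SPIs: a single shared bitmap, IRQn sits at bit n - 32. */
	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, map->shared);
}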
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 807cbc46d73e..b7926bb9b444 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -587,7 +587,6 @@ static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
587#if defined(CONFIG_ACPI) && defined(CONFIG_PM) 587#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
588struct acpi_device *acpi_dev_pm_get_node(struct device *dev); 588struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
589int acpi_dev_pm_attach(struct device *dev, bool power_on); 589int acpi_dev_pm_attach(struct device *dev, bool power_on);
590void acpi_dev_pm_detach(struct device *dev, bool power_off);
591#else 590#else
592static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) 591static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
593{ 592{
@@ -597,7 +596,6 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
597{ 596{
598 return -ENODEV; 597 return -ENODEV;
599} 598}
600static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {}
601#endif 599#endif
602 600
603#ifdef CONFIG_ACPI 601#ifdef CONFIG_ACPI
diff --git a/include/linux/aer.h b/include/linux/aer.h
index c826d1c28f9c..4fef65e57023 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,8 @@
7#ifndef _AER_H_ 7#ifndef _AER_H_
8#define _AER_H_ 8#define _AER_H_
9 9
10#include <linux/types.h>
11
10#define AER_NONFATAL 0 12#define AER_NONFATAL 0
11#define AER_FATAL 1 13#define AER_FATAL 1
12#define AER_CORRECTABLE 2 14#define AER_CORRECTABLE 2
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 09a947e8bc87..642d6ae4030c 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -22,19 +22,6 @@ struct ata_port_info;
22struct ahci_host_priv; 22struct ahci_host_priv;
23struct platform_device; 23struct platform_device;
24 24
25/*
26 * Note ahci_platform_data is deprecated, it is only kept around for use
27 * by the old da850 and spear13xx ahci code.
28 * New drivers should instead declare their own platform_driver struct, and
29 * use ahci_platform* functions in their own probe, suspend and resume methods.
30 */
31struct ahci_platform_data {
32 int (*init)(struct device *dev, void __iomem *addr);
33 void (*exit)(struct device *dev);
34 int (*suspend)(struct device *dev);
35 int (*resume)(struct device *dev);
36};
37
38int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); 25int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
39void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); 26void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
40int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); 27int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
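With ahci_platform_data gone, a driver fetches and enables its resources itself from its own probe routine. A skeleton along these lines, assuming ahci_platform_get_resources() (declared elsewhere in this header) and leaving host registration out; example_ahci_probe is a made-up name:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>

static int example_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	/* ... set up the port info and register the ATA host here ... */
	return 0;
}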
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index fdd7e1b61f60..c324f5700d1a 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -44,10 +44,15 @@ struct amba_driver {
44 const struct amba_id *id_table; 44 const struct amba_id *id_table;
45}; 45};
46 46
47/*
48 * Constants for the designer field of the Peripheral ID register. When bit 7
49 * is set to '1', bits [6:0] should be the JEP106 manufacturer identity code.
50 */
47enum amba_vendor { 51enum amba_vendor {
48 AMBA_VENDOR_ARM = 0x41, 52 AMBA_VENDOR_ARM = 0x41,
49 AMBA_VENDOR_ST = 0x80, 53 AMBA_VENDOR_ST = 0x80,
50 AMBA_VENDOR_QCOM = 0x51, 54 AMBA_VENDOR_QCOM = 0x51,
55 AMBA_VENDOR_LSI = 0xb6,
51}; 56};
52 57
53extern struct bus_type amba_bustype; 58extern struct bus_type amba_bustype;
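The designer code lives in bits [19:12] of the composed peripheral ID, which the existing amba_manf() accessor extracts. A small sketch of how a driver might key off it (example_amba_probe is hypothetical):

#include <linux/errno.h>
#include <linux/amba/bus.h>

static int example_amba_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* amba_manf() pulls the designer field out of adev->periphid. */
	if (amba_manf(adev) != AMBA_VENDOR_ARM &&
	    amba_manf(adev) != AMBA_VENDOR_LSI)
		return -ENODEV;

	return 0;
}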
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index b9fde17f767c..5c618a084225 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -8,11 +8,6 @@ struct pata_platform_info {
8 * spacing used by ata_std_ports(). 8 * spacing used by ata_std_ports().
9 */ 9 */
10 unsigned int ioport_shift; 10 unsigned int ioport_shift;
11 /*
12 * Indicate platform specific irq types and initial
13 * IRQ flags when call request_irq()
14 */
15 unsigned int irq_flags;
16}; 11};
17 12
18extern int __pata_platform_probe(struct device *dev, 13extern int __pata_platform_probe(struct device *dev,
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 4c7a4b2104bf..91b77f8d495d 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -1,6 +1,8 @@
1#ifndef __LINUX_ATMEL_MCI_H 1#ifndef __LINUX_ATMEL_MCI_H
2#define __LINUX_ATMEL_MCI_H 2#define __LINUX_ATMEL_MCI_H
3 3
4#include <linux/types.h>
5
4#define ATMCI_MAX_NR_SLOTS 2 6#define ATMCI_MAX_NR_SLOTS 2
5 7
6/** 8/**
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h
index 89a931babecf..b87c1c7c242a 100644
--- a/include/linux/atmel_tc.h
+++ b/include/linux/atmel_tc.h
@@ -44,12 +44,13 @@ struct atmel_tcb_config {
44/** 44/**
45 * struct atmel_tc - information about a Timer/Counter Block 45 * struct atmel_tc - information about a Timer/Counter Block
46 * @pdev: physical device 46 * @pdev: physical device
47 * @iomem: resource associated with the I/O register
48 * @regs: mapping through which the I/O registers can be accessed 47 * @regs: mapping through which the I/O registers can be accessed
48 * @id: block id
49 * @tcb_config: configuration data from SoC 49 * @tcb_config: configuration data from SoC
50 * @irq: irq for each of the three channels 50 * @irq: irq for each of the three channels
51 * @clk: internal clock source for each of the three channels 51 * @clk: internal clock source for each of the three channels
52 * @node: list node, for tclib internal use 52 * @node: list node, for tclib internal use
53 * @allocated: if already used, for tclib internal use
53 * 54 *
54 * On some platforms, each TC channel has its own clocks and IRQs, 55 * On some platforms, each TC channel has its own clocks and IRQs,
55 * while on others, all TC channels share the same clock and IRQ. 56 * while on others, all TC channels share the same clock and IRQ.
@@ -61,15 +62,16 @@ struct atmel_tcb_config {
61 */ 62 */
62struct atmel_tc { 63struct atmel_tc {
63 struct platform_device *pdev; 64 struct platform_device *pdev;
64 struct resource *iomem;
65 void __iomem *regs; 65 void __iomem *regs;
66 int id;
66 const struct atmel_tcb_config *tcb_config; 67 const struct atmel_tcb_config *tcb_config;
67 int irq[3]; 68 int irq[3];
68 struct clk *clk[3]; 69 struct clk *clk[3];
69 struct list_head node; 70 struct list_head node;
71 bool allocated;
70}; 72};
71 73
72extern struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name); 74extern struct atmel_tc *atmel_tc_alloc(unsigned block);
73extern void atmel_tc_free(struct atmel_tc *tc); 75extern void atmel_tc_free(struct atmel_tc *tc);
74 76
75/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */ 77/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
@@ -258,5 +260,10 @@ extern const u8 atmel_tc_divisors[5];
258#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */ 260#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
259#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */ 261#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
260#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */ 262#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
263#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
264 ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
265 ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
266 ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
267 /* all IRQs */
261 268
262#endif 269#endif
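atmel_tc_alloc() now identifies a block purely by number, and ATMEL_TC_ALL_IRQ masks every channel interrupt source at once. A rough client sketch (example_claim_tcb is a hypothetical name):

#include <linux/io.h>
#include <linux/atmel_tc.h>

static struct atmel_tc *example_claim_tcb(void)
{
	struct atmel_tc *tc = atmel_tc_alloc(0);	/* block 0, no name argument */

	if (!tc)
		return NULL;	/* not present, or already claimed */

	/* Quiesce channel 0 by disabling every interrupt source in one write. */
	__raw_writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(0, IDR));
	return tc;
}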
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index fef3a809e7cf..5b08a8540ecf 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -3,42 +3,6 @@
3#define _LINUX_ATOMIC_H 3#define _LINUX_ATOMIC_H
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5 5
6/*
7 * Provide __deprecated wrappers for the new interface, avoid flag day changes.
8 * We need the ugly external functions to break header recursion hell.
9 */
10#ifndef smp_mb__before_atomic_inc
11static inline void __deprecated smp_mb__before_atomic_inc(void)
12{
13 extern void __smp_mb__before_atomic(void);
14 __smp_mb__before_atomic();
15}
16#endif
17
18#ifndef smp_mb__after_atomic_inc
19static inline void __deprecated smp_mb__after_atomic_inc(void)
20{
21 extern void __smp_mb__after_atomic(void);
22 __smp_mb__after_atomic();
23}
24#endif
25
26#ifndef smp_mb__before_atomic_dec
27static inline void __deprecated smp_mb__before_atomic_dec(void)
28{
29 extern void __smp_mb__before_atomic(void);
30 __smp_mb__before_atomic();
31}
32#endif
33
34#ifndef smp_mb__after_atomic_dec
35static inline void __deprecated smp_mb__after_atomic_dec(void)
36{
37 extern void __smp_mb__after_atomic(void);
38 __smp_mb__after_atomic();
39}
40#endif
41
42/** 6/**
43 * atomic_add_unless - add unless the number is already a given value 7 * atomic_add_unless - add unless the number is already a given value
44 * @v: pointer of type atomic_t 8 * @v: pointer of type atomic_t
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 089743ade734..9b0a15d06a4f 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -27,10 +27,13 @@
27 * counter raised only while it is under our special handling; 27 * counter raised only while it is under our special handling;
28 * 28 *
 29 * iii. after the lockless scan step has selected a potential balloon page for	 29 * iii. after the lockless scan step has selected a potential balloon page for
30 * isolation, re-test the page->mapping flags and the page ref counter 30 * isolation, re-test the PageBalloon mark and the PagePrivate flag
31 * under the proper page lock, to ensure isolating a valid balloon page 31 * under the proper page lock, to ensure isolating a valid balloon page
32 * (not yet isolated, nor under release procedure) 32 * (not yet isolated, nor under release procedure)
33 * 33 *
34 * iv. isolation or dequeueing procedure must clear PagePrivate flag under
35 * page lock together with removing page from balloon device page list.
36 *
34 * The functions provided by this interface are placed to help on coping with 37 * The functions provided by this interface are placed to help on coping with
35 * the aforementioned balloon page corner case, as well as to ensure the simple 38 * the aforementioned balloon page corner case, as well as to ensure the simple
36 * set of exposed rules are satisfied while we are dealing with balloon pages 39 * set of exposed rules are satisfied while we are dealing with balloon pages
@@ -54,43 +57,22 @@
54 * balloon driver as a page book-keeper for its registered balloon devices. 57 * balloon driver as a page book-keeper for its registered balloon devices.
55 */ 58 */
56struct balloon_dev_info { 59struct balloon_dev_info {
57 void *balloon_device; /* balloon device descriptor */
58 struct address_space *mapping; /* balloon special page->mapping */
59 unsigned long isolated_pages; /* # of isolated pages for migration */ 60 unsigned long isolated_pages; /* # of isolated pages for migration */
60 spinlock_t pages_lock; /* Protection to pages list */ 61 spinlock_t pages_lock; /* Protection to pages list */
61 struct list_head pages; /* Pages enqueued & handled to Host */ 62 struct list_head pages; /* Pages enqueued & handled to Host */
63 int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
64 struct page *page, enum migrate_mode mode);
62}; 65};
63 66
64extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); 67extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
65extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); 68extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
66extern struct balloon_dev_info *balloon_devinfo_alloc(
67 void *balloon_dev_descriptor);
68 69
69static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) 70static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
70{
71 kfree(b_dev_info);
72}
73
74/*
75 * balloon_page_free - release a balloon page back to the page free lists
76 * @page: ballooned page to be set free
77 *
78 * This function must be used to properly set free an isolated/dequeued balloon
79 * page at the end of a sucessful page migration, or at the balloon driver's
80 * page release procedure.
81 */
82static inline void balloon_page_free(struct page *page)
83{ 71{
84 /* 72 balloon->isolated_pages = 0;
85 * Balloon pages always get an extra refcount before being isolated 73 spin_lock_init(&balloon->pages_lock);
86 * and before being dequeued to help on sorting out fortuite colisions 74 INIT_LIST_HEAD(&balloon->pages);
87 * between a thread attempting to isolate and another thread attempting 75 balloon->migratepage = NULL;
88 * to release the very same balloon page.
89 *
90 * Before we handle the page back to Buddy, lets drop its extra refcnt.
91 */
92 put_page(page);
93 __free_page(page);
94} 76}
95 77
96#ifdef CONFIG_BALLOON_COMPACTION 78#ifdef CONFIG_BALLOON_COMPACTION
@@ -98,107 +80,58 @@ extern bool balloon_page_isolate(struct page *page);
98extern void balloon_page_putback(struct page *page); 80extern void balloon_page_putback(struct page *page);
99extern int balloon_page_migrate(struct page *newpage, 81extern int balloon_page_migrate(struct page *newpage,
100 struct page *page, enum migrate_mode mode); 82 struct page *page, enum migrate_mode mode);
101extern struct address_space
102*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
103 const struct address_space_operations *a_ops);
104
105static inline void balloon_mapping_free(struct address_space *balloon_mapping)
106{
107 kfree(balloon_mapping);
108}
109 83
110/* 84/*
111 * page_flags_cleared - helper to perform balloon @page ->flags tests. 85 * __is_movable_balloon_page - helper to perform @page PageBalloon tests
112 *
113 * As balloon pages are obtained from buddy and we do not play with page->flags
114 * at driver level (exception made when we get the page lock for compaction),
115 * we can safely identify a ballooned page by checking if the
116 * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also
117 * helps us skip ballooned pages that are locked for compaction or release, thus
118 * mitigating their racy check at balloon_page_movable()
119 */
120static inline bool page_flags_cleared(struct page *page)
121{
122 return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP);
123}
124
125/*
126 * __is_movable_balloon_page - helper to perform @page mapping->flags tests
127 */ 86 */
128static inline bool __is_movable_balloon_page(struct page *page) 87static inline bool __is_movable_balloon_page(struct page *page)
129{ 88{
130 struct address_space *mapping = page->mapping; 89 return PageBalloon(page);
131 return mapping_balloon(mapping);
132} 90}
133 91
134/* 92/*
135 * balloon_page_movable - test page->mapping->flags to identify balloon pages 93 * balloon_page_movable - test PageBalloon to identify balloon pages
136 * that can be moved by compaction/migration. 94 * and PagePrivate to check that the page is not
137 * 95 * isolated and can be moved by compaction/migration.
138 * This function is used at core compaction's page isolation scheme, therefore
139 * most pages exposed to it are not enlisted as balloon pages and so, to avoid
140 * undesired side effects like racing against __free_pages(), we cannot afford
141 * holding the page locked while testing page->mapping->flags here.
142 * 96 *
143 * As we might return false positives in the case of a balloon page being just 97 * As we might return false positives in the case of a balloon page being just
 144 * released under us, the page->mapping->flags need to be re-tested later,	 98 * released under us, this needs to be re-tested later, under the page lock.
145 * under the proper page lock, at the functions that will be coping with the
146 * balloon page case.
147 */ 99 */
148static inline bool balloon_page_movable(struct page *page) 100static inline bool balloon_page_movable(struct page *page)
149{ 101{
150 /* 102 return PageBalloon(page) && PagePrivate(page);
151 * Before dereferencing and testing mapping->flags, let's make sure
152 * this is not a page that uses ->mapping in a different way
153 */
154 if (page_flags_cleared(page) && !page_mapped(page) &&
155 page_count(page) == 1)
156 return __is_movable_balloon_page(page);
157
158 return false;
159} 103}
160 104
161/* 105/*
162 * isolated_balloon_page - identify an isolated balloon page on private 106 * isolated_balloon_page - identify an isolated balloon page on private
163 * compaction/migration page lists. 107 * compaction/migration page lists.
164 *
165 * After a compaction thread isolates a balloon page for migration, it raises
166 * the page refcount to prevent concurrent compaction threads from re-isolating
167 * the same page. For that reason putback_movable_pages(), or other routines
168 * that need to identify isolated balloon pages on private pagelists, cannot
169 * rely on balloon_page_movable() to accomplish the task.
170 */ 108 */
171static inline bool isolated_balloon_page(struct page *page) 109static inline bool isolated_balloon_page(struct page *page)
172{ 110{
173 /* Already isolated balloon pages, by default, have a raised refcount */ 111 return PageBalloon(page);
174 if (page_flags_cleared(page) && !page_mapped(page) &&
175 page_count(page) >= 2)
176 return __is_movable_balloon_page(page);
177
178 return false;
179} 112}
180 113
181/* 114/*
182 * balloon_page_insert - insert a page into the balloon's page list and make 115 * balloon_page_insert - insert a page into the balloon's page list and make
183 * the page->mapping assignment accordingly. 116 * the page->private assignment accordingly.
117 * @balloon : pointer to balloon device
184 * @page : page to be assigned as a 'balloon page' 118 * @page : page to be assigned as a 'balloon page'
185 * @mapping : allocated special 'balloon_mapping'
186 * @head : balloon's device page list head
187 * 119 *
188 * Caller must ensure the page is locked and the spin_lock protecting balloon 120 * Caller must ensure the page is locked and the spin_lock protecting balloon
189 * pages list is held before inserting a page into the balloon device. 121 * pages list is held before inserting a page into the balloon device.
190 */ 122 */
191static inline void balloon_page_insert(struct page *page, 123static inline void balloon_page_insert(struct balloon_dev_info *balloon,
192 struct address_space *mapping, 124 struct page *page)
193 struct list_head *head)
194{ 125{
195 page->mapping = mapping; 126 __SetPageBalloon(page);
196 list_add(&page->lru, head); 127 SetPagePrivate(page);
128 set_page_private(page, (unsigned long)balloon);
129 list_add(&page->lru, &balloon->pages);
197} 130}
198 131
199/* 132/*
200 * balloon_page_delete - delete a page from balloon's page list and clear 133 * balloon_page_delete - delete a page from balloon's page list and clear
 201 *	     the page->mapping assignment accordingly.	 134 *	     the page->private assignment accordingly.
202 * @page : page to be released from balloon's page list 135 * @page : page to be released from balloon's page list
203 * 136 *
204 * Caller must ensure the page is locked and the spin_lock protecting balloon 137 * Caller must ensure the page is locked and the spin_lock protecting balloon
@@ -206,8 +139,12 @@ static inline void balloon_page_insert(struct page *page,
206 */ 139 */
207static inline void balloon_page_delete(struct page *page) 140static inline void balloon_page_delete(struct page *page)
208{ 141{
209 page->mapping = NULL; 142 __ClearPageBalloon(page);
210 list_del(&page->lru); 143 set_page_private(page, 0);
144 if (PagePrivate(page)) {
145 ClearPagePrivate(page);
146 list_del(&page->lru);
147 }
211} 148}
212 149
213/* 150/*
@@ -216,11 +153,7 @@ static inline void balloon_page_delete(struct page *page)
216 */ 153 */
217static inline struct balloon_dev_info *balloon_page_device(struct page *page) 154static inline struct balloon_dev_info *balloon_page_device(struct page *page)
218{ 155{
219 struct address_space *mapping = page->mapping; 156 return (struct balloon_dev_info *)page_private(page);
220 if (likely(mapping))
221 return mapping->private_data;
222
223 return NULL;
224} 157}
225 158
226static inline gfp_t balloon_mapping_gfp_mask(void) 159static inline gfp_t balloon_mapping_gfp_mask(void)
@@ -228,34 +161,24 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
228 return GFP_HIGHUSER_MOVABLE; 161 return GFP_HIGHUSER_MOVABLE;
229} 162}
230 163
231static inline bool balloon_compaction_check(void)
232{
233 return true;
234}
235
236#else /* !CONFIG_BALLOON_COMPACTION */ 164#else /* !CONFIG_BALLOON_COMPACTION */
237 165
238static inline void *balloon_mapping_alloc(void *balloon_device, 166static inline void balloon_page_insert(struct balloon_dev_info *balloon,
239 const struct address_space_operations *a_ops) 167 struct page *page)
240{
241 return ERR_PTR(-EOPNOTSUPP);
242}
243
244static inline void balloon_mapping_free(struct address_space *balloon_mapping)
245{ 168{
246 return; 169 __SetPageBalloon(page);
170 list_add(&page->lru, &balloon->pages);
247} 171}
248 172
249static inline void balloon_page_insert(struct page *page, 173static inline void balloon_page_delete(struct page *page)
250 struct address_space *mapping,
251 struct list_head *head)
252{ 174{
253 list_add(&page->lru, head); 175 __ClearPageBalloon(page);
176 list_del(&page->lru);
254} 177}
255 178
256static inline void balloon_page_delete(struct page *page) 179static inline bool __is_movable_balloon_page(struct page *page)
257{ 180{
258 list_del(&page->lru); 181 return false;
259} 182}
260 183
261static inline bool balloon_page_movable(struct page *page) 184static inline bool balloon_page_movable(struct page *page)
@@ -289,9 +212,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
289 return GFP_HIGHUSER; 212 return GFP_HIGHUSER;
290} 213}
291 214
292static inline bool balloon_compaction_check(void)
293{
294 return false;
295}
296#endif /* CONFIG_BALLOON_COMPACTION */ 215#endif /* CONFIG_BALLOON_COMPACTION */
297#endif /* _LINUX_BALLOON_COMPACTION_H */ 216#endif /* _LINUX_BALLOON_COMPACTION_H */
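Taken together, a balloon driver now embeds a balloon_dev_info, initialises it, and optionally hooks up a migratepage callback; the PageBalloon/PagePrivate marking and list handling are done by the core helpers. A hedged sketch, with every example_* name invented for illustration:

#include <linux/errno.h>
#include <linux/balloon_compaction.h>

struct example_balloon {
	struct balloon_dev_info b_dev_info;
	/* ... device-specific state ... */
};

static int example_balloon_migrate(struct balloon_dev_info *info,
				   struct page *newpage, struct page *page,
				   enum migrate_mode mode)
{
	/* Tell the host about newpage, drop the old page, fix up counters... */
	return 0;
}

static void example_balloon_setup(struct example_balloon *eb)
{
	balloon_devinfo_init(&eb->b_dev_info);
	eb->b_dev_info.migratepage = example_balloon_migrate;
}

static int example_balloon_inflate(struct example_balloon *eb)
{
	/* The core helper allocates the page, marks it PageBalloon/PagePrivate
	 * and links it on b_dev_info.pages under pages_lock. */
	struct page *page = balloon_page_enqueue(&eb->b_dev_info);

	return page ? 0 : -ENOMEM;
}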
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 0272e49135d0..729f48e6b20b 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -267,7 +267,7 @@ struct bcma_device {
267 u8 core_unit; 267 u8 core_unit;
268 268
269 u32 addr; 269 u32 addr;
270 u32 addr1; 270 u32 addr_s[8];
271 u32 wrap; 271 u32 wrap;
272 272
273 void __iomem *io_addr; 273 void __iomem *io_addr;
@@ -323,6 +323,8 @@ struct bcma_bus {
323 struct pci_dev *host_pci; 323 struct pci_dev *host_pci;
324 /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */ 324 /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
325 struct sdio_func *host_sdio; 325 struct sdio_func *host_sdio;
326 /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
327 struct platform_device *host_pdev;
326 }; 328 };
327 329
328 struct bcma_chipinfo chipinfo; 330 struct bcma_chipinfo chipinfo;
@@ -332,10 +334,10 @@ struct bcma_bus {
332 struct bcma_device *mapped_core; 334 struct bcma_device *mapped_core;
333 struct list_head cores; 335 struct list_head cores;
334 u8 nr_cores; 336 u8 nr_cores;
335 u8 init_done:1;
336 u8 num; 337 u8 num;
337 338
338 struct bcma_drv_cc drv_cc; 339 struct bcma_drv_cc drv_cc;
340 struct bcma_drv_cc_b drv_cc_b;
339 struct bcma_drv_pci drv_pci[2]; 341 struct bcma_drv_pci drv_pci[2];
340 struct bcma_drv_pcie2 drv_pcie2; 342 struct bcma_drv_pcie2 drv_pcie2;
341 struct bcma_drv_mips drv_mips; 343 struct bcma_drv_mips drv_mips;
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 63d105cd14a3..db6fa217f98b 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -644,6 +644,12 @@ struct bcma_drv_cc {
644#endif 644#endif
645}; 645};
646 646
647struct bcma_drv_cc_b {
648 struct bcma_device *core;
649 u8 setup_done:1;
650 void __iomem *mii;
651};
652
647/* Register access */ 653/* Register access */
648#define bcma_cc_read32(cc, offset) \ 654#define bcma_cc_read32(cc, offset) \
649 bcma_read32((cc)->core, offset) 655 bcma_read32((cc)->core, offset)
@@ -699,4 +705,6 @@ extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid);
699 705
700extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc); 706extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc);
701 707
708void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value);
709
702#endif /* LINUX_BCMA_DRIVER_CC_H_ */ 710#endif /* LINUX_BCMA_DRIVER_CC_H_ */
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 917dcd7965e7..e64ae7bf80a1 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -39,6 +39,11 @@
39#define BCMA_RESET_CTL_RESET 0x0001 39#define BCMA_RESET_CTL_RESET 0x0001
40#define BCMA_RESET_ST 0x0804 40#define BCMA_RESET_ST 0x0804
41 41
42#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003
43#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000
44#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001
45#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002
46
42/* BCMA PCI config space registers. */ 47/* BCMA PCI config space registers. */
43#define BCMA_PCI_PMCSR 0x44 48#define BCMA_PCI_PMCSR 0x44
44#define BCMA_PCI_PE 0x100 49#define BCMA_PCI_PE 0x100
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 4203c5593b9f..f24d245f8394 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -10,6 +10,7 @@ struct bcma_soc {
10}; 10};
11 11
12int __init bcma_host_soc_register(struct bcma_soc *soc); 12int __init bcma_host_soc_register(struct bcma_soc *soc);
13int __init bcma_host_soc_init(struct bcma_soc *soc);
13 14
14int bcma_bus_register(struct bcma_bus *bus); 15int bcma_bus_register(struct bcma_bus *bus);
15 16
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cbc5833fb221..be5fd38bd5a0 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -32,26 +32,6 @@ extern unsigned long __sw_hweight64(__u64 w);
32 */ 32 */
33#include <asm/bitops.h> 33#include <asm/bitops.h>
34 34
35/*
36 * Provide __deprecated wrappers for the new interface, avoid flag day changes.
37 * We need the ugly external functions to break header recursion hell.
38 */
39#ifndef smp_mb__before_clear_bit
40static inline void __deprecated smp_mb__before_clear_bit(void)
41{
42 extern void __smp_mb__before_atomic(void);
43 __smp_mb__before_atomic();
44}
45#endif
46
47#ifndef smp_mb__after_clear_bit
48static inline void __deprecated smp_mb__after_clear_bit(void)
49{
50 extern void __smp_mb__after_atomic(void);
51 __smp_mb__after_atomic();
52}
53#endif
54
55#define for_each_set_bit(bit, addr, size) \ 35#define for_each_set_bit(bit, addr, size) \
56 for ((bit) = find_first_bit((addr), (size)); \ 36 for ((bit) = find_first_bit((addr), (size)); \
57 (bit) < (size); \ 37 (bit) < (size); \
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index eb726b9c5762..c13a0c09faea 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -127,10 +127,9 @@ enum {
127 BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */ 127 BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */
128 128
129 BLK_MQ_F_SHOULD_MERGE = 1 << 0, 129 BLK_MQ_F_SHOULD_MERGE = 1 << 0,
130 BLK_MQ_F_SHOULD_SORT = 1 << 1, 130 BLK_MQ_F_TAG_SHARED = 1 << 1,
131 BLK_MQ_F_TAG_SHARED = 1 << 2, 131 BLK_MQ_F_SG_MERGE = 1 << 2,
132 BLK_MQ_F_SG_MERGE = 1 << 3, 132 BLK_MQ_F_SYSFS_UP = 1 << 3,
133 BLK_MQ_F_SYSFS_UP = 1 << 4,
134 133
135 BLK_MQ_S_STOPPED = 0, 134 BLK_MQ_S_STOPPED = 0,
136 BLK_MQ_S_TAG_ACTIVE = 1, 135 BLK_MQ_S_TAG_ACTIVE = 1,
@@ -141,6 +140,7 @@ enum {
141}; 140};
142 141
143struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); 142struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
143void blk_mq_finish_init(struct request_queue *q);
144int blk_mq_register_disk(struct gendisk *); 144int blk_mq_register_disk(struct gendisk *);
145void blk_mq_unregister_disk(struct gendisk *); 145void blk_mq_unregister_disk(struct gendisk *);
146 146
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 518b46555b80..87be398166d3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1564,7 +1564,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1564} 1564}
1565static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) 1565static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1566{ 1566{
1567 return 0; 1567 return NULL;
1568} 1568}
1569static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 1569static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1570{ 1570{
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
new file mode 100644
index 000000000000..3cf91754a957
--- /dev/null
+++ b/include/linux/bpf.h
@@ -0,0 +1,136 @@
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#ifndef _LINUX_BPF_H
8#define _LINUX_BPF_H 1
9
10#include <uapi/linux/bpf.h>
11#include <linux/workqueue.h>
12#include <linux/file.h>
13
14struct bpf_map;
15
16/* map is generic key/value storage optionally accessible by eBPF programs */
17struct bpf_map_ops {
18 /* funcs callable from userspace (via syscall) */
19 struct bpf_map *(*map_alloc)(union bpf_attr *attr);
20 void (*map_free)(struct bpf_map *);
21 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
22
23 /* funcs callable from userspace and from eBPF programs */
24 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
25 int (*map_update_elem)(struct bpf_map *map, void *key, void *value);
26 int (*map_delete_elem)(struct bpf_map *map, void *key);
27};
28
29struct bpf_map {
30 atomic_t refcnt;
31 enum bpf_map_type map_type;
32 u32 key_size;
33 u32 value_size;
34 u32 max_entries;
35 struct bpf_map_ops *ops;
36 struct work_struct work;
37};
38
39struct bpf_map_type_list {
40 struct list_head list_node;
41 struct bpf_map_ops *ops;
42 enum bpf_map_type type;
43};
44
45void bpf_register_map_type(struct bpf_map_type_list *tl);
46void bpf_map_put(struct bpf_map *map);
47struct bpf_map *bpf_map_get(struct fd f);
48
49/* function argument constraints */
50enum bpf_arg_type {
51 ARG_ANYTHING = 0, /* any argument is ok */
52
53 /* the following constraints used to prototype
54 * bpf_map_lookup/update/delete_elem() functions
55 */
56 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
57 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
58 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
59
60 /* the following constraints used to prototype bpf_memcmp() and other
61 * functions that access data on eBPF program stack
62 */
63 ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
64 ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
65};
66
67/* type of values returned from helper functions */
68enum bpf_return_type {
69 RET_INTEGER, /* function returns integer */
70 RET_VOID, /* function doesn't return anything */
71 RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
72};
73
74/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
75 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
76 * instructions after verifying
77 */
78struct bpf_func_proto {
79 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
80 bool gpl_only;
81 enum bpf_return_type ret_type;
82 enum bpf_arg_type arg1_type;
83 enum bpf_arg_type arg2_type;
84 enum bpf_arg_type arg3_type;
85 enum bpf_arg_type arg4_type;
86 enum bpf_arg_type arg5_type;
87};
88
89/* bpf_context is an intentionally undefined structure. A pointer to bpf_context is
90 * the first argument to eBPF programs.
91 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
92 */
93struct bpf_context;
94
95enum bpf_access_type {
96 BPF_READ = 1,
97 BPF_WRITE = 2
98};
99
100struct bpf_verifier_ops {
101 /* return eBPF function prototype for verification */
102 const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
103
104 /* return true if 'size' wide access at offset 'off' within bpf_context
105 * with 'type' (read or write) is allowed
106 */
107 bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
108};
109
110struct bpf_prog_type_list {
111 struct list_head list_node;
112 struct bpf_verifier_ops *ops;
113 enum bpf_prog_type type;
114};
115
116void bpf_register_prog_type(struct bpf_prog_type_list *tl);
117
118struct bpf_prog;
119
120struct bpf_prog_aux {
121 atomic_t refcnt;
122 bool is_gpl_compatible;
123 enum bpf_prog_type prog_type;
124 struct bpf_verifier_ops *ops;
125 struct bpf_map **used_maps;
126 u32 used_map_cnt;
127 struct bpf_prog *prog;
128 struct work_struct work;
129};
130
131void bpf_prog_put(struct bpf_prog *prog);
132struct bpf_prog *bpf_prog_get(u32 ufd);
133/* verify correctness of eBPF program */
134int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
135
136#endif /* _LINUX_BPF_H */
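A map backend plugs in by filling bpf_map_ops and registering a bpf_map_type_list at boot. The following is only a toy sketch: the example_* names are invented, BPF_MAP_TYPE_UNSPEC stands in for a real map type, and get_next_key/update/delete are omitted for brevity:

#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map = kzalloc(sizeof(*map), GFP_USER);

	if (!map)
		return ERR_PTR(-ENOMEM);

	/* Mirror the user-supplied geometry into the common header. */
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	return map;
}

static void example_map_free(struct bpf_map *map)
{
	kfree(map);
}

static void *example_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;	/* this toy backend never holds any element */
}

static struct bpf_map_ops example_map_ops = {
	.map_alloc	 = example_map_alloc,
	.map_free	 = example_map_free,
	.map_lookup_elem = example_map_lookup,
};

static struct bpf_map_type_list example_map_type = {
	.ops	= &example_map_ops,
	.type	= BPF_MAP_TYPE_UNSPEC,
};

static int __init example_map_init(void)
{
	bpf_register_map_type(&example_map_type);
	return 0;
}
late_initcall(example_map_init);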
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 61219b9b3445..7ccd928cc1f2 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -13,7 +13,11 @@
13#define PHY_ID_BCM5461 0x002060c0 13#define PHY_ID_BCM5461 0x002060c0
14#define PHY_ID_BCM57780 0x03625d90 14#define PHY_ID_BCM57780 0x03625d90
15 15
16#define PHY_ID_BCM7250 0xae025280
17#define PHY_ID_BCM7364 0xae025260
16#define PHY_ID_BCM7366 0x600d8490 18#define PHY_ID_BCM7366 0x600d8490
19#define PHY_ID_BCM7425 0x03625e60
20#define PHY_ID_BCM7429 0x600d8730
17#define PHY_ID_BCM7439 0x600d8480 21#define PHY_ID_BCM7439 0x600d8480
18#define PHY_ID_BCM7445 0x600d8510 22#define PHY_ID_BCM7445 0x600d8510
19 23
@@ -21,9 +25,9 @@
21#define PHY_BCM_OUI_1 0x00206000 25#define PHY_BCM_OUI_1 0x00206000
22#define PHY_BCM_OUI_2 0x0143bc00 26#define PHY_BCM_OUI_2 0x0143bc00
23#define PHY_BCM_OUI_3 0x03625c00 27#define PHY_BCM_OUI_3 0x03625c00
24#define PHY_BCM_OUI_4 0x600d0000 28#define PHY_BCM_OUI_4 0x600d8400
25#define PHY_BCM_OUI_5 0x03625e00 29#define PHY_BCM_OUI_5 0x03625e00
26 30#define PHY_BCM_OUI_6 0xae025000
27 31
28#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 32#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
29#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 33#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
@@ -38,7 +42,8 @@
38#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 42#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
39#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 43#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
40/* Broadcom BCM7xxx specific workarounds */ 44/* Broadcom BCM7xxx specific workarounds */
41#define PHY_BRCM_100MBPS_WAR 0x00010000 45#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
46#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff)
42#define PHY_BCM_FLAGS_VALID 0x80000000 47#define PHY_BCM_FLAGS_VALID 0x80000000
43 48
44/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */ 49/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */
@@ -92,4 +97,130 @@
92 97
93#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 98#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
94 99
100/*
101 * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
102 * BCM5482, and possibly some others.
103 */
104#define BCM_LED_SRC_LINKSPD1 0x0
105#define BCM_LED_SRC_LINKSPD2 0x1
106#define BCM_LED_SRC_XMITLED 0x2
107#define BCM_LED_SRC_ACTIVITYLED 0x3
108#define BCM_LED_SRC_FDXLED 0x4
109#define BCM_LED_SRC_SLAVE 0x5
110#define BCM_LED_SRC_INTR 0x6
111#define BCM_LED_SRC_QUALITY 0x7
112#define BCM_LED_SRC_RCVLED 0x8
113#define BCM_LED_SRC_MULTICOLOR1 0xa
114#define BCM_LED_SRC_OPENSHORT 0xb
115#define BCM_LED_SRC_OFF 0xe /* Tied high */
116#define BCM_LED_SRC_ON 0xf /* Tied low */
117
118
119/*
120 * BCM5482: Shadow registers
121 * Shadow values go into bits [14:10] of register 0x1c to select a shadow
122 * register to access.
123 */
124/* 00101: Spare Control Register 3 */
125#define BCM54XX_SHD_SCR3 0x05
126#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
127#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
128#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
129
130/* 01010: Auto Power-Down */
131#define BCM54XX_SHD_APD 0x0a
132#define BCM54XX_SHD_APD_EN 0x0020
133
134#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
135 /* LED3 / ~LINKSPD[2] selector */
136#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
137 /* LED1 / ~LINKSPD[1] selector */
138#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
139#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
140#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
141#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
142#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
143#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
144#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
145
146
147/*
148 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
149 */
150#define MII_BCM54XX_EXP_AADJ1CH0 0x001f
151#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200
152#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100
153#define MII_BCM54XX_EXP_AADJ1CH3 0x601f
154#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002
155#define MII_BCM54XX_EXP_EXP08 0x0F08
156#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
157#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
158#define MII_BCM54XX_EXP_EXP75 0x0f75
159#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
160#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
161#define MII_BCM54XX_EXP_EXP96 0x0f96
162#define MII_BCM54XX_EXP_EXP96_MYST 0x0010
163#define MII_BCM54XX_EXP_EXP97 0x0f97
164#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
165
166/*
167 * BCM5482: Secondary SerDes registers
168 */
169#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
170#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */
171#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */
172#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
173#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
174
175
176/*****************************************************************************/
177/* Fast Ethernet Transceiver definitions. */
178/*****************************************************************************/
179
180#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */
181#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */
182#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */
183#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */
184#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */
185#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */
186
187#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */
188#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */
189
190
191/*** Shadow register definitions ***/
192
193#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */
194#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
195
196#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
197#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
198#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
199
200#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */
201#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */
202
203/*
204 * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
205 * 0x1c shadow registers.
206 */
207static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
208{
209 phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
210 return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
211}
212
213static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow,
214 u16 val)
215{
216 return phy_write(phydev, MII_BCM54XX_SHD,
217 MII_BCM54XX_SHD_WRITE |
218 MII_BCM54XX_SHD_VAL(shadow) |
219 MII_BCM54XX_SHD_DATA(val));
220}
221
222#define BRCM_CL45VEN_EEE_CONTROL 0x803d
223#define LPI_FEATURE_EN 0x8000
224#define LPI_FEATURE_EN_DIG1000X 0x4000
225
95#endif /* _LINUX_BRCMPHY_H */ 226#endif /* _LINUX_BRCMPHY_H */
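The bcm54xx_shadow_read()/bcm54xx_shadow_write() inlines make read-modify-write of the 0x1c shadow bank straightforward. For instance, a PHY driver could set the Auto Power-Down enable bit roughly as follows (example_enable_apd is a hypothetical name):

#include <linux/phy.h>
#include <linux/brcmphy.h>

static int example_enable_apd(struct phy_device *phydev)
{
	/* Fetch the current Auto Power-Down shadow value and set the enable bit. */
	int val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);

	return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD,
				    val | BCM54XX_SHD_APD_EN);
}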
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index ebcc9d146219..7f437036baa4 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -27,6 +27,13 @@ struct ccp_cmd;
27 defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE) 27 defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
28 28
29/** 29/**
30 * ccp_present - check if a CCP device is present
31 *
32 * Returns zero if a CCP device is present, -ENODEV otherwise.
33 */
34int ccp_present(void);
35
36/**
30 * ccp_enqueue_cmd - queue an operation for processing by the CCP 37 * ccp_enqueue_cmd - queue an operation for processing by the CCP
31 * 38 *
32 * @cmd: ccp_cmd struct to be processed 39 * @cmd: ccp_cmd struct to be processed
@@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd);
53 60
54#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */ 61#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
55 62
63static inline int ccp_present(void)
64{
65 return -ENODEV;
66}
67
56static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) 68static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
57{ 69{
58 return -ENODEV; 70 return -ENODEV;
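The new ccp_present() hook lets callers test for a CCP up front instead of discovering its absence when the first command is queued; a rough sketch of the intended call pattern (the function name and flow are illustrative, not taken from the patch):

static int example_ccp_user_init(void)
{
	/* ccp_present() returns 0 when a CCP device is available,
	 * -ENODEV otherwise (including the !CCP_DD stub above).
	 */
	if (ccp_present())
		return -ENODEV;

	/* ... allocate state and start queueing ccp_cmd structures
	 * through ccp_enqueue_cmd() ...
	 */
	return 0;
}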
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b5223c570eba..1d5196889048 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -27,7 +27,6 @@
27 27
28struct cgroup_root; 28struct cgroup_root;
29struct cgroup_subsys; 29struct cgroup_subsys;
30struct inode;
31struct cgroup; 30struct cgroup;
32 31
33extern int cgroup_init_early(void); 32extern int cgroup_init_early(void);
@@ -38,7 +37,8 @@ extern void cgroup_exit(struct task_struct *p);
38extern int cgroupstats_build(struct cgroupstats *stats, 37extern int cgroupstats_build(struct cgroupstats *stats,
39 struct dentry *dentry); 38 struct dentry *dentry);
40 39
41extern int proc_cgroup_show(struct seq_file *, void *); 40extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
41 struct pid *pid, struct task_struct *tsk);
42 42
43/* define the enumeration of all cgroup subsystems */ 43/* define the enumeration of all cgroup subsystems */
44#define SUBSYS(_x) _x ## _cgrp_id, 44#define SUBSYS(_x) _x ## _cgrp_id,
@@ -161,11 +161,6 @@ static inline void css_put(struct cgroup_subsys_state *css)
161 161
162/* bits in struct cgroup flags field */ 162/* bits in struct cgroup flags field */
163enum { 163enum {
164 /*
165 * Control Group has previously had a child cgroup or a task,
166 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
167 */
168 CGRP_RELEASABLE,
169 /* Control Group requires release notifications to userspace */ 164 /* Control Group requires release notifications to userspace */
170 CGRP_NOTIFY_ON_RELEASE, 165 CGRP_NOTIFY_ON_RELEASE,
171 /* 166 /*
@@ -235,13 +230,6 @@ struct cgroup {
235 struct list_head e_csets[CGROUP_SUBSYS_COUNT]; 230 struct list_head e_csets[CGROUP_SUBSYS_COUNT];
236 231
237 /* 232 /*
238 * Linked list running through all cgroups that can
239 * potentially be reaped by the release agent. Protected by
240 * release_list_lock
241 */
242 struct list_head release_list;
243
244 /*
245 * list of pidlists, up to two for each namespace (one for procs, one 233 * list of pidlists, up to two for each namespace (one for procs, one
246 * for tasks); created on demand. 234 * for tasks); created on demand.
247 */ 235 */
@@ -250,6 +238,9 @@ struct cgroup {
250 238
251 /* used to wait for offlining of csses */ 239 /* used to wait for offlining of csses */
252 wait_queue_head_t offline_waitq; 240 wait_queue_head_t offline_waitq;
241
242 /* used to schedule release agent */
243 struct work_struct release_agent_work;
253}; 244};
254 245
255#define MAX_CGROUP_ROOT_NAMELEN 64 246#define MAX_CGROUP_ROOT_NAMELEN 64
@@ -536,13 +527,10 @@ static inline bool cgroup_has_tasks(struct cgroup *cgrp)
536 return !list_empty(&cgrp->cset_links); 527 return !list_empty(&cgrp->cset_links);
537} 528}
538 529
539/* returns ino associated with a cgroup, 0 indicates unmounted root */ 530/* returns ino associated with a cgroup */
540static inline ino_t cgroup_ino(struct cgroup *cgrp) 531static inline ino_t cgroup_ino(struct cgroup *cgrp)
541{ 532{
542 if (cgrp->kn) 533 return cgrp->kn->ino;
543 return cgrp->kn->ino;
544 else
545 return 0;
546} 534}
547 535
548/* cft/css accessors for cftype->write() operation */ 536/* cft/css accessors for cftype->write() operation */
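With the NULL check removed, cgroup_ino() now assumes cgrp->kn is valid for any live cgroup; a one-line sketch of a caller using it as a stable identifier (illustrative only):

static inline u64 example_cgroup_id(struct cgroup *cgrp)
{
	/* The kernfs inode number is returned unconditionally now. */
	return (u64)cgroup_ino(cgrp);
}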
diff --git a/include/linux/clk.h b/include/linux/clk.h
index fb5e097d8f72..afb44bfaf8d1 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -238,7 +238,7 @@ void clk_put(struct clk *clk);
238 238
239/** 239/**
240 * devm_clk_put - "free" a managed clock source 240 * devm_clk_put - "free" a managed clock source
241 * @dev: device used to acuqire the clock 241 * @dev: device used to acquire the clock
242 * @clk: clock source acquired with devm_clk_get() 242 * @clk: clock source acquired with devm_clk_get()
243 * 243 *
244 * Note: drivers must ensure that all clk_enable calls made on this 244 * Note: drivers must ensure that all clk_enable calls made on this
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index de4268d4987a..c8e3b3d1eded 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -125,6 +125,7 @@ extern void __iomem *at91_pmc_base;
125#define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */ 125#define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */
126#define AT91_PMC_PLLADIV2_OFF (0 << 12) 126#define AT91_PMC_PLLADIV2_OFF (0 << 12)
127#define AT91_PMC_PLLADIV2_ON (1 << 12) 127#define AT91_PMC_PLLADIV2_ON (1 << 12)
128#define AT91_PMC_H32MXDIV BIT(24)
128 129
129#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ 130#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
130#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ 131#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
diff --git a/include/linux/com20020.h b/include/linux/com20020.h
index 5dcfb944b6ce..85898995b234 100644
--- a/include/linux/com20020.h
+++ b/include/linux/com20020.h
@@ -41,6 +41,35 @@ extern const struct net_device_ops com20020_netdev_ops;
41#define BUS_ALIGN 1 41#define BUS_ALIGN 1
42#endif 42#endif
43 43
44#define PLX_PCI_MAX_CARDS 2
45
46struct com20020_pci_channel_map {
47 u32 bar;
48 u32 offset;
49 u32 size; /* 0x00 - auto, e.g. length of entire bar */
50};
51
52struct com20020_pci_card_info {
53 const char *name;
54 int devcount;
55
56 struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
57
58 unsigned int flags;
59};
60
61struct com20020_priv {
62 struct com20020_pci_card_info *ci;
63 struct list_head list_dev;
64};
65
66struct com20020_dev {
67 struct list_head list;
68 struct net_device *dev;
69
70 struct com20020_priv *pci_priv;
71 int index;
72};
44 73
45#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */ 74#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */
46#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */ 75#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */
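A sketch of how a PCI back-end might fill the new com20020_pci_card_info and channel-map structures for a two-port card (the card name, BAR number and offsets are made-up values):

static struct com20020_pci_card_info example_card = {
	.name = "EXAMPLE-ARC-2",
	.devcount = 2,
	.chan_map_tbl = {
		{ .bar = 2, .offset = 0x00, .size = 0x00 },	/* size 0 = whole BAR */
		{ .bar = 2, .offset = 0x10, .size = 0x10 },
	},
	.flags = 0,
};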
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 01e3132820da..60bdf8dc02a3 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -2,14 +2,24 @@
2#define _LINUX_COMPACTION_H 2#define _LINUX_COMPACTION_H
3 3
4/* Return values for compact_zone() and try_to_compact_pages() */ 4/* Return values for compact_zone() and try_to_compact_pages() */
5/* compaction didn't start as it was deferred due to past failures */
6#define COMPACT_DEFERRED 0
5/* compaction didn't start as it was not possible or direct reclaim was more suitable */ 7/* compaction didn't start as it was not possible or direct reclaim was more suitable */
6#define COMPACT_SKIPPED 0 8#define COMPACT_SKIPPED 1
7/* compaction should continue to another pageblock */ 9/* compaction should continue to another pageblock */
8#define COMPACT_CONTINUE 1 10#define COMPACT_CONTINUE 2
9/* direct compaction partially compacted a zone and there are suitable pages */ 11/* direct compaction partially compacted a zone and there are suitable pages */
10#define COMPACT_PARTIAL 2 12#define COMPACT_PARTIAL 3
11/* The full zone was compacted */ 13/* The full zone was compacted */
12#define COMPACT_COMPLETE 3 14#define COMPACT_COMPLETE 4
15
16/* Used to signal whether compaction detected need_sched() or lock contention */
17/* No contention detected */
18#define COMPACT_CONTENDED_NONE 0
19/* Either need_sched() was true or fatal signal pending */
20#define COMPACT_CONTENDED_SCHED 1
21/* Zone lock or lru_lock was contended in async compaction */
22#define COMPACT_CONTENDED_LOCK 2
13 23
14#ifdef CONFIG_COMPACTION 24#ifdef CONFIG_COMPACTION
15extern int sysctl_compact_memory; 25extern int sysctl_compact_memory;
@@ -22,7 +32,8 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
22extern int fragmentation_index(struct zone *zone, unsigned int order); 32extern int fragmentation_index(struct zone *zone, unsigned int order);
23extern unsigned long try_to_compact_pages(struct zonelist *zonelist, 33extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
24 int order, gfp_t gfp_mask, nodemask_t *mask, 34 int order, gfp_t gfp_mask, nodemask_t *mask,
25 enum migrate_mode mode, bool *contended); 35 enum migrate_mode mode, int *contended,
36 struct zone **candidate_zone);
26extern void compact_pgdat(pg_data_t *pgdat, int order); 37extern void compact_pgdat(pg_data_t *pgdat, int order);
27extern void reset_isolation_suitable(pg_data_t *pgdat); 38extern void reset_isolation_suitable(pg_data_t *pgdat);
28extern unsigned long compaction_suitable(struct zone *zone, int order); 39extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -91,7 +102,8 @@ static inline bool compaction_restarting(struct zone *zone, int order)
91#else 102#else
92static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, 103static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
93 int order, gfp_t gfp_mask, nodemask_t *nodemask, 104 int order, gfp_t gfp_mask, nodemask_t *nodemask,
94 enum migrate_mode mode, bool *contended) 105 enum migrate_mode mode, int *contended,
106 struct zone **candidate_zone)
95{ 107{
96 return COMPACT_CONTINUE; 108 return COMPACT_CONTINUE;
97} 109}
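Callers of try_to_compact_pages() now pass an int contention code and get the preferred zone back through candidate_zone; a hedged sketch of an updated call site (names prefixed example_ are hypothetical, MIGRATE_ASYNC comes from migrate_mode.h):

static unsigned long example_try_compact(struct zonelist *zonelist, int order,
					 gfp_t gfp_mask, nodemask_t *nodemask)
{
	int contended = COMPACT_CONTENDED_NONE;
	struct zone *candidate_zone = NULL;
	unsigned long rc;

	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
				  MIGRATE_ASYNC, &contended, &candidate_zone);

	/* Back off politely if compaction hit need_resched() or a lock. */
	if (contended == COMPACT_CONTENDED_SCHED)
		cond_resched();

	return rc;	/* may now also be COMPACT_DEFERRED */
}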
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 95978ad7fcdd..b2d9a43012b2 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -213,6 +213,7 @@ extern struct bus_type cpu_subsys;
213extern void cpu_hotplug_begin(void); 213extern void cpu_hotplug_begin(void);
214extern void cpu_hotplug_done(void); 214extern void cpu_hotplug_done(void);
215extern void get_online_cpus(void); 215extern void get_online_cpus(void);
216extern bool try_get_online_cpus(void);
216extern void put_online_cpus(void); 217extern void put_online_cpus(void);
217extern void cpu_hotplug_disable(void); 218extern void cpu_hotplug_disable(void);
218extern void cpu_hotplug_enable(void); 219extern void cpu_hotplug_enable(void);
@@ -230,6 +231,7 @@ int cpu_down(unsigned int cpu);
230static inline void cpu_hotplug_begin(void) {} 231static inline void cpu_hotplug_begin(void) {}
231static inline void cpu_hotplug_done(void) {} 232static inline void cpu_hotplug_done(void) {}
232#define get_online_cpus() do { } while (0) 233#define get_online_cpus() do { } while (0)
234#define try_get_online_cpus() true
233#define put_online_cpus() do { } while (0) 235#define put_online_cpus() do { } while (0)
234#define cpu_hotplug_disable() do { } while (0) 236#define cpu_hotplug_disable() do { } while (0)
235#define cpu_hotplug_enable() do { } while (0) 237#define cpu_hotplug_enable() do { } while (0)
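A sketch of the new non-blocking variant in use (illustrative only): unlike get_online_cpus(), try_get_online_cpus() does not sleep waiting for a hotplug operation to finish.

static bool example_for_each_cpu_nonblocking(void)
{
	int cpu;

	if (!try_get_online_cpus())
		return false;		/* hotplug in progress, retry later */

	for_each_online_cpu(cpu) {
		/* ... per-cpu work ... */
	}

	put_online_cpus();
	return true;
}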
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7d1955afa62c..138336b6bb04 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -112,6 +112,9 @@ struct cpufreq_policy {
112 spinlock_t transition_lock; 112 spinlock_t transition_lock;
113 wait_queue_head_t transition_wait; 113 wait_queue_head_t transition_wait;
114 struct task_struct *transition_task; /* Task which is doing the transition */ 114 struct task_struct *transition_task; /* Task which is doing the transition */
115
116 /* For cpufreq driver's internal use */
117 void *driver_data;
115}; 118};
116 119
117/* Only for ACPI */ 120/* Only for ACPI */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index ade2390ffe92..2f073db7392e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -86,19 +86,20 @@ extern void __cpuset_memory_pressure_bump(void);
86 86
87extern void cpuset_task_status_allowed(struct seq_file *m, 87extern void cpuset_task_status_allowed(struct seq_file *m,
88 struct task_struct *task); 88 struct task_struct *task);
89extern int proc_cpuset_show(struct seq_file *, void *); 89extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
90 struct pid *pid, struct task_struct *tsk);
90 91
91extern int cpuset_mem_spread_node(void); 92extern int cpuset_mem_spread_node(void);
92extern int cpuset_slab_spread_node(void); 93extern int cpuset_slab_spread_node(void);
93 94
94static inline int cpuset_do_page_mem_spread(void) 95static inline int cpuset_do_page_mem_spread(void)
95{ 96{
96 return current->flags & PF_SPREAD_PAGE; 97 return task_spread_page(current);
97} 98}
98 99
99static inline int cpuset_do_slab_mem_spread(void) 100static inline int cpuset_do_slab_mem_spread(void)
100{ 101{
101 return current->flags & PF_SPREAD_SLAB; 102 return task_spread_slab(current);
102} 103}
103 104
104extern int current_cpuset_is_being_rebound(void); 105extern int current_cpuset_is_being_rebound(void);
diff --git a/include/linux/cycx_x25.h b/include/linux/cycx_x25.h
deleted file mode 100644
index 362bf19d6cf1..000000000000
--- a/include/linux/cycx_x25.h
+++ /dev/null
@@ -1,125 +0,0 @@
1#ifndef _CYCX_X25_H
2#define _CYCX_X25_H
3/*
4* cycx_x25.h Cyclom X.25 firmware API definitions.
5*
6* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7*
8* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
9*
10* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com>
11*
12* This program is free software; you can redistribute it and/or
13* modify it under the terms of the GNU General Public License
14* as published by the Free Software Foundation; either version
15* 2 of the License, or (at your option) any later version.
16* ============================================================================
17* 2000/04/02 acme dprintk and cycx_debug
18* 1999/01/03 acme judicious use of data types
19* 1999/01/02 acme #define X25_ACK_N3 0x4411
20* 1998/12/28 acme cleanup: lot'o'things removed
21* commands listed,
22* TX25Cmd & TX25Config structs
23* typedef'ed
24*/
25#ifndef PACKED
26#define PACKED __attribute__((packed))
27#endif
28
29/* X.25 shared memory layout. */
30#define X25_MBOX_OFFS 0x300 /* general mailbox block */
31#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */
32
33/* Debug */
34#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a)
35
36extern unsigned int cycx_debug;
37
38/* Data Structures */
39/* X.25 Command Block. */
40struct cycx_x25_cmd {
41 u16 command;
42 u16 link; /* values: 0 or 1 */
43 u16 len; /* values: 0 thru 0x205 (517) */
44 u32 buf;
45} PACKED;
46
47/* Defines for the 'command' field. */
48#define X25_CONNECT_REQUEST 0x4401
49#define X25_CONNECT_RESPONSE 0x4402
50#define X25_DISCONNECT_REQUEST 0x4403
51#define X25_DISCONNECT_RESPONSE 0x4404
52#define X25_DATA_REQUEST 0x4405
53#define X25_ACK_TO_VC 0x4406
54#define X25_INTERRUPT_RESPONSE 0x4407
55#define X25_CONFIG 0x4408
56#define X25_CONNECT_INDICATION 0x4409
57#define X25_CONNECT_CONFIRM 0x440A
58#define X25_DISCONNECT_INDICATION 0x440B
59#define X25_DISCONNECT_CONFIRM 0x440C
60#define X25_DATA_INDICATION 0x440E
61#define X25_INTERRUPT_INDICATION 0x440F
62#define X25_ACK_FROM_VC 0x4410
63#define X25_ACK_N3 0x4411
64#define X25_CONNECT_COLLISION 0x4413
65#define X25_N3WIN 0x4414
66#define X25_LINE_ON 0x4415
67#define X25_LINE_OFF 0x4416
68#define X25_RESET_REQUEST 0x4417
69#define X25_LOG 0x4500
70#define X25_STATISTIC 0x4600
71#define X25_TRACE 0x4700
72#define X25_N2TRACEXC 0x4702
73#define X25_N3TRACEXC 0x4703
74
75/**
76 * struct cycx_x25_config - cyclom2x x25 firmware configuration
77 * @link - link number
78 * @speed - line speed
79 * @clock - internal/external
80 * @n2 - # of level 2 retransm.(values: 1 thru FF)
81 * @n2win - level 2 window (values: 1 thru 7)
82 * @n3win - level 3 window (values: 1 thru 7)
83 * @nvc - # of logical channels (values: 1 thru 64)
84 * @pktlen - level 3 packet length - log base 2 of size
85 * @locaddr - my address
86 * @remaddr - remote address
87 * @t1 - time, in seconds
88 * @t2 - time, in seconds
89 * @t21 - time, in seconds
90 * @npvc - # of permanent virt. circuits (1 thru nvc)
91 * @t23 - time, in seconds
92 * @flags - see dosx25.doc, in portuguese, for details
93 */
94struct cycx_x25_config {
95 u8 link;
96 u8 speed;
97 u8 clock;
98 u8 n2;
99 u8 n2win;
100 u8 n3win;
101 u8 nvc;
102 u8 pktlen;
103 u8 locaddr;
104 u8 remaddr;
105 u16 t1;
106 u16 t2;
107 u8 t21;
108 u8 npvc;
109 u8 t23;
110 u8 flags;
111} PACKED;
112
113struct cycx_x25_stats {
114 u16 rx_crc_errors;
115 u16 rx_over_errors;
116 u16 n2_tx_frames;
117 u16 n2_rx_frames;
118 u16 tx_timeouts;
119 u16 rx_timeouts;
120 u16 n3_tx_packets;
121 u16 n3_rx_packets;
122 u16 tx_aborts;
123 u16 rx_aborts;
124} PACKED;
125#endif /* _CYCX_X25_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index e4ae2ad48d07..b2a2a08523bf 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -11,7 +11,6 @@
11#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
12#include <linux/lockref.h> 12#include <linux/lockref.h>
13 13
14struct nameidata;
15struct path; 14struct path;
16struct vfsmount; 15struct vfsmount;
17 16
@@ -55,6 +54,7 @@ struct qstr {
55#define QSTR_INIT(n,l) { { { .len = l } }, .name = n } 54#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
56#define hashlen_hash(hashlen) ((u32) (hashlen)) 55#define hashlen_hash(hashlen) ((u32) (hashlen))
57#define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) 56#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
57#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
58 58
59struct dentry_stat_t { 59struct dentry_stat_t {
60 long nr_dentry; 60 long nr_dentry;
@@ -225,11 +225,6 @@ struct dentry_operations {
225 225
226extern seqlock_t rename_lock; 226extern seqlock_t rename_lock;
227 227
228static inline int dname_external(const struct dentry *dentry)
229{
230 return dentry->d_name.name != dentry->d_iname;
231}
232
233/* 228/*
234 * These are the low-level FS interfaces to the dcache.. 229 * These are the low-level FS interfaces to the dcache..
235 */ 230 */
@@ -253,7 +248,7 @@ extern struct dentry * d_obtain_root(struct inode *);
253extern void shrink_dcache_sb(struct super_block *); 248extern void shrink_dcache_sb(struct super_block *);
254extern void shrink_dcache_parent(struct dentry *); 249extern void shrink_dcache_parent(struct dentry *);
255extern void shrink_dcache_for_umount(struct super_block *); 250extern void shrink_dcache_for_umount(struct super_block *);
256extern int d_invalidate(struct dentry *); 251extern void d_invalidate(struct dentry *);
257 252
258/* only used at mount-time */ 253/* only used at mount-time */
259extern struct dentry * d_make_root(struct inode *); 254extern struct dentry * d_make_root(struct inode *);
@@ -268,7 +263,6 @@ extern void d_prune_aliases(struct inode *);
268 263
269/* test whether we have any submounts in a subdir tree */ 264/* test whether we have any submounts in a subdir tree */
270extern int have_submounts(struct dentry *); 265extern int have_submounts(struct dentry *);
271extern int check_submounts_and_drop(struct dentry *);
272 266
273/* 267/*
274 * This adds the entry to the hash queues. 268 * This adds the entry to the hash queues.
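hashlen_create() is the inverse of the existing hashlen_hash()/hashlen_len() accessors; a small sketch showing the round trip (illustrative only):

static inline u64 example_pack_hashlen(u32 hash, u32 len)
{
	u64 hash_len = hashlen_create(hash, len);

	/* hashlen_hash(hash_len) == hash and hashlen_len(hash_len) == len */
	return hash_len;
}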
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
new file mode 100644
index 000000000000..c0a360e99f64
--- /dev/null
+++ b/include/linux/devcoredump.h
@@ -0,0 +1,35 @@
1#ifndef __DEVCOREDUMP_H
2#define __DEVCOREDUMP_H
3
4#include <linux/device.h>
5#include <linux/module.h>
6#include <linux/vmalloc.h>
7
8#ifdef CONFIG_DEV_COREDUMP
9void dev_coredumpv(struct device *dev, const void *data, size_t datalen,
10 gfp_t gfp);
11
12void dev_coredumpm(struct device *dev, struct module *owner,
13 const void *data, size_t datalen, gfp_t gfp,
14 ssize_t (*read)(char *buffer, loff_t offset, size_t count,
15 const void *data, size_t datalen),
16 void (*free)(const void *data));
17#else
18static inline void dev_coredumpv(struct device *dev, const void *data,
19 size_t datalen, gfp_t gfp)
20{
21 vfree(data);
22}
23
24static inline void
25dev_coredumpm(struct device *dev, struct module *owner,
26 const void *data, size_t datalen, gfp_t gfp,
27 ssize_t (*read)(char *buffer, loff_t offset, size_t count,
28 const void *data, size_t datalen),
29 void (*free)(const void *data))
30{
31 free(data);
32}
33#endif /* CONFIG_DEV_COREDUMP */
34
35#endif /* __DEVCOREDUMP_H */
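A rough sketch of a driver handing a crash dump to the new facility (function and buffer names are invented): dev_coredumpv() takes ownership of the vmalloc'ed buffer and frees it once the dump has been read or has timed out.

static void example_report_firmware_crash(struct device *dev,
					  const void *state, size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return;

	memcpy(buf, state, len);
	/* Ownership of 'buf' passes to the devcoredump core (or to the
	 * inline stub above, which simply vfree()s it).
	 */
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
}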
diff --git a/include/linux/device.h b/include/linux/device.h
index 43d183aeb25b..a608e237f0a8 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -607,8 +607,8 @@ extern int devres_release_group(struct device *dev, void *id);
607extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); 607extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
608extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, 608extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
609 va_list ap); 609 va_list ap);
610extern char *devm_kasprintf(struct device *dev, gfp_t gfp, 610extern __printf(3, 4)
611 const char *fmt, ...); 611char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
612static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) 612static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
613{ 613{
614 return devm_kmalloc(dev, size, gfp | __GFP_ZERO); 614 return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 931b70986272..d5d388160f42 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -263,6 +263,32 @@ struct dma_attrs;
263#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \ 263#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
264 dma_unmap_sg(dev, sgl, nents, dir) 264 dma_unmap_sg(dev, sgl, nents, dir)
265 265
266#else
267static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
268 dma_addr_t *dma_addr, gfp_t gfp)
269{
270 DEFINE_DMA_ATTRS(attrs);
271 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
272 return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
273}
274
275static inline void dma_free_writecombine(struct device *dev, size_t size,
276 void *cpu_addr, dma_addr_t dma_addr)
277{
278 DEFINE_DMA_ATTRS(attrs);
279 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
280 return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
281}
282
283static inline int dma_mmap_writecombine(struct device *dev,
284 struct vm_area_struct *vma,
285 void *cpu_addr, dma_addr_t dma_addr,
286 size_t size)
287{
288 DEFINE_DMA_ATTRS(attrs);
289 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
290 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
291}
266#endif /* CONFIG_HAVE_DMA_ATTRS */ 292#endif /* CONFIG_HAVE_DMA_ATTRS */
267 293
268#ifdef CONFIG_NEED_DMA_MAP_STATE 294#ifdef CONFIG_NEED_DMA_MAP_STATE
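With the fallbacks added above, the write-combine helpers are usable whether or not the architecture selects CONFIG_HAVE_DMA_ATTRS; a brief sketch of the allocate/free pairing (illustrative only):

static void *example_alloc_wc_buffer(struct device *dev, size_t size,
				     dma_addr_t *dma_handle)
{
	return dma_alloc_writecombine(dev, size, dma_handle, GFP_KERNEL);
}

static void example_free_wc_buffer(struct device *dev, size_t size,
				   void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_writecombine(dev, size, cpu_addr, dma_handle);
}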
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1f9e642c66ad..212c5b9ac106 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -900,18 +900,6 @@ static inline void dmaengine_put(void)
900} 900}
901#endif 901#endif
902 902
903#ifdef CONFIG_NET_DMA
904#define net_dmaengine_get() dmaengine_get()
905#define net_dmaengine_put() dmaengine_put()
906#else
907static inline void net_dmaengine_get(void)
908{
909}
910static inline void net_dmaengine_put(void)
911{
912}
913#endif
914
915#ifdef CONFIG_ASYNC_TX_DMA 903#ifdef CONFIG_ASYNC_TX_DMA
916#define async_dmaengine_get() dmaengine_get() 904#define async_dmaengine_get() dmaengine_get()
917#define async_dmaengine_put() dmaengine_put() 905#define async_dmaengine_put() dmaengine_put()
@@ -933,16 +921,8 @@ async_dma_find_channel(enum dma_transaction_type type)
933 return NULL; 921 return NULL;
934} 922}
935#endif /* CONFIG_ASYNC_TX_DMA */ 923#endif /* CONFIG_ASYNC_TX_DMA */
936
937dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
938 void *dest, void *src, size_t len);
939dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
940 struct page *page, unsigned int offset, void *kdata, size_t len);
941dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
942 struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
943 unsigned int src_off, size_t len);
944void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 924void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
945 struct dma_chan *chan); 925 struct dma_chan *chan);
946 926
947static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) 927static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
948{ 928{
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 2fe93b26b42f..4f1bbc68cd1b 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -42,7 +42,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
42#if defined(CONFIG_DYNAMIC_DEBUG) 42#if defined(CONFIG_DYNAMIC_DEBUG)
43extern int ddebug_remove_module(const char *mod_name); 43extern int ddebug_remove_module(const char *mod_name);
44extern __printf(2, 3) 44extern __printf(2, 3)
45int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); 45void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
46 46
47extern int ddebug_dyndbg_module_param_cb(char *param, char *val, 47extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
48 const char *modname); 48 const char *modname);
@@ -50,15 +50,15 @@ extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
50struct device; 50struct device;
51 51
52extern __printf(3, 4) 52extern __printf(3, 4)
53int __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, 53void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev,
54 const char *fmt, ...); 54 const char *fmt, ...);
55 55
56struct net_device; 56struct net_device;
57 57
58extern __printf(3, 4) 58extern __printf(3, 4)
59int __dynamic_netdev_dbg(struct _ddebug *descriptor, 59void __dynamic_netdev_dbg(struct _ddebug *descriptor,
60 const struct net_device *dev, 60 const struct net_device *dev,
61 const char *fmt, ...); 61 const char *fmt, ...);
62 62
63#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ 63#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
64 static struct _ddebug __aligned(8) \ 64 static struct _ddebug __aligned(8) \
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 5621547d631b..a4be70398ce1 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -73,14 +73,22 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
73{ 73{
74 BUG_ON(count > DQL_MAX_OBJECT); 74 BUG_ON(count > DQL_MAX_OBJECT);
75 75
76 dql->num_queued += count;
77 dql->last_obj_cnt = count; 76 dql->last_obj_cnt = count;
77
 78	/* We want to force a write first, so that the cpu does not attempt
 79	 * to get the cache line containing last_obj_cnt, num_queued, adj_limit
 80	 * in Shared state, but instead directly does a Request For Ownership.
 81	 * It is only a hint; we use barrier() only.
82 */
83 barrier();
84
85 dql->num_queued += count;
78} 86}
79 87
80/* Returns how many objects can be queued, < 0 indicates over limit. */ 88/* Returns how many objects can be queued, < 0 indicates over limit. */
81static inline int dql_avail(const struct dql *dql) 89static inline int dql_avail(const struct dql *dql)
82{ 90{
83 return dql->adj_limit - dql->num_queued; 91 return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
84} 92}
85 93
86/* Record number of completed objects and recalculate the limit. */ 94/* Record number of completed objects and recalculate the limit. */
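The reordering in dql_queued() plus the ACCESS_ONCE() reads in dql_avail() keep the producer-side pattern below cheap while hinting the CPU to take the cache line exclusively; a sketch of that pattern (real drivers normally go through the netdev BQL wrappers rather than touching struct dql directly):

static bool example_account_tx(struct dql *dql, unsigned int bytes)
{
	/* Record the bytes handed to the hardware ... */
	dql_queued(dql, bytes);

	/* ... then decide whether the queue should be stopped. */
	return dql_avail(dql) < 0;
}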
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9c5529dc6d07..733980fce8e3 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -29,6 +29,7 @@
29#include <asm/bitsperlong.h> 29#include <asm/bitsperlong.h>
30 30
31#ifdef __KERNEL__ 31#ifdef __KERNEL__
32u32 eth_get_headlen(void *data, unsigned int max_len);
32__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 33__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
33extern const struct header_ops eth_header_ops; 34extern const struct header_ops eth_header_ops;
34 35
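eth_get_headlen() lets a driver size the pull into the skb linear area by parsing the frame headers; a one-line sketch (the 256-byte cap is an arbitrary example value):

static unsigned int example_linear_pull_len(void *frame, unsigned int frame_len)
{
	return eth_get_headlen(frame, min_t(unsigned int, frame_len, 256));
}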
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e658229fee39..c1a2d60dfb82 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -257,6 +257,10 @@ struct ethtool_ops {
257 struct ethtool_eeprom *, u8 *); 257 struct ethtool_eeprom *, u8 *);
258 int (*get_eee)(struct net_device *, struct ethtool_eee *); 258 int (*get_eee)(struct net_device *, struct ethtool_eee *);
259 int (*set_eee)(struct net_device *, struct ethtool_eee *); 259 int (*set_eee)(struct net_device *, struct ethtool_eee *);
260 int (*get_tunable)(struct net_device *,
261 const struct ethtool_tunable *, void *);
262 int (*set_tunable)(struct net_device *,
263 const struct ethtool_tunable *, const void *);
260 264
261 265
262}; 266};
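A sketch of a driver wiring up the new tunable hooks; ETHTOOL_RX_COPYBREAK is assumed here as the tunable id defined alongside this interface, and the stored value is a placeholder:

static int example_get_tunable(struct net_device *dev,
			       const struct ethtool_tunable *tuna, void *data)
{
	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = 256;	/* placeholder copybreak value */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int example_set_tunable(struct net_device *dev,
			       const struct ethtool_tunable *tuna,
			       const void *data)
{
	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		/* ... store *(const u32 *)data in driver state ... */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}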
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
index 8900fdf511c6..0b17ad43fbfc 100644
--- a/include/linux/extcon/extcon-gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
@@ -34,8 +34,10 @@
34 * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). 34 * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
 35 * @state_on: print_state is overridden with state_on if attached. 35 * @state_on: print_state is overridden with state_on if attached.
36 * If NULL, default method of extcon class is used. 36 * If NULL, default method of extcon class is used.
 37 * @state_off: print_state is overridden with state_on if detached. 37 * @state_off: print_state is overridden with state_off if detached.
 38 * If NULL, default method of extcon class is used. 38 * If NULL, default method of extcon class is used.
39 * @check_on_resume: Boolean describing whether to check the state of gpio
40 * while resuming from sleep.
39 * 41 *
40 * Note that in order for state_on or state_off to be valid, both state_on 42 * Note that in order for state_on or state_off to be valid, both state_on
41 * and state_off should be not NULL. If at least one of them is NULL, 43 * and state_off should be not NULL. If at least one of them is NULL,
diff --git a/include/linux/extcon/sm5502.h b/include/linux/extcon/sm5502.h
deleted file mode 100644
index 030526bf8d79..000000000000
--- a/include/linux/extcon/sm5502.h
+++ /dev/null
@@ -1,287 +0,0 @@
1/*
2 * sm5502.h
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef __LINUX_EXTCON_SM5502_H
18#define __LINUX_EXTCON_SM5502_H
19
20enum sm5502_types {
21 TYPE_SM5502,
22};
23
24/* SM5502 registers */
25enum sm5502_reg {
26 SM5502_REG_DEVICE_ID = 0x01,
27 SM5502_REG_CONTROL,
28 SM5502_REG_INT1,
29 SM5502_REG_INT2,
30 SM5502_REG_INTMASK1,
31 SM5502_REG_INTMASK2,
32 SM5502_REG_ADC,
33 SM5502_REG_TIMING_SET1,
34 SM5502_REG_TIMING_SET2,
35 SM5502_REG_DEV_TYPE1,
36 SM5502_REG_DEV_TYPE2,
37 SM5502_REG_BUTTON1,
38 SM5502_REG_BUTTON2,
39 SM5502_REG_CAR_KIT_STATUS,
40 SM5502_REG_RSVD1,
41 SM5502_REG_RSVD2,
42 SM5502_REG_RSVD3,
43 SM5502_REG_RSVD4,
44 SM5502_REG_MANUAL_SW1,
45 SM5502_REG_MANUAL_SW2,
46 SM5502_REG_DEV_TYPE3,
47 SM5502_REG_RSVD5,
48 SM5502_REG_RSVD6,
49 SM5502_REG_RSVD7,
50 SM5502_REG_RSVD8,
51 SM5502_REG_RSVD9,
52 SM5502_REG_RESET,
53 SM5502_REG_RSVD10,
54 SM5502_REG_RESERVED_ID1,
55 SM5502_REG_RSVD11,
56 SM5502_REG_RSVD12,
57 SM5502_REG_RESERVED_ID2,
58 SM5502_REG_RSVD13,
59 SM5502_REG_OCP,
60 SM5502_REG_RSVD14,
61 SM5502_REG_RSVD15,
62 SM5502_REG_RSVD16,
63 SM5502_REG_RSVD17,
64 SM5502_REG_RSVD18,
65 SM5502_REG_RSVD19,
66 SM5502_REG_RSVD20,
67 SM5502_REG_RSVD21,
68 SM5502_REG_RSVD22,
69 SM5502_REG_RSVD23,
70 SM5502_REG_RSVD24,
71 SM5502_REG_RSVD25,
72 SM5502_REG_RSVD26,
73 SM5502_REG_RSVD27,
74 SM5502_REG_RSVD28,
75 SM5502_REG_RSVD29,
76 SM5502_REG_RSVD30,
77 SM5502_REG_RSVD31,
78 SM5502_REG_RSVD32,
79 SM5502_REG_RSVD33,
80 SM5502_REG_RSVD34,
81 SM5502_REG_RSVD35,
82 SM5502_REG_RSVD36,
83 SM5502_REG_RESERVED_ID3,
84
85 SM5502_REG_END,
86};
87
88/* Define SM5502 MASK/SHIFT constant */
89#define SM5502_REG_DEVICE_ID_VENDOR_SHIFT 0
90#define SM5502_REG_DEVICE_ID_VERSION_SHIFT 3
91#define SM5502_REG_DEVICE_ID_VENDOR_MASK (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT)
92#define SM5502_REG_DEVICE_ID_VERSION_MASK (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT)
93
94#define SM5502_REG_CONTROL_MASK_INT_SHIFT 0
95#define SM5502_REG_CONTROL_WAIT_SHIFT 1
96#define SM5502_REG_CONTROL_MANUAL_SW_SHIFT 2
97#define SM5502_REG_CONTROL_RAW_DATA_SHIFT 3
98#define SM5502_REG_CONTROL_SW_OPEN_SHIFT 4
99#define SM5502_REG_CONTROL_MASK_INT_MASK (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT)
100#define SM5502_REG_CONTROL_WAIT_MASK (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT)
101#define SM5502_REG_CONTROL_MANUAL_SW_MASK (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT)
102#define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT)
103#define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT)
104
105#define SM5502_REG_INTM1_ATTACH_SHIFT 0
106#define SM5502_REG_INTM1_DETACH_SHIFT 1
107#define SM5502_REG_INTM1_KP_SHIFT 2
108#define SM5502_REG_INTM1_LKP_SHIFT 3
109#define SM5502_REG_INTM1_LKR_SHIFT 4
110#define SM5502_REG_INTM1_OVP_EVENT_SHIFT 5
111#define SM5502_REG_INTM1_OCP_EVENT_SHIFT 6
112#define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT 7
113#define SM5502_REG_INTM1_ATTACH_MASK (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT)
114#define SM5502_REG_INTM1_DETACH_MASK (0x1 << SM5502_REG_INTM1_DETACH_SHIFT)
115#define SM5502_REG_INTM1_KP_MASK (0x1 << SM5502_REG_INTM1_KP_SHIFT)
116#define SM5502_REG_INTM1_LKP_MASK (0x1 << SM5502_REG_INTM1_LKP_SHIFT)
117#define SM5502_REG_INTM1_LKR_MASK (0x1 << SM5502_REG_INTM1_LKR_SHIFT)
118#define SM5502_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT)
119#define SM5502_REG_INTM1_OCP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT)
120#define SM5502_REG_INTM1_OVP_OCP_DIS_MASK (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT)
121
122#define SM5502_REG_INTM2_VBUS_DET_SHIFT 0
123#define SM5502_REG_INTM2_REV_ACCE_SHIFT 1
124#define SM5502_REG_INTM2_ADC_CHG_SHIFT 2
125#define SM5502_REG_INTM2_STUCK_KEY_SHIFT 3
126#define SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT 4
127#define SM5502_REG_INTM2_MHL_SHIFT 5
128#define SM5502_REG_INTM2_VBUS_DET_MASK (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT)
129#define SM5502_REG_INTM2_REV_ACCE_MASK (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT)
130#define SM5502_REG_INTM2_ADC_CHG_MASK (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT)
131#define SM5502_REG_INTM2_STUCK_KEY_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT)
132#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT)
133#define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT)
134
135#define SM5502_REG_ADC_SHIFT 0
136#define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT)
137
138#define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4
139#define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT)
140#define TIMING_KEY_PRESS_100MS 0x0
141#define TIMING_KEY_PRESS_200MS 0x1
142#define TIMING_KEY_PRESS_300MS 0x2
143#define TIMING_KEY_PRESS_400MS 0x3
144#define TIMING_KEY_PRESS_500MS 0x4
145#define TIMING_KEY_PRESS_600MS 0x5
146#define TIMING_KEY_PRESS_700MS 0x6
147#define TIMING_KEY_PRESS_800MS 0x7
148#define TIMING_KEY_PRESS_900MS 0x8
149#define TIMING_KEY_PRESS_1000MS 0x9
150#define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT 0
151#define SM5502_REG_TIMING_SET1_ADC_DET_MASK (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT)
152#define TIMING_ADC_DET_50MS 0x0
153#define TIMING_ADC_DET_100MS 0x1
154#define TIMING_ADC_DET_150MS 0x2
155#define TIMING_ADC_DET_200MS 0x3
156#define TIMING_ADC_DET_300MS 0x4
157#define TIMING_ADC_DET_400MS 0x5
158#define TIMING_ADC_DET_500MS 0x6
159#define TIMING_ADC_DET_600MS 0x7
160#define TIMING_ADC_DET_700MS 0x8
161#define TIMING_ADC_DET_800MS 0x9
162#define TIMING_ADC_DET_900MS 0xA
163#define TIMING_ADC_DET_1000MS 0xB
164
165#define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT 4
166#define SM5502_REG_TIMING_SET2_SW_WAIT_MASK (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT)
167#define TIMING_SW_WAIT_10MS 0x0
168#define TIMING_SW_WAIT_30MS 0x1
169#define TIMING_SW_WAIT_50MS 0x2
170#define TIMING_SW_WAIT_70MS 0x3
171#define TIMING_SW_WAIT_90MS 0x4
172#define TIMING_SW_WAIT_110MS 0x5
173#define TIMING_SW_WAIT_130MS 0x6
174#define TIMING_SW_WAIT_150MS 0x7
175#define TIMING_SW_WAIT_170MS 0x8
176#define TIMING_SW_WAIT_190MS 0x9
177#define TIMING_SW_WAIT_210MS 0xA
178#define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT 0
179#define SM5502_REG_TIMING_SET2_LONG_KEY_MASK (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT)
180#define TIMING_LONG_KEY_300MS 0x0
181#define TIMING_LONG_KEY_400MS 0x1
182#define TIMING_LONG_KEY_500MS 0x2
183#define TIMING_LONG_KEY_600MS 0x3
184#define TIMING_LONG_KEY_700MS 0x4
185#define TIMING_LONG_KEY_800MS 0x5
186#define TIMING_LONG_KEY_900MS 0x6
187#define TIMING_LONG_KEY_1000MS 0x7
188#define TIMING_LONG_KEY_1100MS 0x8
189#define TIMING_LONG_KEY_1200MS 0x9
190#define TIMING_LONG_KEY_1300MS 0xA
191#define TIMING_LONG_KEY_1400MS 0xB
192#define TIMING_LONG_KEY_1500MS 0xC
193
194#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT 0
195#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT 1
196#define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT 2
197#define SM5502_REG_DEV_TYPE1_UART_SHIFT 3
198#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT 4
199#define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT 5
200#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT 6
201#define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT 7
202#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT)
203#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1__MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT)
204#define SM5502_REG_DEV_TYPE1_USB_SDP_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT)
205#define SM5502_REG_DEV_TYPE1_UART_MASK (0x1 << SM5502_REG_DEV_TYPE1_UART_SHIFT)
206#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT)
207#define SM5502_REG_DEV_TYPE1_USB_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT)
208#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT)
209#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT)
210
211#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0
212#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1
213#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2
214#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT 3
215#define SM5502_REG_DEV_TYPE2_PPD_SHIFT 4
216#define SM5502_REG_DEV_TYPE2_TTY_SHIFT 5
217#define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT 6
218#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT)
219#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT)
220#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT)
221#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT)
222#define SM5502_REG_DEV_TYPE2_PPD_MASK (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT)
223#define SM5502_REG_DEV_TYPE2_TTY_MASK (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT)
224#define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT)
225
226#define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT 0
227#define SM5502_REG_MANUAL_SW1_DP_SHIFT 2
228#define SM5502_REG_MANUAL_SW1_DM_SHIFT 5
229#define SM5502_REG_MANUAL_SW1_VBUSIN_MASK (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT)
230#define SM5502_REG_MANUAL_SW1_DP_MASK (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT)
231#define SM5502_REG_MANUAL_SW1_DM_MASK (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT)
232#define VBUSIN_SWITCH_OPEN 0x0
233#define VBUSIN_SWITCH_VBUSOUT 0x1
234#define VBUSIN_SWITCH_MIC 0x2
235#define VBUSIN_SWITCH_VBUSOUT_WITH_USB 0x3
236#define DM_DP_CON_SWITCH_OPEN 0x0
237#define DM_DP_CON_SWITCH_USB 0x1
238#define DM_DP_CON_SWITCH_AUDIO 0x2
239#define DM_DP_CON_SWITCH_UART 0x3
240#define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
241 | (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
242#define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
243 | (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
244#define DM_DP_SWITCH_AUDIO ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
245 | (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
246#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
247 | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
248
249/* SM5502 Interrupts */
250enum sm5502_irq {
251 /* INT1 */
252 SM5502_IRQ_INT1_ATTACH,
253 SM5502_IRQ_INT1_DETACH,
254 SM5502_IRQ_INT1_KP,
255 SM5502_IRQ_INT1_LKP,
256 SM5502_IRQ_INT1_LKR,
257 SM5502_IRQ_INT1_OVP_EVENT,
258 SM5502_IRQ_INT1_OCP_EVENT,
259 SM5502_IRQ_INT1_OVP_OCP_DIS,
260
261 /* INT2 */
262 SM5502_IRQ_INT2_VBUS_DET,
263 SM5502_IRQ_INT2_REV_ACCE,
264 SM5502_IRQ_INT2_ADC_CHG,
265 SM5502_IRQ_INT2_STUCK_KEY,
266 SM5502_IRQ_INT2_STUCK_KEY_RCV,
267 SM5502_IRQ_INT2_MHL,
268
269 SM5502_IRQ_NUM,
270};
271
272#define SM5502_IRQ_INT1_ATTACH_MASK BIT(0)
273#define SM5502_IRQ_INT1_DETACH_MASK BIT(1)
274#define SM5502_IRQ_INT1_KP_MASK BIT(2)
275#define SM5502_IRQ_INT1_LKP_MASK BIT(3)
276#define SM5502_IRQ_INT1_LKR_MASK BIT(4)
277#define SM5502_IRQ_INT1_OVP_EVENT_MASK BIT(5)
278#define SM5502_IRQ_INT1_OCP_EVENT_MASK BIT(6)
279#define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK BIT(7)
280#define SM5502_IRQ_INT2_VBUS_DET_MASK BIT(0)
281#define SM5502_IRQ_INT2_REV_ACCE_MASK BIT(1)
282#define SM5502_IRQ_INT2_ADC_CHG_MASK BIT(2)
283#define SM5502_IRQ_INT2_STUCK_KEY_MASK BIT(3)
284#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4)
285#define SM5502_IRQ_INT2_MHL_MASK BIT(5)
286
287#endif /* __LINUX_EXTCON_SM5502_H */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 6ff0b0b42d47..860313a33a43 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -15,8 +15,9 @@
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */ 17#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
18#define F2FS_LOG_SECTOR_SIZE 9 /* 9 bits for 512 byte */ 18#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
19#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */ 19#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
20#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
20#define F2FS_BLKSIZE 4096 /* support only 4KB block */ 21#define F2FS_BLKSIZE 4096 /* support only 4KB block */
21#define F2FS_MAX_EXTENSION 64 /* # of extension entries */ 22#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
22#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE) 23#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
@@ -24,6 +25,9 @@
24#define NULL_ADDR ((block_t)0) /* used as block_t addresses */ 25#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
25#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ 26#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
26 27
28/* 0, 1(node nid), 2(meta nid) are reserved node id */
29#define F2FS_RESERVED_NODE_NUM 3
30
27#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num) 31#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
28#define F2FS_NODE_INO(sbi) (sbi->node_ino_num) 32#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
29#define F2FS_META_INO(sbi) (sbi->meta_ino_num) 33#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
@@ -82,11 +86,14 @@ struct f2fs_super_block {
82/* 86/*
83 * For checkpoint 87 * For checkpoint
84 */ 88 */
89#define CP_FSCK_FLAG 0x00000010
85#define CP_ERROR_FLAG 0x00000008 90#define CP_ERROR_FLAG 0x00000008
86#define CP_COMPACT_SUM_FLAG 0x00000004 91#define CP_COMPACT_SUM_FLAG 0x00000004
87#define CP_ORPHAN_PRESENT_FLAG 0x00000002 92#define CP_ORPHAN_PRESENT_FLAG 0x00000002
88#define CP_UMOUNT_FLAG 0x00000001 93#define CP_UMOUNT_FLAG 0x00000001
89 94
95#define F2FS_CP_PACKS 2 /* # of checkpoint packs */
96
90struct f2fs_checkpoint { 97struct f2fs_checkpoint {
91 __le64 checkpoint_ver; /* checkpoint block version number */ 98 __le64 checkpoint_ver; /* checkpoint block version number */
92 __le64 user_block_count; /* # of user blocks */ 99 __le64 user_block_count; /* # of user blocks */
@@ -123,6 +130,9 @@ struct f2fs_checkpoint {
123 */ 130 */
124#define F2FS_ORPHANS_PER_BLOCK 1020 131#define F2FS_ORPHANS_PER_BLOCK 1020
125 132
133#define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
134 F2FS_ORPHANS_PER_BLOCK)
135
126struct f2fs_orphan_block { 136struct f2fs_orphan_block {
127 __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */ 137 __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */
128 __le32 reserved; /* reserved */ 138 __le32 reserved; /* reserved */
@@ -144,6 +154,7 @@ struct f2fs_extent {
144#define F2FS_NAME_LEN 255 154#define F2FS_NAME_LEN 255
145#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */ 155#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
146#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ 156#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
157#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
147#define ADDRS_PER_INODE(fi) addrs_per_inode(fi) 158#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
148#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ 159#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
149#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ 160#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
@@ -163,8 +174,9 @@ struct f2fs_extent {
163#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ 174#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
164 F2FS_INLINE_XATTR_ADDRS - 1)) 175 F2FS_INLINE_XATTR_ADDRS - 1))
165 176
166#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) \ 177#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\
167 - sizeof(__le32) * (DEF_ADDRS_PER_INODE + 5 - 1)) 178 sizeof(__le32) * (DEF_ADDRS_PER_INODE + \
179 DEF_NIDS_PER_INODE - 1))
168 180
169struct f2fs_inode { 181struct f2fs_inode {
170 __le16 i_mode; /* file mode */ 182 __le16 i_mode; /* file mode */
@@ -194,7 +206,7 @@ struct f2fs_inode {
194 206
195 __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ 207 __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
196 208
197 __le32 i_nid[5]; /* direct(2), indirect(2), 209 __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
198 double_indirect(1) node id */ 210 double_indirect(1) node id */
199} __packed; 211} __packed;
200 212
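GET_ORPHAN_BLOCKS() is a round-up division by F2FS_ORPHANS_PER_BLOCK; a tiny sketch of the arithmetic (illustrative only):

static inline u32 example_orphan_blocks_needed(u32 nr_orphans)
{
	/* e.g. 1021 orphan inodes need 2 blocks at 1020 entries per block */
	return GET_ORPHAN_BLOCKS(nr_orphans);
}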
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a5227ab8ccb1..ca95abd2bed1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -4,58 +4,24 @@
4#ifndef __LINUX_FILTER_H__ 4#ifndef __LINUX_FILTER_H__
5#define __LINUX_FILTER_H__ 5#define __LINUX_FILTER_H__
6 6
7#include <stdarg.h>
8
7#include <linux/atomic.h> 9#include <linux/atomic.h>
8#include <linux/compat.h> 10#include <linux/compat.h>
9#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/linkage.h>
13#include <linux/printk.h>
10#include <linux/workqueue.h> 14#include <linux/workqueue.h>
11#include <uapi/linux/filter.h>
12 15
13/* Internally used and optimized filter representation with extended 16#include <asm/cacheflush.h>
14 * instruction set based on top of classic BPF.
15 */
16 17
17/* instruction classes */ 18#include <uapi/linux/filter.h>
18#define BPF_ALU64 0x07 /* alu mode in double word width */ 19#include <uapi/linux/bpf.h>
19
20/* ld/ldx fields */
21#define BPF_DW 0x18 /* double word */
22#define BPF_XADD 0xc0 /* exclusive add */
23
24/* alu/jmp fields */
25#define BPF_MOV 0xb0 /* mov reg to reg */
26#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
27
28/* change endianness of a register */
29#define BPF_END 0xd0 /* flags for endianness conversion: */
30#define BPF_TO_LE 0x00 /* convert to little-endian */
31#define BPF_TO_BE 0x08 /* convert to big-endian */
32#define BPF_FROM_LE BPF_TO_LE
33#define BPF_FROM_BE BPF_TO_BE
34
35#define BPF_JNE 0x50 /* jump != */
36#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
37#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
38#define BPF_CALL 0x80 /* function call */
39#define BPF_EXIT 0x90 /* function return */
40
41/* Register numbers */
42enum {
43 BPF_REG_0 = 0,
44 BPF_REG_1,
45 BPF_REG_2,
46 BPF_REG_3,
47 BPF_REG_4,
48 BPF_REG_5,
49 BPF_REG_6,
50 BPF_REG_7,
51 BPF_REG_8,
52 BPF_REG_9,
53 BPF_REG_10,
54 __MAX_BPF_REG,
55};
56 20
57/* BPF has 10 general purpose 64-bit registers and stack frame. */ 21struct sk_buff;
58#define MAX_BPF_REG __MAX_BPF_REG 22struct sock;
23struct seccomp_data;
24struct bpf_prog_aux;
59 25
60/* ArgX, context and stack frame pointer register positions. Note, 26/* ArgX, context and stack frame pointer register positions. Note,
61 * Arg1, Arg2, Arg3, etc are used as argument mappings of function 27 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -161,6 +127,30 @@ enum {
161 .off = 0, \ 127 .off = 0, \
162 .imm = IMM }) 128 .imm = IMM })
163 129
130/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
131#define BPF_LD_IMM64(DST, IMM) \
132 BPF_LD_IMM64_RAW(DST, 0, IMM)
133
134#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
135 ((struct bpf_insn) { \
136 .code = BPF_LD | BPF_DW | BPF_IMM, \
137 .dst_reg = DST, \
138 .src_reg = SRC, \
139 .off = 0, \
140 .imm = (__u32) (IMM) }), \
141 ((struct bpf_insn) { \
142 .code = 0, /* zero is reserved opcode */ \
143 .dst_reg = 0, \
144 .src_reg = 0, \
145 .off = 0, \
146 .imm = ((__u64) (IMM)) >> 32 })
147
148#define BPF_PSEUDO_MAP_FD 1
149
150/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
151#define BPF_LD_MAP_FD(DST, MAP_FD) \
152 BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
153
164/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ 154/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
165 155
166#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ 156#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
@@ -299,14 +289,6 @@ enum {
299#define SK_RUN_FILTER(filter, ctx) \ 289#define SK_RUN_FILTER(filter, ctx) \
300 (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) 290 (*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
301 291
302struct bpf_insn {
303 __u8 code; /* opcode */
304 __u8 dst_reg:4; /* dest register */
305 __u8 src_reg:4; /* source register */
306 __s16 off; /* signed offset */
307 __s32 imm; /* signed immediate constant */
308};
309
310#ifdef CONFIG_COMPAT 292#ifdef CONFIG_COMPAT
311/* A struct sock_filter is architecture independent. */ 293/* A struct sock_filter is architecture independent. */
312struct compat_sock_fprog { 294struct compat_sock_fprog {
@@ -320,20 +302,23 @@ struct sock_fprog_kern {
320 struct sock_filter *filter; 302 struct sock_filter *filter;
321}; 303};
322 304
323struct sk_buff; 305struct bpf_binary_header {
324struct sock; 306 unsigned int pages;
325struct seccomp_data; 307 u8 image[];
308};
326 309
327struct bpf_prog { 310struct bpf_prog {
328 u32 jited:1, /* Is our filter JIT'ed? */ 311 u16 pages; /* Number of allocated pages */
329 len:31; /* Number of filter blocks */ 312 bool jited; /* Is our filter JIT'ed? */
313 u32 len; /* Number of filter blocks */
330 struct sock_fprog_kern *orig_prog; /* Original BPF program */ 314 struct sock_fprog_kern *orig_prog; /* Original BPF program */
315 struct bpf_prog_aux *aux; /* Auxiliary fields */
331 unsigned int (*bpf_func)(const struct sk_buff *skb, 316 unsigned int (*bpf_func)(const struct sk_buff *skb,
332 const struct bpf_insn *filter); 317 const struct bpf_insn *filter);
318 /* Instructions for interpreter */
333 union { 319 union {
334 struct sock_filter insns[0]; 320 struct sock_filter insns[0];
335 struct bpf_insn insnsi[0]; 321 struct bpf_insn insnsi[0];
336 struct work_struct work;
337 }; 322 };
338}; 323};
339 324
@@ -353,6 +338,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
353 338
354#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) 339#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
355 340
341#ifdef CONFIG_DEBUG_SET_MODULE_RONX
342static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
343{
344 set_memory_ro((unsigned long)fp, fp->pages);
345}
346
347static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
348{
349 set_memory_rw((unsigned long)fp, fp->pages);
350}
351#else
352static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
353{
354}
355
356static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
357{
358}
359#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
360
356int sk_filter(struct sock *sk, struct sk_buff *skb); 361int sk_filter(struct sock *sk, struct sk_buff *skb);
357 362
358void bpf_prog_select_runtime(struct bpf_prog *fp); 363void bpf_prog_select_runtime(struct bpf_prog *fp);
@@ -361,6 +366,17 @@ void bpf_prog_free(struct bpf_prog *fp);
361int bpf_convert_filter(struct sock_filter *prog, int len, 366int bpf_convert_filter(struct sock_filter *prog, int len,
362 struct bpf_insn *new_prog, int *new_len); 367 struct bpf_insn *new_prog, int *new_len);
363 368
369struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
370struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
371 gfp_t gfp_extra_flags);
372void __bpf_prog_free(struct bpf_prog *fp);
373
374static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
375{
376 bpf_prog_unlock_ro(fp);
377 __bpf_prog_free(fp);
378}
379
364int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); 380int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
365void bpf_prog_destroy(struct bpf_prog *fp); 381void bpf_prog_destroy(struct bpf_prog *fp);
366 382
@@ -377,6 +393,38 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
377u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 393u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
378void bpf_int_jit_compile(struct bpf_prog *fp); 394void bpf_int_jit_compile(struct bpf_prog *fp);
379 395
396#ifdef CONFIG_BPF_JIT
397typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
398
399struct bpf_binary_header *
400bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
401 unsigned int alignment,
402 bpf_jit_fill_hole_t bpf_fill_ill_insns);
403void bpf_jit_binary_free(struct bpf_binary_header *hdr);
404
405void bpf_jit_compile(struct bpf_prog *fp);
406void bpf_jit_free(struct bpf_prog *fp);
407
408static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
409 u32 pass, void *image)
410{
411 pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
412 flen, proglen, pass, image);
413 if (image)
414 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
415 16, 1, image, proglen, false);
416}
417#else
418static inline void bpf_jit_compile(struct bpf_prog *fp)
419{
420}
421
422static inline void bpf_jit_free(struct bpf_prog *fp)
423{
424 bpf_prog_unlock_free(fp);
425}
426#endif /* CONFIG_BPF_JIT */
427
380#define BPF_ANC BIT(15) 428#define BPF_ANC BIT(15)
381 429
382static inline u16 bpf_anc_helper(const struct sock_filter *ftest) 430static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
@@ -424,36 +472,6 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
424 return bpf_internal_load_pointer_neg_helper(skb, k, size); 472 return bpf_internal_load_pointer_neg_helper(skb, k, size);
425} 473}
426 474
427#ifdef CONFIG_BPF_JIT
428#include <stdarg.h>
429#include <linux/linkage.h>
430#include <linux/printk.h>
431
432void bpf_jit_compile(struct bpf_prog *fp);
433void bpf_jit_free(struct bpf_prog *fp);
434
435static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
436 u32 pass, void *image)
437{
438 pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
439 flen, proglen, pass, image);
440 if (image)
441 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
442 16, 1, image, proglen, false);
443}
444#else
445#include <linux/slab.h>
446
447static inline void bpf_jit_compile(struct bpf_prog *fp)
448{
449}
450
451static inline void bpf_jit_free(struct bpf_prog *fp)
452{
453 kfree(fp);
454}
455#endif /* CONFIG_BPF_JIT */
456
457static inline int bpf_tell_extensions(void) 475static inline int bpf_tell_extensions(void)
458{ 476{
459 return SKF_AD_MAX; 477 return SKF_AD_MAX;
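
A hedged sketch of how an architecture JIT might use the image helpers declared above (bpf_jit_binary_alloc()/bpf_jit_binary_free() and the bpf_jit_fill_hole_t callback). The fill pattern, alignment and error path are illustrative only, not taken from any in-tree JIT.

#include <linux/filter.h>
#include <linux/string.h>

/* Pad the unused tail of the image; a real JIT fills it with trap opcodes. */
static void example_fill_ill_insns(void *area, unsigned int size)
{
	memset(area, 0xcc, size);	/* 0xcc is only an example value */
}

static void example_jit_alloc(struct bpf_prog *prog, unsigned int proglen)
{
	struct bpf_binary_header *hdr;
	u8 *image;

	hdr = bpf_jit_binary_alloc(proglen, &image, 4, example_fill_ill_insns);
	if (!hdr)
		return;		/* fall back to the interpreter */

	/* ... emit native code into image[0..proglen) ... */
	bpf_jit_dump(prog->len, proglen, 1, image);

	/* on any later error the whole image is released again: */
	bpf_jit_binary_free(hdr);
}
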
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index 4ebc49fae391..0d348e011a6e 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -10,6 +10,7 @@
10#include <linux/percpu_counter.h> 10#include <linux/percpu_counter.h>
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12#include <linux/seqlock.h> 12#include <linux/seqlock.h>
13#include <linux/gfp.h>
13 14
14/* 15/*
15 * When maximum proportion of some event type is specified, this is the 16 * When maximum proportion of some event type is specified, this is the
@@ -32,7 +33,7 @@ struct fprop_global {
32 seqcount_t sequence; 33 seqcount_t sequence;
33}; 34};
34 35
35int fprop_global_init(struct fprop_global *p); 36int fprop_global_init(struct fprop_global *p, gfp_t gfp);
36void fprop_global_destroy(struct fprop_global *p); 37void fprop_global_destroy(struct fprop_global *p);
37bool fprop_new_period(struct fprop_global *p, int periods); 38bool fprop_new_period(struct fprop_global *p, int periods);
38 39
@@ -79,7 +80,7 @@ struct fprop_local_percpu {
79 raw_spinlock_t lock; /* Protect period and numerator */ 80 raw_spinlock_t lock; /* Protect period and numerator */
80}; 81};
81 82
82int fprop_local_init_percpu(struct fprop_local_percpu *pl); 83int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
83void fprop_local_destroy_percpu(struct fprop_local_percpu *pl); 84void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
84void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl); 85void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
85void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl, 86void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
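
A minimal sketch of the gfp-aware initializers declared above, assuming the usual init/teardown pairing; GFP_KERNEL is simply the obvious choice for process context.

#include <linux/flex_proportions.h>
#include <linux/gfp.h>

static int example_fprop_setup(struct fprop_global *fg,
			       struct fprop_local_percpu *fl)
{
	int err;

	err = fprop_global_init(fg, GFP_KERNEL);
	if (err)
		return err;

	err = fprop_local_init_percpu(fl, GFP_KERNEL);
	if (err)
		fprop_global_destroy(fg);

	return err;
}
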
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 94187721ad41..ab4f1a10da20 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -851,13 +851,7 @@ static inline struct file *get_file(struct file *f)
851 */ 851 */
852#define FILE_LOCK_DEFERRED 1 852#define FILE_LOCK_DEFERRED 1
853 853
854/* 854/* legacy typedef, should eventually be removed */
855 * The POSIX file lock owner is determined by
856 * the "struct files_struct" in the thread group
857 * (or NULL for no owner - BSD locks).
858 *
859 * Lockd stuffs a "host" pointer into this.
860 */
861typedef void *fl_owner_t; 855typedef void *fl_owner_t;
862 856
863struct file_lock_operations { 857struct file_lock_operations {
@@ -868,10 +862,13 @@ struct file_lock_operations {
868struct lock_manager_operations { 862struct lock_manager_operations {
869 int (*lm_compare_owner)(struct file_lock *, struct file_lock *); 863 int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
870 unsigned long (*lm_owner_key)(struct file_lock *); 864 unsigned long (*lm_owner_key)(struct file_lock *);
865 void (*lm_get_owner)(struct file_lock *, struct file_lock *);
866 void (*lm_put_owner)(struct file_lock *);
871 void (*lm_notify)(struct file_lock *); /* unblock callback */ 867 void (*lm_notify)(struct file_lock *); /* unblock callback */
872 int (*lm_grant)(struct file_lock *, struct file_lock *, int); 868 int (*lm_grant)(struct file_lock *, int);
873 void (*lm_break)(struct file_lock *); 869 bool (*lm_break)(struct file_lock *);
874 int (*lm_change)(struct file_lock **, int); 870 int (*lm_change)(struct file_lock **, int, struct list_head *);
871 void (*lm_setup)(struct file_lock *, void **);
875}; 872};
876 873
877struct lock_manager { 874struct lock_manager {
@@ -966,7 +963,7 @@ void locks_free_lock(struct file_lock *fl);
966extern void locks_init_lock(struct file_lock *); 963extern void locks_init_lock(struct file_lock *);
967extern struct file_lock * locks_alloc_lock(void); 964extern struct file_lock * locks_alloc_lock(void);
968extern void locks_copy_lock(struct file_lock *, struct file_lock *); 965extern void locks_copy_lock(struct file_lock *, struct file_lock *);
969extern void __locks_copy_lock(struct file_lock *, const struct file_lock *); 966extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
970extern void locks_remove_posix(struct file *, fl_owner_t); 967extern void locks_remove_posix(struct file *, fl_owner_t);
971extern void locks_remove_file(struct file *); 968extern void locks_remove_file(struct file *);
972extern void locks_release_private(struct file_lock *); 969extern void locks_release_private(struct file_lock *);
@@ -980,11 +977,9 @@ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
980extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); 977extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
981extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); 978extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
982extern void lease_get_mtime(struct inode *, struct timespec *time); 979extern void lease_get_mtime(struct inode *, struct timespec *time);
983extern int generic_setlease(struct file *, long, struct file_lock **); 980extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
984extern int vfs_setlease(struct file *, long, struct file_lock **); 981extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
985extern int lease_modify(struct file_lock **, int); 982extern int lease_modify(struct file_lock **, int, struct list_head *);
986extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
987extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
988#else /* !CONFIG_FILE_LOCKING */ 983#else /* !CONFIG_FILE_LOCKING */
989static inline int fcntl_getlk(struct file *file, unsigned int cmd, 984static inline int fcntl_getlk(struct file *file, unsigned int cmd,
990 struct flock __user *user) 985 struct flock __user *user)
@@ -1013,12 +1008,12 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file,
1013#endif 1008#endif
1014static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) 1009static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1015{ 1010{
1016 return 0; 1011 return -EINVAL;
1017} 1012}
1018 1013
1019static inline int fcntl_getlease(struct file *filp) 1014static inline int fcntl_getlease(struct file *filp)
1020{ 1015{
1021 return 0; 1016 return F_UNLCK;
1022} 1017}
1023 1018
1024static inline void locks_init_lock(struct file_lock *fl) 1019static inline void locks_init_lock(struct file_lock *fl)
@@ -1026,7 +1021,7 @@ static inline void locks_init_lock(struct file_lock *fl)
1026 return; 1021 return;
1027} 1022}
1028 1023
1029static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl) 1024static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
1030{ 1025{
1031 return; 1026 return;
1032} 1027}
@@ -1100,33 +1095,22 @@ static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
1100} 1095}
1101 1096
1102static inline int generic_setlease(struct file *filp, long arg, 1097static inline int generic_setlease(struct file *filp, long arg,
1103 struct file_lock **flp) 1098 struct file_lock **flp, void **priv)
1104{ 1099{
1105 return -EINVAL; 1100 return -EINVAL;
1106} 1101}
1107 1102
1108static inline int vfs_setlease(struct file *filp, long arg, 1103static inline int vfs_setlease(struct file *filp, long arg,
1109 struct file_lock **lease) 1104 struct file_lock **lease, void **priv)
1110{ 1105{
1111 return -EINVAL; 1106 return -EINVAL;
1112} 1107}
1113 1108
1114static inline int lease_modify(struct file_lock **before, int arg) 1109static inline int lease_modify(struct file_lock **before, int arg,
1110 struct list_head *dispose)
1115{ 1111{
1116 return -EINVAL; 1112 return -EINVAL;
1117} 1113}
1118
1119static inline int lock_may_read(struct inode *inode, loff_t start,
1120 unsigned long len)
1121{
1122 return 1;
1123}
1124
1125static inline int lock_may_write(struct inode *inode, loff_t start,
1126 unsigned long len)
1127{
1128 return 1;
1129}
1130#endif /* !CONFIG_FILE_LOCKING */ 1114#endif /* !CONFIG_FILE_LOCKING */
1131 1115
1132 1116
@@ -1151,8 +1135,8 @@ extern void fasync_free(struct fasync_struct *);
1151/* can be called from interrupts */ 1135/* can be called from interrupts */
1152extern void kill_fasync(struct fasync_struct **, int, int); 1136extern void kill_fasync(struct fasync_struct **, int, int);
1153 1137
1154extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); 1138extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
1155extern int f_setown(struct file *filp, unsigned long arg, int force); 1139extern void f_setown(struct file *filp, unsigned long arg, int force);
1156extern void f_delown(struct file *filp); 1140extern void f_delown(struct file *filp);
1157extern pid_t f_getown(struct file *filp); 1141extern pid_t f_getown(struct file *filp);
1158extern int send_sigurg(struct fown_struct *fown); 1142extern int send_sigurg(struct fown_struct *fown);
@@ -1506,7 +1490,7 @@ struct file_operations {
1506 int (*flock) (struct file *, int, struct file_lock *); 1490 int (*flock) (struct file *, int, struct file_lock *);
1507 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); 1491 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
1508 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); 1492 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
1509 int (*setlease)(struct file *, long, struct file_lock **); 1493 int (*setlease)(struct file *, long, struct file_lock **, void **);
1510 long (*fallocate)(struct file *file, int mode, loff_t offset, 1494 long (*fallocate)(struct file *file, int mode, loff_t offset,
1511 loff_t len); 1495 loff_t len);
1512 int (*show_fdinfo)(struct seq_file *m, struct file *f); 1496 int (*show_fdinfo)(struct seq_file *m, struct file *f);
@@ -1855,7 +1839,8 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
1855extern void kern_unmount(struct vfsmount *mnt); 1839extern void kern_unmount(struct vfsmount *mnt);
1856extern int may_umount_tree(struct vfsmount *); 1840extern int may_umount_tree(struct vfsmount *);
1857extern int may_umount(struct vfsmount *); 1841extern int may_umount(struct vfsmount *);
1858extern long do_mount(const char *, const char *, const char *, unsigned long, void *); 1842extern long do_mount(const char *, const char __user *,
1843 const char *, unsigned long, void *);
1859extern struct vfsmount *collect_mounts(struct path *); 1844extern struct vfsmount *collect_mounts(struct path *);
1860extern void drop_collected_mounts(struct vfsmount *); 1845extern void drop_collected_mounts(struct vfsmount *);
1861extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, 1846extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
@@ -1874,7 +1859,7 @@ extern int current_umask(void);
1874extern void ihold(struct inode * inode); 1859extern void ihold(struct inode * inode);
1875extern void iput(struct inode *); 1860extern void iput(struct inode *);
1876 1861
1877static inline struct inode *file_inode(struct file *f) 1862static inline struct inode *file_inode(const struct file *f)
1878{ 1863{
1879 return f->f_inode; 1864 return f->f_inode;
1880} 1865}
@@ -2611,6 +2596,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping,
2611 struct page *page, void *fsdata); 2596 struct page *page, void *fsdata);
2612extern int always_delete_dentry(const struct dentry *); 2597extern int always_delete_dentry(const struct dentry *);
2613extern struct inode *alloc_anon_inode(struct super_block *); 2598extern struct inode *alloc_anon_inode(struct super_block *);
2599extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
2614extern const struct dentry_operations simple_dentry_operations; 2600extern const struct dentry_operations simple_dentry_operations;
2615 2601
2616extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); 2602extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
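
A hedged sketch of the widened lease hooks above: ->setlease() (and generic_setlease()/vfs_setlease()) now carry a void **priv that the lease manager can stash state in. The example_* names are placeholders.

#include <linux/fs.h>

static int example_setlease(struct file *filp, long arg,
			    struct file_lock **flp, void **priv)
{
	/* no filesystem-specific handling; defer to the generic code */
	return generic_setlease(filp, arg, flp, priv);
}

static const struct file_operations example_fops = {
	.setlease = example_setlease,
	/* filesystems that must refuse leases can point this at simple_nosetlease */
};
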
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
index efb05961bdd8..77d783f71527 100644
--- a/include/linux/fs_enet_pd.h
+++ b/include/linux/fs_enet_pd.h
@@ -139,7 +139,6 @@ struct fs_platform_info {
139 int rx_ring, tx_ring; /* number of buffers on rx */ 139 int rx_ring, tx_ring; /* number of buffers on rx */
140 __u8 macaddr[ETH_ALEN]; /* mac address */ 140 __u8 macaddr[ETH_ALEN]; /* mac address */
141 int rx_copybreak; /* limit we copy small frames */ 141 int rx_copybreak; /* limit we copy small frames */
142 int use_napi; /* use NAPI */
143 int napi_weight; /* NAPI weight */ 142 int napi_weight; /* NAPI weight */
144 143
145 int use_rmii; /* use RMII mode */ 144 int use_rmii; /* use RMII mode */
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index f49ddb1b2273..84d60cb841b1 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -781,13 +781,13 @@ struct fsl_ifc_regs {
781 __be32 amask; 781 __be32 amask;
782 u32 res4[0x2]; 782 u32 res4[0x2];
783 } amask_cs[FSL_IFC_BANK_COUNT]; 783 } amask_cs[FSL_IFC_BANK_COUNT];
784 u32 res5[0x17]; 784 u32 res5[0x18];
785 struct { 785 struct {
786 __be32 csor_ext;
787 __be32 csor; 786 __be32 csor;
787 __be32 csor_ext;
788 u32 res6; 788 u32 res6;
789 } csor_cs[FSL_IFC_BANK_COUNT]; 789 } csor_cs[FSL_IFC_BANK_COUNT];
790 u32 res7[0x19]; 790 u32 res7[0x18];
791 struct { 791 struct {
792 __be32 ftim[4]; 792 __be32 ftim[4];
793 u32 res8[0x8]; 793 u32 res8[0x8];
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f0b0edbf55a9..662697babd48 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -56,6 +56,8 @@ struct ftrace_ops;
56typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, 56typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
57 struct ftrace_ops *op, struct pt_regs *regs); 57 struct ftrace_ops *op, struct pt_regs *regs);
58 58
59ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
60
59/* 61/*
60 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are 62 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
61 * set in the flags member. 63 * set in the flags member.
@@ -89,6 +91,9 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
89 * INITIALIZED - The ftrace_ops has already been initialized (first use time 91 * INITIALIZED - The ftrace_ops has already been initialized (first use time
 90 * register_ftrace_function() is called, it will initialize the ops) 92 * register_ftrace_function() is called, it will initialize the ops)

91 * DELETED - The ops are being deleted, do not let them be registered again. 93 * DELETED - The ops are being deleted, do not let them be registered again.
94 * ADDING - The ops is in the process of being added.
95 * REMOVING - The ops is in the process of being removed.
96 * MODIFYING - The ops is in the process of changing its filter functions.
92 */ 97 */
93enum { 98enum {
94 FTRACE_OPS_FL_ENABLED = 1 << 0, 99 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,6 +105,9 @@ enum {
100 FTRACE_OPS_FL_STUB = 1 << 6, 105 FTRACE_OPS_FL_STUB = 1 << 6,
101 FTRACE_OPS_FL_INITIALIZED = 1 << 7, 106 FTRACE_OPS_FL_INITIALIZED = 1 << 7,
102 FTRACE_OPS_FL_DELETED = 1 << 8, 107 FTRACE_OPS_FL_DELETED = 1 << 8,
108 FTRACE_OPS_FL_ADDING = 1 << 9,
109 FTRACE_OPS_FL_REMOVING = 1 << 10,
110 FTRACE_OPS_FL_MODIFYING = 1 << 11,
103}; 111};
104 112
105#ifdef CONFIG_DYNAMIC_FTRACE 113#ifdef CONFIG_DYNAMIC_FTRACE
@@ -132,7 +140,7 @@ struct ftrace_ops {
132 int nr_trampolines; 140 int nr_trampolines;
133 struct ftrace_ops_hash local_hash; 141 struct ftrace_ops_hash local_hash;
134 struct ftrace_ops_hash *func_hash; 142 struct ftrace_ops_hash *func_hash;
135 struct ftrace_hash *tramp_hash; 143 struct ftrace_ops_hash old_hash;
136 unsigned long trampoline; 144 unsigned long trampoline;
137#endif 145#endif
138}; 146};
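
A small, hedged illustration of the new accessor: ftrace_ops_get_func() returns the callback that will actually be invoked for an ops, which may be a core-installed wrapper rather than ops->func itself. my_ops/my_callback are placeholders.

#include <linux/ftrace.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* tracing work goes here */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static ftrace_func_t example_query(void)
{
	/* may differ from my_callback if the core wraps the call */
	return ftrace_ops_get_func(&my_ops);
}
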
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 1c2fdaa2ffc3..1ccaab44abcc 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -110,6 +110,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
110extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, 110extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
111 unsigned long start, unsigned int nr, void *data); 111 unsigned long start, unsigned int nr, void *data);
112 112
113extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
114 unsigned long size, unsigned long start, unsigned int nr,
115 void *data);
116
113extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, 117extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
114 unsigned long start, unsigned int nr, void *data); 118 unsigned long start, unsigned int nr, void *data);
115 119
@@ -117,6 +121,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev,
117 int min_alloc_order, int nid); 121 int min_alloc_order, int nid);
118extern struct gen_pool *dev_get_gen_pool(struct device *dev); 122extern struct gen_pool *dev_get_gen_pool(struct device *dev);
119 123
124bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
125 size_t size);
126
120#ifdef CONFIG_OF 127#ifdef CONFIG_OF
121extern struct gen_pool *of_get_named_gen_pool(struct device_node *np, 128extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
122 const char *propname, int index); 129 const char *propname, int index);
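
A hedged usage sketch for the two helpers declared above, assuming a pool created elsewhere with gen_pool_create(): the order-aligned first-fit algorithm is selected per pool, and addr_in_gen_pool() lets callers check whether an address really belongs to it.

#include <linux/genalloc.h>
#include <linux/kernel.h>

static unsigned long example_alloc(struct gen_pool *pool, size_t size)
{
	unsigned long addr;

	/* align each allocation to its own size order */
	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	addr = gen_pool_alloc(pool, size);
	if (addr)
		WARN_ON(!addr_in_gen_pool(pool, addr, size));

	return addr;
}
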
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 5e7219dc0fae..41b30fd4d041 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -156,7 +156,7 @@ struct vm_area_struct;
156#define GFP_DMA32 __GFP_DMA32 156#define GFP_DMA32 __GFP_DMA32
157 157
158/* Convert GFP flags to their corresponding migrate type */ 158/* Convert GFP flags to their corresponding migrate type */
159static inline int allocflags_to_migratetype(gfp_t gfp_flags) 159static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
160{ 160{
161 WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); 161 WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
162 162
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index c7e17de732f3..12f146fa6604 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -38,60 +38,32 @@ enum gpiod_flags {
38struct gpio_desc *__must_check __gpiod_get(struct device *dev, 38struct gpio_desc *__must_check __gpiod_get(struct device *dev,
39 const char *con_id, 39 const char *con_id,
40 enum gpiod_flags flags); 40 enum gpiod_flags flags);
41#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
42#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
43struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, 41struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
44 const char *con_id, 42 const char *con_id,
45 unsigned int idx, 43 unsigned int idx,
46 enum gpiod_flags flags); 44 enum gpiod_flags flags);
47#define __gpiod_get_index(dev, con_id, index, flags, ...) \
48 __gpiod_get_index(dev, con_id, index, flags)
49#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
50struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev, 45struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
51 const char *con_id, 46 const char *con_id,
52 enum gpiod_flags flags); 47 enum gpiod_flags flags);
53#define __gpiod_get_optional(dev, con_id, flags, ...) \
54 __gpiod_get_optional(dev, con_id, flags)
55#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
56struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev, 48struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
57 const char *con_id, 49 const char *con_id,
58 unsigned int index, 50 unsigned int index,
59 enum gpiod_flags flags); 51 enum gpiod_flags flags);
60#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
61 __gpiod_get_index_optional(dev, con_id, index, flags)
62#define gpiod_get_index_optional(varargs...) \
63 __gpiod_get_index_optional(varargs, 0)
64
65void gpiod_put(struct gpio_desc *desc); 52void gpiod_put(struct gpio_desc *desc);
66 53
67struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev, 54struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
68 const char *con_id, 55 const char *con_id,
69 enum gpiod_flags flags); 56 enum gpiod_flags flags);
70#define __devm_gpiod_get(dev, con_id, flags, ...) \
71 __devm_gpiod_get(dev, con_id, flags)
72#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
73struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev, 57struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
74 const char *con_id, 58 const char *con_id,
75 unsigned int idx, 59 unsigned int idx,
76 enum gpiod_flags flags); 60 enum gpiod_flags flags);
77#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
78 __devm_gpiod_get_index(dev, con_id, index, flags)
79#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
80struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev, 61struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
81 const char *con_id, 62 const char *con_id,
82 enum gpiod_flags flags); 63 enum gpiod_flags flags);
83#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
84 __devm_gpiod_get_optional(dev, con_id, flags)
85#define devm_gpiod_get_optional(varargs...) \
86 __devm_gpiod_get_optional(varargs, 0)
87struct gpio_desc *__must_check 64struct gpio_desc *__must_check
88__devm_gpiod_get_index_optional(struct device *dev, const char *con_id, 65__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
89 unsigned int index, enum gpiod_flags flags); 66 unsigned int index, enum gpiod_flags flags);
90#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
91 __devm_gpiod_get_index_optional(dev, con_id, index, flags)
92#define devm_gpiod_get_index_optional(varargs...) \
93 __devm_gpiod_get_index_optional(varargs, 0)
94
95void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); 67void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
96 68
97int gpiod_get_direction(const struct gpio_desc *desc); 69int gpiod_get_direction(const struct gpio_desc *desc);
@@ -124,27 +96,31 @@ int desc_to_gpio(const struct gpio_desc *desc);
124 96
125#else /* CONFIG_GPIOLIB */ 97#else /* CONFIG_GPIOLIB */
126 98
127static inline struct gpio_desc *__must_check gpiod_get(struct device *dev, 99static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
128 const char *con_id) 100 const char *con_id,
101 enum gpiod_flags flags)
129{ 102{
130 return ERR_PTR(-ENOSYS); 103 return ERR_PTR(-ENOSYS);
131} 104}
132static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev, 105static inline struct gpio_desc *__must_check
133 const char *con_id, 106__gpiod_get_index(struct device *dev,
134 unsigned int idx) 107 const char *con_id,
108 unsigned int idx,
109 enum gpiod_flags flags)
135{ 110{
136 return ERR_PTR(-ENOSYS); 111 return ERR_PTR(-ENOSYS);
137} 112}
138 113
139static inline struct gpio_desc *__must_check 114static inline struct gpio_desc *__must_check
140gpiod_get_optional(struct device *dev, const char *con_id) 115__gpiod_get_optional(struct device *dev, const char *con_id,
116 enum gpiod_flags flags)
141{ 117{
142 return ERR_PTR(-ENOSYS); 118 return ERR_PTR(-ENOSYS);
143} 119}
144 120
145static inline struct gpio_desc *__must_check 121static inline struct gpio_desc *__must_check
146gpiod_get_index_optional(struct device *dev, const char *con_id, 122__gpiod_get_index_optional(struct device *dev, const char *con_id,
147 unsigned int index) 123 unsigned int index, enum gpiod_flags flags)
148{ 124{
149 return ERR_PTR(-ENOSYS); 125 return ERR_PTR(-ENOSYS);
150} 126}
@@ -157,28 +133,33 @@ static inline void gpiod_put(struct gpio_desc *desc)
157 WARN_ON(1); 133 WARN_ON(1);
158} 134}
159 135
160static inline struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, 136static inline struct gpio_desc *__must_check
161 const char *con_id) 137__devm_gpiod_get(struct device *dev,
138 const char *con_id,
139 enum gpiod_flags flags)
162{ 140{
163 return ERR_PTR(-ENOSYS); 141 return ERR_PTR(-ENOSYS);
164} 142}
165static inline 143static inline
166struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, 144struct gpio_desc *__must_check
167 const char *con_id, 145__devm_gpiod_get_index(struct device *dev,
168 unsigned int idx) 146 const char *con_id,
147 unsigned int idx,
148 enum gpiod_flags flags)
169{ 149{
170 return ERR_PTR(-ENOSYS); 150 return ERR_PTR(-ENOSYS);
171} 151}
172 152
173static inline struct gpio_desc *__must_check 153static inline struct gpio_desc *__must_check
174devm_gpiod_get_optional(struct device *dev, const char *con_id) 154__devm_gpiod_get_optional(struct device *dev, const char *con_id,
155 enum gpiod_flags flags)
175{ 156{
176 return ERR_PTR(-ENOSYS); 157 return ERR_PTR(-ENOSYS);
177} 158}
178 159
179static inline struct gpio_desc *__must_check 160static inline struct gpio_desc *__must_check
180devm_gpiod_get_index_optional(struct device *dev, const char *con_id, 161__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
181 unsigned int index) 162 unsigned int index, enum gpiod_flags flags)
182{ 163{
183 return ERR_PTR(-ENOSYS); 164 return ERR_PTR(-ENOSYS);
184} 165}
@@ -303,9 +284,43 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
303 return -EINVAL; 284 return -EINVAL;
304} 285}
305 286
306
307#endif /* CONFIG_GPIOLIB */ 287#endif /* CONFIG_GPIOLIB */
308 288
289/*
290 * Vararg-hacks! This is done to transition the kernel to always pass
291 * the options flags argument to the below functions. During a transition
292 * phase these vararg macros make both old-and-newstyle code compile,
293 * but when all calls to the elder API are removed, these should go away
294 * and the __gpiod_get() etc functions above be renamed just gpiod_get()
295 * etc.
296 */
297#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
298#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
299#define __gpiod_get_index(dev, con_id, index, flags, ...) \
300 __gpiod_get_index(dev, con_id, index, flags)
301#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
302#define __gpiod_get_optional(dev, con_id, flags, ...) \
303 __gpiod_get_optional(dev, con_id, flags)
304#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
305#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
306 __gpiod_get_index_optional(dev, con_id, index, flags)
307#define gpiod_get_index_optional(varargs...) \
308 __gpiod_get_index_optional(varargs, 0)
309#define __devm_gpiod_get(dev, con_id, flags, ...) \
310 __devm_gpiod_get(dev, con_id, flags)
311#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
312#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
313 __devm_gpiod_get_index(dev, con_id, index, flags)
314#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
315#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
316 __devm_gpiod_get_optional(dev, con_id, flags)
317#define devm_gpiod_get_optional(varargs...) \
318 __devm_gpiod_get_optional(varargs, 0)
319#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
320 __devm_gpiod_get_index_optional(dev, con_id, index, flags)
321#define devm_gpiod_get_index_optional(varargs...) \
322 __devm_gpiod_get_index_optional(varargs, 0)
323
309#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) 324#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
310 325
311int gpiod_export(struct gpio_desc *desc, bool direction_may_change); 326int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
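
A hedged example of what the transition macros above enable: during the flag-argument migration, both the old two-argument call and the new call that passes a gpiod_flags value expand to the same __gpiod_get() invocation. The "reset" con_id and device pointer are illustrative.

#include <linux/gpio/consumer.h>
#include <linux/err.h>

static int example_get_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* either spelling compiles during the transition: */
	reset = gpiod_get(dev, "reset", GPIOD_OUT_LOW);	/* new style */
	/* reset = gpiod_get(dev, "reset"); */		/* old style, flags default to 0 */
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_put(reset);
	return 0;
}
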
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index e78a2373e374..249db3057e4d 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -56,6 +56,8 @@ struct seq_file;
56 * as the chip access may sleep when e.g. reading out the IRQ status 56 * as the chip access may sleep when e.g. reading out the IRQ status
57 * registers. 57 * registers.
58 * @exported: flags if the gpiochip is exported for use from sysfs. Private. 58 * @exported: flags if the gpiochip is exported for use from sysfs. Private.
59 * @irq_not_threaded: flag must be set if @can_sleep is set but the
60 * IRQs don't need to be threaded
59 * 61 *
60 * A gpio_chip can help platforms abstract various sources of GPIOs so 62 * A gpio_chip can help platforms abstract various sources of GPIOs so
 61 * they can all be accessed through a common programming interface. 63 * they can all be accessed through a common programming interface.

@@ -101,11 +103,12 @@ struct gpio_chip {
101 struct gpio_desc *desc; 103 struct gpio_desc *desc;
102 const char *const *names; 104 const char *const *names;
103 bool can_sleep; 105 bool can_sleep;
106 bool irq_not_threaded;
104 bool exported; 107 bool exported;
105 108
106#ifdef CONFIG_GPIOLIB_IRQCHIP 109#ifdef CONFIG_GPIOLIB_IRQCHIP
107 /* 110 /*
108 * With CONFIG_GPIO_IRQCHIP we get an irqchip inside the gpiolib 111 * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
109 * to handle IRQs for most practical cases. 112 * to handle IRQs for most practical cases.
110 */ 113 */
111 struct irq_chip *irqchip; 114 struct irq_chip *irqchip;
@@ -141,7 +144,7 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
141 144
142/* add/remove chips */ 145/* add/remove chips */
143extern int gpiochip_add(struct gpio_chip *chip); 146extern int gpiochip_add(struct gpio_chip *chip);
144extern int gpiochip_remove(struct gpio_chip *chip); 147extern void gpiochip_remove(struct gpio_chip *chip);
145extern struct gpio_chip *gpiochip_find(void *data, 148extern struct gpio_chip *gpiochip_find(void *data,
146 int (*match)(struct gpio_chip *chip, void *data)); 149 int (*match)(struct gpio_chip *chip, void *data));
147 150
@@ -164,9 +167,10 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
164 irq_flow_handler_t handler, 167 irq_flow_handler_t handler,
165 unsigned int type); 168 unsigned int type);
166 169
167#endif /* CONFIG_GPIO_IRQCHIP */ 170#endif /* CONFIG_GPIOLIB_IRQCHIP */
168 171
169int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label); 172struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
173 const char *label);
170void gpiochip_free_own_desc(struct gpio_desc *desc); 174void gpiochip_free_own_desc(struct gpio_desc *desc);
171 175
172#else /* CONFIG_GPIOLIB */ 176#else /* CONFIG_GPIOLIB */
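
A hedged sketch of the reworked self-request API above: a gpio_chip now asks for one of its own lines by hardware offset and gets a descriptor back, and gpiochip_remove() no longer returns an error code to check. The offset, label, and the ERR_PTR error convention are assumptions for illustration.

#include <linux/gpio/driver.h>
#include <linux/err.h>

static void example_claim_line0(struct gpio_chip *chip)
{
	struct gpio_desc *desc;

	desc = gpiochip_request_own_desc(chip, 0, "chip-internal");
	if (IS_ERR(desc))
		return;

	/* ... drive the line from within the chip driver ... */

	gpiochip_free_own_desc(desc);
}

/* teardown elsewhere: gpiochip_remove(chip); -- now returns void */
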
diff --git a/include/linux/hash.h b/include/linux/hash.h
index bd1754c7ecef..d0494c399392 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
37{ 37{
38 u64 hash = val; 38 u64 hash = val;
39 39
40#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
41 hash = hash * GOLDEN_RATIO_PRIME_64;
42#else
40 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ 43 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
41 u64 n = hash; 44 u64 n = hash;
42 n <<= 18; 45 n <<= 18;
@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
51 hash += n; 54 hash += n;
52 n <<= 2; 55 n <<= 2;
53 hash += n; 56 hash += n;
57#endif
54 58
55 /* High bits are more random, so use them. */ 59 /* High bits are more random, so use them. */
56 return hash >> (64 - bits); 60 return hash >> (64 - bits);
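
A hedged restatement of the fast path added above: when the architecture advertises a fast multiplier and BITS_PER_LONG is 64, the long shift/add chain is equivalent to a single multiply by GOLDEN_RATIO_PRIME_64, with the high bits still used as the bucket index.

#include <linux/hash.h>

static u32 example_bucket(u64 val, unsigned int bits)
{
	u64 h = val * GOLDEN_RATIO_PRIME_64;	/* one multiply on the fast path */

	return h >> (64 - bits);		/* keep the most-random high bits */
}
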
diff --git a/include/linux/hid.h b/include/linux/hid.h
index f53c4a9cca1d..78ea9bf941cd 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -265,6 +265,7 @@ struct hid_item {
265#define HID_CONNECT_HIDDEV 0x08 265#define HID_CONNECT_HIDDEV 0x08
266#define HID_CONNECT_HIDDEV_FORCE 0x10 266#define HID_CONNECT_HIDDEV_FORCE 0x10
267#define HID_CONNECT_FF 0x20 267#define HID_CONNECT_FF 0x20
268#define HID_CONNECT_DRIVER 0x40
268#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ 269#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \
269 HID_CONNECT_HIDDEV|HID_CONNECT_FF) 270 HID_CONNECT_HIDDEV|HID_CONNECT_FF)
270 271
@@ -287,6 +288,7 @@ struct hid_item {
287#define HID_QUIRK_HIDINPUT_FORCE 0x00000080 288#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
288#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100 289#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
289#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200 290#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
291#define HID_QUIRK_ALWAYS_POLL 0x00000400
290#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 292#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
291#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 293#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
292#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 294#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
@@ -440,6 +442,7 @@ struct hid_output_fifo {
440#define HID_CLAIMED_INPUT 1 442#define HID_CLAIMED_INPUT 1
441#define HID_CLAIMED_HIDDEV 2 443#define HID_CLAIMED_HIDDEV 2
442#define HID_CLAIMED_HIDRAW 4 444#define HID_CLAIMED_HIDRAW 4
445#define HID_CLAIMED_DRIVER 8
443 446
444#define HID_STAT_ADDED 1 447#define HID_STAT_ADDED 1
445#define HID_STAT_PARSED 2 448#define HID_STAT_PARSED 2
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 63579cb8d3dc..ad9051bab267 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
132static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, 132static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
133 spinlock_t **ptl) 133 spinlock_t **ptl)
134{ 134{
135 VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); 135 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
136 if (pmd_trans_huge(*pmd)) 136 if (pmd_trans_huge(*pmd))
137 return __pmd_trans_huge_lock(pmd, vma, ptl); 137 return __pmd_trans_huge_lock(pmd, vma, ptl);
138 else 138 else
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index a95efeb53a8b..b556e0ab946f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -577,20 +577,4 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
577} 577}
578#endif /* CONFIG_OF */ 578#endif /* CONFIG_OF */
579 579
580#ifdef CONFIG_ACPI
581void acpi_i2c_register_devices(struct i2c_adapter *adap);
582#else
583static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
584#endif /* CONFIG_ACPI */
585
586#ifdef CONFIG_ACPI_I2C_OPREGION
587int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
588void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
589#else
590static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
591{ }
592static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
593{ return 0; }
594#endif /* CONFIG_ACPI_I2C_OPREGION */
595
596#endif /* _LINUX_I2C_H */ 580#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i82593.h b/include/linux/i82593.h
deleted file mode 100644
index afac5c7a323d..000000000000
--- a/include/linux/i82593.h
+++ /dev/null
@@ -1,229 +0,0 @@
1/*
2 * Definitions for Intel 82593 CSMA/CD Core LAN Controller
3 * The definitions are taken from the 1992 users manual with Intel
4 * order number 297125-001.
5 *
6 * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp
7 *
8 * Copyright 1994, Anders Klemets <klemets@it.kth.se>
9 *
10 * HISTORY
11 * i82593.h,v
12 * Revision 1.4 2005/11/4 09:15:00 baroniunas
13 * Modified copyright with permission of author as follows:
14 *
15 * "If I82539.H is the only file with my copyright statement
16 * that is included in the Source Forge project, then you have
17 * my approval to change the copyright statement to be a GPL
18 * license, in the way you proposed on October 10."
19 *
20 * Revision 1.1 1996/07/17 15:23:12 root
21 * Initial revision
22 *
23 * Revision 1.3 1995/04/05 15:13:58 adj
24 * Initial alpha release
25 *
26 * Revision 1.2 1994/06/16 23:57:31 klemets
27 * Mirrored all the fields in the configuration block.
28 *
29 * Revision 1.1 1994/06/02 20:25:34 klemets
30 * Initial revision
31 *
32 *
33 */
34#ifndef _I82593_H
35#define _I82593_H
36
37/* Intel 82593 CSMA/CD Core LAN Controller */
38
39/* Port 0 Command Register definitions */
40
41/* Execution operations */
42#define OP0_NOP 0 /* CHNL = 0 */
43#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */
44#define OP0_IA_SETUP 1
45#define OP0_CONFIGURE 2
46#define OP0_MC_SETUP 3
47#define OP0_TRANSMIT 4
48#define OP0_TDR 5
49#define OP0_DUMP 6
50#define OP0_DIAGNOSE 7
51#define OP0_TRANSMIT_NO_CRC 9
52#define OP0_RETRANSMIT 12
53#define OP0_ABORT 13
54/* Reception operations */
55#define OP0_RCV_ENABLE 8
56#define OP0_RCV_DISABLE 10
57#define OP0_STOP_RCV 11
58/* Status pointer control operations */
59#define OP0_FIX_PTR 15 /* CHNL = 1 */
60#define OP0_RLS_PTR 15 /* CHNL = 0 */
61#define OP0_RESET 14
62
63#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */
64#define CR0_STATUS_0 0x00
65#define CR0_STATUS_1 0x20
66#define CR0_STATUS_2 0x40
67#define CR0_STATUS_3 0x60
68#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */
69
70/* Port 0 Status Register definitions */
71
72#define SR0_NO_RESULT 0 /* dummy */
73#define SR0_EVENT_MASK 0x0f
74#define SR0_IA_SETUP_DONE 1
75#define SR0_CONFIGURE_DONE 2
76#define SR0_MC_SETUP_DONE 3
77#define SR0_TRANSMIT_DONE 4
78#define SR0_TDR_DONE 5
79#define SR0_DUMP_DONE 6
80#define SR0_DIAGNOSE_PASSED 7
81#define SR0_TRANSMIT_NO_CRC_DONE 9
82#define SR0_RETRANSMIT_DONE 12
83#define SR0_EXECUTION_ABORTED 13
84#define SR0_END_OF_FRAME 8
85#define SR0_RECEPTION_ABORTED 10
86#define SR0_DIAGNOSE_FAILED 15
87#define SR0_STOP_REG_HIT 11
88
89#define SR0_CHNL (1 << 4)
90#define SR0_EXECUTION (1 << 5)
91#define SR0_RECEPTION (1 << 6)
92#define SR0_INTERRUPT (1 << 7)
93#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION)
94
95#define SR3_EXEC_STATE_MASK 0x03
96#define SR3_EXEC_IDLE 0
97#define SR3_TX_ABORT_IN_PROGRESS 1
98#define SR3_EXEC_ACTIVE 2
99#define SR3_ABORT_IN_PROGRESS 3
100#define SR3_EXEC_CHNL (1 << 2)
101#define SR3_STP_ON_NO_RSRC (1 << 3)
102#define SR3_RCVING_NO_RSRC (1 << 4)
103#define SR3_RCV_STATE_MASK 0x60
104#define SR3_RCV_IDLE 0x00
105#define SR3_RCV_READY 0x20
106#define SR3_RCV_ACTIVE 0x40
107#define SR3_RCV_STOP_IN_PROG 0x60
108#define SR3_RCV_CHNL (1 << 7)
109
110/* Port 1 Command Register definitions */
111
112#define OP1_NOP 0
113#define OP1_SWIT_TO_PORT_0 1
114#define OP1_INT_DISABLE 2
115#define OP1_INT_ENABLE 3
116#define OP1_SET_TS 5
117#define OP1_RST_TS 7
118#define OP1_POWER_DOWN 8
119#define OP1_RESET_RING_MNGMT 11
120#define OP1_RESET 14
121#define OP1_SEL_RST 15
122
123#define CR1_STATUS_4 0x00
124#define CR1_STATUS_5 0x20
125#define CR1_STATUS_6 0x40
126#define CR1_STOP_REG_UPDATE (1 << 7)
127
128/* Receive frame status bits */
129
130#define RX_RCLD (1 << 0)
131#define RX_IA_MATCH (1 << 1)
132#define RX_NO_AD_MATCH (1 << 2)
133#define RX_NO_SFD (1 << 3)
134#define RX_SRT_FRM (1 << 7)
135#define RX_OVRRUN (1 << 8)
136#define RX_ALG_ERR (1 << 10)
137#define RX_CRC_ERR (1 << 11)
138#define RX_LEN_ERR (1 << 12)
139#define RX_RCV_OK (1 << 13)
140#define RX_TYP_LEN (1 << 15)
141
142/* Transmit status bits */
143
144#define TX_NCOL_MASK 0x0f
145#define TX_FRTL (1 << 4)
146#define TX_MAX_COL (1 << 5)
147#define TX_HRT_BEAT (1 << 6)
148#define TX_DEFER (1 << 7)
149#define TX_UND_RUN (1 << 8)
150#define TX_LOST_CTS (1 << 9)
151#define TX_LOST_CRS (1 << 10)
152#define TX_LTCOL (1 << 11)
153#define TX_OK (1 << 13)
154#define TX_COLL (1 << 15)
155
156struct i82593_conf_block {
157 u_char fifo_limit : 4,
158 forgnesi : 1,
159 fifo_32 : 1,
160 d6mod : 1,
161 throttle_enb : 1;
162 u_char throttle : 6,
163 cntrxint : 1,
164 contin : 1;
165 u_char addr_len : 3,
166 acloc : 1,
167 preamb_len : 2,
168 loopback : 2;
169 u_char lin_prio : 3,
170 tbofstop : 1,
171 exp_prio : 3,
172 bof_met : 1;
173 u_char : 4,
174 ifrm_spc : 4;
175 u_char : 5,
176 slottim_low : 3;
177 u_char slottim_hi : 3,
178 : 1,
179 max_retr : 4;
180 u_char prmisc : 1,
181 bc_dis : 1,
182 : 1,
183 crs_1 : 1,
184 nocrc_ins : 1,
185 crc_1632 : 1,
186 : 1,
187 crs_cdt : 1;
188 u_char cs_filter : 3,
189 crs_src : 1,
190 cd_filter : 3,
191 : 1;
192 u_char : 2,
193 min_fr_len : 6;
194 u_char lng_typ : 1,
195 lng_fld : 1,
196 rxcrc_xf : 1,
197 artx : 1,
198 sarec : 1,
199 tx_jabber : 1, /* why is this called max_len in the manual? */
200 hash_1 : 1,
201 lbpkpol : 1;
202 u_char : 6,
203 fdx : 1,
204 : 1;
205 u_char dummy_6 : 6, /* supposed to be ones */
206 mult_ia : 1,
207 dis_bof : 1;
208 u_char dummy_1 : 1, /* supposed to be one */
209 tx_ifs_retrig : 2,
210 mc_all : 1,
211 rcv_mon : 2,
212 frag_acpt : 1,
213 tstrttrs : 1;
214 u_char fretx : 1,
215 runt_eop : 1,
216 hw_sw_pin : 1,
217 big_endn : 1,
218 syncrqs : 1,
219 sttlen : 1,
220 tx_eop : 1,
221 rx_eop : 1;
222 u_char rbuf_size : 5,
223 rcvstop : 1,
224 : 2;
225};
226
227#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
228
229#endif /* _I82593_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 63ab3873c5ed..b1be39c76931 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -6,6 +6,7 @@
6 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> 6 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
7 * Copyright (c) 2005, Devicescape Software, Inc. 7 * Copyright (c) 2005, Devicescape Software, Inc.
8 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> 8 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
9 * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
@@ -165,8 +166,12 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
165 166
166#define IEEE80211_MAX_MESH_ID_LEN 32 167#define IEEE80211_MAX_MESH_ID_LEN 32
167 168
169#define IEEE80211_FIRST_TSPEC_TSID 8
168#define IEEE80211_NUM_TIDS 16 170#define IEEE80211_NUM_TIDS 16
169 171
172/* number of user priorities 802.11 uses */
173#define IEEE80211_NUM_UPS 8
174
170#define IEEE80211_QOS_CTL_LEN 2 175#define IEEE80211_QOS_CTL_LEN 2
171/* 1d tag mask */ 176/* 1d tag mask */
172#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 177#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
@@ -838,6 +843,16 @@ enum ieee80211_vht_opmode_bits {
838 843
839#define WLAN_SA_QUERY_TR_ID_LEN 2 844#define WLAN_SA_QUERY_TR_ID_LEN 2
840 845
846/**
847 * struct ieee80211_tpc_report_ie
848 *
849 * This structure refers to "TPC Report element"
850 */
851struct ieee80211_tpc_report_ie {
852 u8 tx_power;
853 u8 link_margin;
854} __packed;
855
841struct ieee80211_mgmt { 856struct ieee80211_mgmt {
842 __le16 frame_control; 857 __le16 frame_control;
843 __le16 duration; 858 __le16 duration;
@@ -973,6 +988,13 @@ struct ieee80211_mgmt {
973 u8 action_code; 988 u8 action_code;
974 u8 operating_mode; 989 u8 operating_mode;
975 } __packed vht_opmode_notif; 990 } __packed vht_opmode_notif;
991 struct {
992 u8 action_code;
993 u8 dialog_token;
994 u8 tpc_elem_id;
995 u8 tpc_elem_length;
996 struct ieee80211_tpc_report_ie tpc;
997 } __packed tpc_report;
976 } u; 998 } u;
977 } __packed action; 999 } __packed action;
978 } u; 1000 } u;
@@ -1806,7 +1828,8 @@ enum ieee80211_eid {
1806 WLAN_EID_DMG_TSPEC = 146, 1828 WLAN_EID_DMG_TSPEC = 146,
1807 WLAN_EID_DMG_AT = 147, 1829 WLAN_EID_DMG_AT = 147,
1808 WLAN_EID_DMG_CAP = 148, 1830 WLAN_EID_DMG_CAP = 148,
1809 /* 149-150 reserved for Cisco */ 1831 /* 149 reserved for Cisco */
1832 WLAN_EID_CISCO_VENDOR_SPECIFIC = 150,
1810 WLAN_EID_DMG_OPERATION = 151, 1833 WLAN_EID_DMG_OPERATION = 151,
1811 WLAN_EID_DMG_BSS_PARAM_CHANGE = 152, 1834 WLAN_EID_DMG_BSS_PARAM_CHANGE = 152,
1812 WLAN_EID_DMG_BEAM_REFINEMENT = 153, 1835 WLAN_EID_DMG_BEAM_REFINEMENT = 153,
@@ -1865,6 +1888,7 @@ enum ieee80211_category {
1865 WLAN_CATEGORY_DLS = 2, 1888 WLAN_CATEGORY_DLS = 2,
1866 WLAN_CATEGORY_BACK = 3, 1889 WLAN_CATEGORY_BACK = 3,
1867 WLAN_CATEGORY_PUBLIC = 4, 1890 WLAN_CATEGORY_PUBLIC = 4,
1891 WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
1868 WLAN_CATEGORY_HT = 7, 1892 WLAN_CATEGORY_HT = 7,
1869 WLAN_CATEGORY_SA_QUERY = 8, 1893 WLAN_CATEGORY_SA_QUERY = 8,
1870 WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, 1894 WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -2378,4 +2402,51 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
2378#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) 2402#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
2379#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) 2403#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
2380 2404
2405/**
2406 * ieee80211_action_contains_tpc - checks if the frame contains TPC element
2407 * @skb: the skb containing the frame, length will be checked
2408 *
 2409 * This function checks whether the frame is either a TPC Report action
 2410 * frame or a Link Measurement Report action frame, as defined in
 2411 * IEEE Std. 802.11-2012 8.5.2.5 and 8.5.7.5 respectively.
2412 */
2413static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
2414{
2415 struct ieee80211_mgmt *mgmt = (void *)skb->data;
2416
2417 if (!ieee80211_is_action(mgmt->frame_control))
2418 return false;
2419
2420 if (skb->len < IEEE80211_MIN_ACTION_SIZE +
2421 sizeof(mgmt->u.action.u.tpc_report))
2422 return false;
2423
2424 /*
2425 * TPC report - check that:
2426 * category = 0 (Spectrum Management) or 5 (Radio Measurement)
2427 * spectrum management action = 3 (TPC/Link Measurement report)
2428 * TPC report EID = 35
2429 * TPC report element length = 2
2430 *
2431 * The spectrum management's tpc_report struct is used here both for
2432 * parsing tpc_report and radio measurement's link measurement report
2433 * frame, since the relevant part is identical in both frames.
2434 */
2435 if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT &&
2436 mgmt->u.action.category != WLAN_CATEGORY_RADIO_MEASUREMENT)
2437 return false;
2438
2439 /* both spectrum mgmt and link measurement have same action code */
2440 if (mgmt->u.action.u.tpc_report.action_code !=
2441 WLAN_ACTION_SPCT_TPC_RPRT)
2442 return false;
2443
2444 if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT ||
2445 mgmt->u.action.u.tpc_report.tpc_elem_length !=
2446 sizeof(struct ieee80211_tpc_report_ie))
2447 return false;
2448
2449 return true;
2450}
2451
2381#endif /* LINUX_IEEE80211_H */ 2452#endif /* LINUX_IEEE80211_H */
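
A hedged sketch of how an rx path might use the helper added above before trusting the tpc_report layout; the surrounding driver plumbing is omitted.

#include <linux/ieee80211.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

static void example_handle_tpc(struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;

	if (!ieee80211_action_contains_tpc(skb))
		return;

	/* length, element id and element length were validated by the helper */
	pr_debug("tpc: tx_power=%u link_margin=%u\n",
		 mgmt->u.action.u.tpc_report.tpc.tx_power,
		 mgmt->u.action.u.tpc_report.tpc.link_margin);
}
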
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 6b2c7cf352a5..6f6929ea8a0c 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -60,6 +60,7 @@ struct macvlan_dev {
60#ifdef CONFIG_NET_POLL_CONTROLLER 60#ifdef CONFIG_NET_POLL_CONTROLLER
61 struct netpoll *netpoll; 61 struct netpoll *netpoll;
62#endif 62#endif
63 unsigned int macaddr_count;
63}; 64};
64 65
65static inline void macvlan_count_rx(const struct macvlan_dev *vlan, 66static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f47550d75f85..2c677afeea47 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -39,6 +39,7 @@ static inline struct igmpv3_query *
39 39
40extern int sysctl_igmp_max_memberships; 40extern int sysctl_igmp_max_memberships;
41extern int sysctl_igmp_max_msf; 41extern int sysctl_igmp_max_msf;
42extern int sysctl_igmp_qrv;
42 43
43struct ip_sf_socklist { 44struct ip_sf_socklist {
44 unsigned int sl_max; 45 unsigned int sl_max;
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 4b79ffe7b188..fa76c79a52a1 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
84 put_device(&trig->dev); 84 put_device(&trig->dev);
85} 85}
86 86
87static inline void iio_trigger_get(struct iio_trigger *trig) 87static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
88{ 88{
89 get_device(&trig->dev); 89 get_device(&trig->dev);
90 __module_get(trig->ops->owner); 90 __module_get(trig->ops->owner);
91
92 return trig;
91} 93}
92 94
93/** 95/**
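
A small, hedged example of why iio_trigger_get() now returns the trigger: the reference grab can be folded into the assignment that consumes it, as in a driver installing its own default trigger.

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

static void example_set_default_trigger(struct iio_dev *indio_dev,
					struct iio_trigger *trig)
{
	/* take a reference and install the trigger in one step */
	indio_dev->trig = iio_trigger_get(trig);
}
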
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 7cf5e9b32550..120ccc53fcb7 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -15,7 +15,7 @@ struct linux_binprm;
15 15
16#ifdef CONFIG_IMA 16#ifdef CONFIG_IMA
17extern int ima_bprm_check(struct linux_binprm *bprm); 17extern int ima_bprm_check(struct linux_binprm *bprm);
18extern int ima_file_check(struct file *file, int mask); 18extern int ima_file_check(struct file *file, int mask, int opened);
19extern void ima_file_free(struct file *file); 19extern void ima_file_free(struct file *file);
20extern int ima_file_mmap(struct file *file, unsigned long prot); 20extern int ima_file_mmap(struct file *file, unsigned long prot);
21extern int ima_module_check(struct file *file); 21extern int ima_module_check(struct file *file);
@@ -27,7 +27,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
27 return 0; 27 return 0;
28} 28}
29 29
30static inline int ima_file_check(struct file *file, int mask) 30static inline int ima_file_check(struct file *file, int mask, int opened)
31{ 31{
32 return 0; 32 return 0;
33} 33}
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2bb4c4f3531a..77fc43f8fb72 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -111,12 +111,21 @@ extern struct group_info init_groups;
111#ifdef CONFIG_PREEMPT_RCU 111#ifdef CONFIG_PREEMPT_RCU
112#define INIT_TASK_RCU_PREEMPT(tsk) \ 112#define INIT_TASK_RCU_PREEMPT(tsk) \
113 .rcu_read_lock_nesting = 0, \ 113 .rcu_read_lock_nesting = 0, \
114 .rcu_read_unlock_special = 0, \ 114 .rcu_read_unlock_special.s = 0, \
115 .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ 115 .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
116 INIT_TASK_RCU_TREE_PREEMPT() 116 INIT_TASK_RCU_TREE_PREEMPT()
117#else 117#else
118#define INIT_TASK_RCU_PREEMPT(tsk) 118#define INIT_TASK_RCU_PREEMPT(tsk)
119#endif 119#endif
120#ifdef CONFIG_TASKS_RCU
121#define INIT_TASK_RCU_TASKS(tsk) \
122 .rcu_tasks_holdout = false, \
123 .rcu_tasks_holdout_list = \
124 LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
125 .rcu_tasks_idle_cpu = -1,
126#else
127#define INIT_TASK_RCU_TASKS(tsk)
128#endif
120 129
121extern struct cred init_cred; 130extern struct cred init_cred;
122 131
@@ -224,6 +233,7 @@ extern struct task_group root_task_group;
224 INIT_FTRACE_GRAPH \ 233 INIT_FTRACE_GRAPH \
225 INIT_TRACE_RECURSION \ 234 INIT_TRACE_RECURSION \
226 INIT_TASK_RCU_PREEMPT(tsk) \ 235 INIT_TASK_RCU_PREEMPT(tsk) \
236 INIT_TASK_RCU_TASKS(tsk) \
227 INIT_CPUSET_SEQ(tsk) \ 237 INIT_CPUSET_SEQ(tsk) \
228 INIT_RT_MUTEXES(tsk) \ 238 INIT_RT_MUTEXES(tsk) \
229 INIT_VTIME(tsk) \ 239 INIT_VTIME(tsk) \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 698ad053d064..69517a24bc50 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id);
193/* The following three functions are for the core kernel use only. */ 193/* The following three functions are for the core kernel use only. */
194extern void suspend_device_irqs(void); 194extern void suspend_device_irqs(void);
195extern void resume_device_irqs(void); 195extern void resume_device_irqs(void);
196#ifdef CONFIG_PM_SLEEP
197extern int check_wakeup_irqs(void);
198#else
199static inline int check_wakeup_irqs(void) { return 0; }
200#endif
201 196
202/** 197/**
203 * struct irq_affinity_notify - context for notification of IRQ affinity changes 198 * struct irq_affinity_notify - context for notification of IRQ affinity changes
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 20f9a527922a..7b02bcc85b9e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -80,6 +80,7 @@ enum iommu_attr {
80 DOMAIN_ATTR_FSL_PAMU_STASH, 80 DOMAIN_ATTR_FSL_PAMU_STASH,
81 DOMAIN_ATTR_FSL_PAMU_ENABLE, 81 DOMAIN_ATTR_FSL_PAMU_ENABLE,
82 DOMAIN_ATTR_FSL_PAMUV1, 82 DOMAIN_ATTR_FSL_PAMUV1,
83 DOMAIN_ATTR_NESTING, /* two stages of translation */
83 DOMAIN_ATTR_MAX, 84 DOMAIN_ATTR_MAX,
84}; 85};
85 86
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 142ec544167c..2c5250222278 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -215,6 +215,11 @@ static inline int __deprecated check_region(resource_size_t s,
215 215
216/* Wrappers for managed devices */ 216/* Wrappers for managed devices */
217struct device; 217struct device;
218
219extern int devm_request_resource(struct device *dev, struct resource *root,
220 struct resource *new);
221extern void devm_release_resource(struct device *dev, struct resource *new);
222
218#define devm_request_region(dev,start,n,name) \ 223#define devm_request_region(dev,start,n,name) \
219 __devm_request_region(dev, &ioport_resource, (start), (n), (name)) 224 __devm_request_region(dev, &ioport_resource, (start), (n), (name))
220#define devm_request_mem_region(dev,start,n,name) \ 225#define devm_request_mem_region(dev,start,n,name) \
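
A hedged sketch of the managed helpers declared above; the address range is made up, and a real driver would normally take it from its platform resources.

#include <linux/device.h>
#include <linux/ioport.h>

static struct resource example_res = {
	.start = 0xfe000000,		/* illustrative addresses only */
	.end   = 0xfe000fff,
	.flags = IORESOURCE_MEM,
	.name  = "example",
};

static int example_claim(struct device *dev)
{
	/* released automatically on driver detach, or explicitly via
	 * devm_release_resource(dev, &example_res) */
	return devm_request_resource(dev, &iomem_resource, &example_res);
}
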
diff --git a/include/linux/ipack.h b/include/linux/ipack.h
index 1888e06ddf64..8bddc3fbdddf 100644
--- a/include/linux/ipack.h
+++ b/include/linux/ipack.h
@@ -172,6 +172,7 @@ struct ipack_bus_ops {
172 * @ops: bus operations for the mezzanine drivers 172 * @ops: bus operations for the mezzanine drivers
173 */ 173 */
174struct ipack_bus_device { 174struct ipack_bus_device {
175 struct module *owner;
175 struct device *parent; 176 struct device *parent;
176 int slots; 177 int slots;
177 int bus_nr; 178 int bus_nr;
@@ -189,7 +190,8 @@ struct ipack_bus_device {
189 * available bus device in ipack. 190 * available bus device in ipack.
190 */ 191 */
191struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, 192struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
192 const struct ipack_bus_ops *ops); 193 const struct ipack_bus_ops *ops,
194 struct module *owner);
193 195
194/** 196/**
195 * ipack_bus_unregister -- unregister an ipack bus 197 * ipack_bus_unregister -- unregister an ipack bus
@@ -265,3 +267,23 @@ void ipack_put_device(struct ipack_device *dev);
265 .format = (_format), \ 267 .format = (_format), \
266 .vendor = (vend), \ 268 .vendor = (vend), \
267 .device = (dev) 269 .device = (dev)
270
271/**
 272 * ipack_get_carrier - increase the reference counter of
 273 * the carrier module
274 * @dev: mezzanine device which wants to get the carrier
275 */
276static inline int ipack_get_carrier(struct ipack_device *dev)
277{
278 return try_module_get(dev->bus->owner);
279}
280
281/**
 282 * ipack_put_carrier - decrease the reference counter of
 283 * the carrier module
 284 * @dev: mezzanine device which wants to release the carrier
285 */
286static inline void ipack_put_carrier(struct ipack_device *dev)
287{
288 module_put(dev->bus->owner);
289}
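
A hedged pair of sketches for the changes above: a carrier driver now passes its module to ipack_bus_register(), and a mezzanine driver pins that module around use via the new helpers. example_ops, the slot count and error code are placeholders.

#include <linux/ipack.h>
#include <linux/module.h>
#include <linux/errno.h>

/* carrier side */
static struct ipack_bus_device *example_register(struct device *parent,
						 const struct ipack_bus_ops *ops)
{
	return ipack_bus_register(parent, 4, ops, THIS_MODULE);
}

/* mezzanine side */
static int example_use_carrier(struct ipack_device *dev)
{
	if (!ipack_get_carrier(dev))
		return -EBUSY;

	/* ... talk to the carrier ... */

	ipack_put_carrier(dev);
	return 0;
}
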
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62af59242ddc..03f48d936f66 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -173,6 +173,7 @@ struct irq_data {
173 * IRQD_IRQ_DISABLED - Disabled state of the interrupt 173 * IRQD_IRQ_DISABLED - Disabled state of the interrupt
174 * IRQD_IRQ_MASKED - Masked state of the interrupt 174 * IRQD_IRQ_MASKED - Masked state of the interrupt
175 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt 175 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
176 * IRQD_WAKEUP_ARMED - Wakeup mode armed
176 */ 177 */
177enum { 178enum {
178 IRQD_TRIGGER_MASK = 0xf, 179 IRQD_TRIGGER_MASK = 0xf,
@@ -186,6 +187,7 @@ enum {
186 IRQD_IRQ_DISABLED = (1 << 16), 187 IRQD_IRQ_DISABLED = (1 << 16),
187 IRQD_IRQ_MASKED = (1 << 17), 188 IRQD_IRQ_MASKED = (1 << 17),
188 IRQD_IRQ_INPROGRESS = (1 << 18), 189 IRQD_IRQ_INPROGRESS = (1 << 18),
190 IRQD_WAKEUP_ARMED = (1 << 19),
189}; 191};
190 192
191static inline bool irqd_is_setaffinity_pending(struct irq_data *d) 193static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -257,6 +259,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d)
257 return d->state_use_accessors & IRQD_IRQ_INPROGRESS; 259 return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
258} 260}
259 261
262static inline bool irqd_is_wakeup_armed(struct irq_data *d)
263{
264 return d->state_use_accessors & IRQD_WAKEUP_ARMED;
265}
266
267
260/* 268/*
261 * Functions for chained handlers which can be enabled/disabled by the 269 * Functions for chained handlers which can be enabled/disabled by the
262 * standard disable_irq/enable_irq calls. Must be called with 270 * standard disable_irq/enable_irq calls. Must be called with
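The new IRQD_WAKEUP_ARMED state and its accessor let suspend code tell apart interrupts armed as wakeup sources. A hedged sketch of how such a check might look (function and call site are illustrative, not from this patch):

    #include <linux/irq.h>
    #include <linux/irqdesc.h>

    /* Illustration only: leave wakeup-armed interrupts alone on suspend. */
    static void demo_suspend_one_irq(struct irq_desc *desc)
    {
            struct irq_data *d = irq_desc_get_irq_data(desc);

            if (irqd_is_wakeup_armed(d))
                    return;         /* keep wakeup sources enabled */

            /* ... otherwise mask/disable the interrupt for suspend ... */
    }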
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index bf9422c3aefe..bf3fe719c7ce 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -39,9 +39,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu);
39#endif 39#endif
40 40
41void irq_work_run(void); 41void irq_work_run(void);
42void irq_work_tick(void);
42void irq_work_sync(struct irq_work *work); 43void irq_work_sync(struct irq_work *work);
43 44
44#ifdef CONFIG_IRQ_WORK 45#ifdef CONFIG_IRQ_WORK
46#include <asm/irq_work.h>
47
45bool irq_work_needs_cpu(void); 48bool irq_work_needs_cpu(void);
46#else 49#else
47static inline bool irq_work_needs_cpu(void) { return false; } 50static inline bool irq_work_needs_cpu(void) { return false; }
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 45e2d8c15bd2..13eed92c7d24 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -21,7 +21,11 @@
21#define GIC_CPU_ACTIVEPRIO 0xd0 21#define GIC_CPU_ACTIVEPRIO 0xd0
22#define GIC_CPU_IDENT 0xfc 22#define GIC_CPU_IDENT 0xfc
23 23
24#define GICC_ENABLE 0x1
25#define GICC_INT_PRI_THRESHOLD 0xf0
24#define GICC_IAR_INT_ID_MASK 0x3ff 26#define GICC_IAR_INT_ID_MASK 0x3ff
27#define GICC_INT_SPURIOUS 1023
28#define GICC_DIS_BYPASS_MASK 0x1e0
25 29
26#define GIC_DIST_CTRL 0x000 30#define GIC_DIST_CTRL 0x000
27#define GIC_DIST_CTR 0x004 31#define GIC_DIST_CTR 0x004
@@ -39,6 +43,18 @@
39#define GIC_DIST_SGI_PENDING_CLEAR 0xf10 43#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
40#define GIC_DIST_SGI_PENDING_SET 0xf20 44#define GIC_DIST_SGI_PENDING_SET 0xf20
41 45
46#define GICD_ENABLE 0x1
47#define GICD_DISABLE 0x0
48#define GICD_INT_ACTLOW_LVLTRIG 0x0
49#define GICD_INT_EN_CLR_X32 0xffffffff
50#define GICD_INT_EN_SET_SGI 0x0000ffff
51#define GICD_INT_EN_CLR_PPI 0xffff0000
52#define GICD_INT_DEF_PRI 0xa0
53#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
54 (GICD_INT_DEF_PRI << 16) |\
55 (GICD_INT_DEF_PRI << 8) |\
56 GICD_INT_DEF_PRI)
57
42#define GICH_HCR 0x0 58#define GICH_HCR 0x0
43#define GICH_VTR 0x4 59#define GICH_VTR 0x4
44#define GICH_VMCR 0x8 60#define GICH_VMCR 0x8
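GICD_INT_DEF_PRI_X4 replicates the default priority into all four byte lanes of a distributor priority register, so one 32-bit write covers four interrupts. A sketch of the usual loop (assumes GIC_DIST_PRI, defined elsewhere in this header, and an already mapped distributor base):

    #include <linux/io.h>
    #include <linux/irqchip/arm-gic.h>

    /* Illustration: set the default priority on all SPIs (IDs 32 and up). */
    static void demo_gic_dist_set_priority(void __iomem *dist_base,
                                           unsigned int gic_irqs)
    {
            unsigned int i;

            /* Each GIC_DIST_PRI register holds one priority byte per interrupt. */
            for (i = 32; i < gic_irqs; i += 4)
                    writel_relaxed(GICD_INT_DEF_PRI_X4,
                                   dist_base + GIC_DIST_PRI + i);
    }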
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
new file mode 100644
index 000000000000..e06b370cfc0d
--- /dev/null
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -0,0 +1,32 @@
1/**
2 * irq-omap-intc.h - INTC Idle Functions
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Author: Felipe Balbi <balbi@ti.com>
7 *
8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 of
10 * the License as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
19#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
20
21void omap2_init_irq(void);
22void omap3_init_irq(void);
23void ti81xx_init_irq(void);
24
25int omap_irq_pending(void);
26void omap_intc_save_context(void);
27void omap_intc_restore_context(void);
28void omap3_intc_suspend(void);
29void omap3_intc_prepare_idle(void);
30void omap3_intc_resume_idle(void);
31
32#endif /* __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 472c021a2d4f..faf433af425e 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -12,6 +12,8 @@ struct irq_affinity_notify;
12struct proc_dir_entry; 12struct proc_dir_entry;
13struct module; 13struct module;
14struct irq_desc; 14struct irq_desc;
15struct irq_domain;
16struct pt_regs;
15 17
16/** 18/**
17 * struct irq_desc - interrupt descriptor 19 * struct irq_desc - interrupt descriptor
@@ -36,6 +38,11 @@ struct irq_desc;
36 * @threads_oneshot: bitfield to handle shared oneshot threads 38 * @threads_oneshot: bitfield to handle shared oneshot threads
37 * @threads_active: number of irqaction threads currently running 39 * @threads_active: number of irqaction threads currently running
38 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers 40 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
41 * @nr_actions: number of installed actions on this descriptor
 42 * @no_suspend_depth: number of irqactions on an irq descriptor with
 43 * IRQF_NO_SUSPEND set
 44 * @force_resume_depth: number of irqactions on an irq descriptor with
45 * IRQF_FORCE_RESUME set
39 * @dir: /proc/irq/ procfs entry 46 * @dir: /proc/irq/ procfs entry
40 * @name: flow handler name for /proc/interrupts output 47 * @name: flow handler name for /proc/interrupts output
41 */ 48 */
@@ -68,6 +75,11 @@ struct irq_desc {
68 unsigned long threads_oneshot; 75 unsigned long threads_oneshot;
69 atomic_t threads_active; 76 atomic_t threads_active;
70 wait_queue_head_t wait_for_threads; 77 wait_queue_head_t wait_for_threads;
78#ifdef CONFIG_PM_SLEEP
79 unsigned int nr_actions;
80 unsigned int no_suspend_depth;
81 unsigned int force_resume_depth;
82#endif
71#ifdef CONFIG_PROC_FS 83#ifdef CONFIG_PROC_FS
72 struct proc_dir_entry *dir; 84 struct proc_dir_entry *dir;
73#endif 85#endif
@@ -118,6 +130,23 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de
118 130
119int generic_handle_irq(unsigned int irq); 131int generic_handle_irq(unsigned int irq);
120 132
133#ifdef CONFIG_HANDLE_DOMAIN_IRQ
134/*
 135 * Convert a HW interrupt number to a logical one using an IRQ domain,
 136 * and handle the resulting interrupt number. Return -EINVAL if
137 * conversion failed. Providing a NULL domain indicates that the
138 * conversion has already been done.
139 */
140int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
141 bool lookup, struct pt_regs *regs);
142
143static inline int handle_domain_irq(struct irq_domain *domain,
144 unsigned int hwirq, struct pt_regs *regs)
145{
146 return __handle_domain_irq(domain, hwirq, true, regs);
147}
148#endif
149
121/* Test to see if a driver has successfully requested an irq */ 150/* Test to see if a driver has successfully requested an irq */
122static inline int irq_has_action(unsigned int irq) 151static inline int irq_has_action(unsigned int irq)
123{ 152{
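handle_domain_irq() folds the usual irq_find_mapping() plus dispatch sequence into one call for root irqchip drivers. A hedged sketch of a top-level handler (register offset, base pointer, and domain variable are hypothetical):

    #include <linux/io.h>
    #include <linux/irqdesc.h>
    #include <linux/irqdomain.h>

    #define DEMO_IRQ_PENDING        0x10    /* hypothetical register offset */

    static void __iomem *demo_base;         /* hypothetical, mapped at init */
    static struct irq_domain *demo_domain;  /* hypothetical, created at init */

    static void demo_handle_irq(struct pt_regs *regs)
    {
            u32 hwirq;

            /* In this sketch the controller reports one pending hwirq per
             * read, and 0 means nothing is pending. */
            while ((hwirq = readl_relaxed(demo_base + DEMO_IRQ_PENDING)) != 0) {
                    /* Translate through the domain and run the flow handler. */
                    if (handle_domain_irq(demo_domain, hwirq, regs))
                            break;  /* -EINVAL: no mapping for this hwirq */
            }
    }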
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d5b50a19463c..0dae71e9971c 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -159,7 +159,11 @@ typedef struct journal_header_s
159 * journal_block_tag (in the descriptor). The other h_chksum* fields are 159 * journal_block_tag (in the descriptor). The other h_chksum* fields are
160 * not used. 160 * not used.
161 * 161 *
162 * Checksum v1 and v2 are mutually exclusive features. 162 * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
163 * journal_block_tag3_t to store a full 32-bit checksum. Everything else
164 * is the same as v2.
165 *
166 * Checksum v1, v2, and v3 are mutually exclusive features.
163 */ 167 */
164struct commit_header { 168struct commit_header {
165 __be32 h_magic; 169 __be32 h_magic;
@@ -179,6 +183,14 @@ struct commit_header {
179 * raw struct shouldn't be used for pointer math or sizeof() - use 183 * raw struct shouldn't be used for pointer math or sizeof() - use
180 * journal_tag_bytes(journal) instead to compute this. 184 * journal_tag_bytes(journal) instead to compute this.
181 */ 185 */
186typedef struct journal_block_tag3_s
187{
188 __be32 t_blocknr; /* The on-disk block number */
189 __be32 t_flags; /* See below */
190 __be32 t_blocknr_high; /* most-significant high 32bits. */
191 __be32 t_checksum; /* crc32c(uuid+seq+block) */
192} journal_block_tag3_t;
193
182typedef struct journal_block_tag_s 194typedef struct journal_block_tag_s
183{ 195{
184 __be32 t_blocknr; /* The on-disk block number */ 196 __be32 t_blocknr; /* The on-disk block number */
@@ -187,9 +199,6 @@ typedef struct journal_block_tag_s
187 __be32 t_blocknr_high; /* most-significant high 32bits. */ 199 __be32 t_blocknr_high; /* most-significant high 32bits. */
188} journal_block_tag_t; 200} journal_block_tag_t;
189 201
190#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
191#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
192
193/* Tail of descriptor block, for checksumming */ 202/* Tail of descriptor block, for checksumming */
194struct jbd2_journal_block_tail { 203struct jbd2_journal_block_tail {
195 __be32 t_checksum; /* crc32c(uuid+descr_block) */ 204 __be32 t_checksum; /* crc32c(uuid+descr_block) */
@@ -284,6 +293,7 @@ typedef struct journal_superblock_s
284#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 293#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002
285#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 294#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
286#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 295#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
296#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
287 297
288/* Features known to this kernel version: */ 298/* Features known to this kernel version: */
289#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM 299#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM
@@ -291,7 +301,8 @@ typedef struct journal_superblock_s
291#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \ 301#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \
292 JBD2_FEATURE_INCOMPAT_64BIT | \ 302 JBD2_FEATURE_INCOMPAT_64BIT | \
293 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \ 303 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
294 JBD2_FEATURE_INCOMPAT_CSUM_V2) 304 JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
305 JBD2_FEATURE_INCOMPAT_CSUM_V3)
295 306
296#ifdef __KERNEL__ 307#ifdef __KERNEL__
297 308
@@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y)
1296extern int jbd2_journal_blocks_per_page(struct inode *inode); 1307extern int jbd2_journal_blocks_per_page(struct inode *inode);
1297extern size_t journal_tag_bytes(journal_t *journal); 1308extern size_t journal_tag_bytes(journal_t *journal);
1298 1309
1310static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
1311{
1312 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
1313 JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
1314 return 1;
1315
1316 return 0;
1317}
1318
1299/* 1319/*
1300 * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for 1320 * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
1301 * transaction control blocks. 1321 * transaction control blocks.
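With CSUM_V3 the per-block tag (journal_block_tag3_t) carries a full 32-bit checksum, and jbd2_journal_has_csum_v2or3() gives callers a single test for either checksum flavour. A hedged illustration of the kind of decision it enables (not the in-tree implementation):

    #include <linux/jbd2.h>

    /* Illustration: a descriptor block only carries a checksum tail when
     * CSUM_V2 or CSUM_V3 is enabled on the journal. */
    static int demo_descr_tail_bytes(journal_t *journal)
    {
            if (!jbd2_journal_has_csum_v2or3(journal))
                    return 0;

            return sizeof(struct jbd2_journal_block_tail);
    }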
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1f44466c1e9d..c367cbdf73ab 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
258#define SEC_JIFFIE_SC (32 - SHIFT_HZ) 258#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
259#endif 259#endif
260#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) 260#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
261#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
262#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ 261#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
263 TICK_NSEC -1) / (u64)TICK_NSEC)) 262 TICK_NSEC -1) / (u64)TICK_NSEC))
264 263
265#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ 264#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
266 TICK_NSEC -1) / (u64)TICK_NSEC)) 265 TICK_NSEC -1) / (u64)TICK_NSEC))
267#define USEC_CONVERSION \
268 ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
269 TICK_NSEC -1) / (u64)TICK_NSEC))
270/*
271 * USEC_ROUND is used in the timeval to jiffie conversion. See there
272 * for more details. It is the scaled resolution rounding value. Note
273 * that it is a 64-bit value. Since, when it is applied, we are already
274 * in jiffies (albit scaled), it is nothing but the bits we will shift
275 * off.
276 */
277#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
278/* 266/*
279 * The maximum jiffie value is (MAX_INT >> 1). Here we translate that 267 * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
280 * into seconds. The 64-bit case will overflow if we are not careful, 268 * into seconds. The 64-bit case will overflow if we are not careful,
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 784304b222b3..98f923b6a0ea 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -8,28 +8,28 @@
8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
9 * 9 *
10 * Jump labels provide an interface to generate dynamic branches using 10 * Jump labels provide an interface to generate dynamic branches using
11 * self-modifying code. Assuming toolchain and architecture support the result 11 * self-modifying code. Assuming toolchain and architecture support, the result
12 * of a "if (static_key_false(&key))" statement is a unconditional branch (which 12 * of a "if (static_key_false(&key))" statement is an unconditional branch (which
13 * defaults to false - and the true block is placed out of line). 13 * defaults to false - and the true block is placed out of line).
14 * 14 *
15 * However at runtime we can change the branch target using 15 * However at runtime we can change the branch target using
16 * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key 16 * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
17 * object and for as long as there are references all branches referring to 17 * object, and for as long as there are references all branches referring to
18 * that particular key will point to the (out of line) true block. 18 * that particular key will point to the (out of line) true block.
19 * 19 *
20 * Since this relies on modifying code the static_key_slow_{inc,dec}() functions 20 * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
21 * must be considered absolute slow paths (machine wide synchronization etc.). 21 * must be considered absolute slow paths (machine wide synchronization etc.).
22 * OTOH, since the affected branches are unconditional their runtime overhead 22 * OTOH, since the affected branches are unconditional, their runtime overhead
23 * will be absolutely minimal, esp. in the default (off) case where the total 23 * will be absolutely minimal, esp. in the default (off) case where the total
24 * effect is a single NOP of appropriate size. The on case will patch in a jump 24 * effect is a single NOP of appropriate size. The on case will patch in a jump
25 * to the out-of-line block. 25 * to the out-of-line block.
26 * 26 *
27 * When the control is directly exposed to userspace it is prudent to delay the 27 * When the control is directly exposed to userspace, it is prudent to delay the
28 * decrement to avoid high frequency code modifications which can (and do) 28 * decrement to avoid high frequency code modifications which can (and do)
29 * cause significant performance degradation. Struct static_key_deferred and 29 * cause significant performance degradation. Struct static_key_deferred and
30 * static_key_slow_dec_deferred() provide for this. 30 * static_key_slow_dec_deferred() provide for this.
31 * 31 *
32 * Lacking toolchain and or architecture support, it falls back to a simple 32 * Lacking toolchain and or architecture support, jump labels fall back to a simple
33 * conditional branch. 33 * conditional branch.
34 * 34 *
35 * struct static_key my_key = STATIC_KEY_INIT_TRUE; 35 * struct static_key my_key = STATIC_KEY_INIT_TRUE;
@@ -43,8 +43,7 @@
43 * 43 *
44 * Not initializing the key (static data is initialized to 0s anyway) is the 44 * Not initializing the key (static data is initialized to 0s anyway) is the
45 * same as using STATIC_KEY_INIT_FALSE. 45 * same as using STATIC_KEY_INIT_FALSE.
46 * 46 */
47*/
48 47
49#include <linux/types.h> 48#include <linux/types.h>
50#include <linux/compiler.h> 49#include <linux/compiler.h>
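The reworked comment describes the same API; a compact usage sketch of a default-off key (names are illustrative):

    #include <linux/jump_label.h>
    #include <linux/printk.h>

    /* A feature flag that defaults to off; the branch compiles to a NOP. */
    static struct static_key demo_feature = STATIC_KEY_INIT_FALSE;

    static void demo_fast_path(void)
    {
            if (static_key_false(&demo_feature))
                    pr_debug("demo feature active\n");
    }

    static void demo_set_feature(bool on)
    {
            /* Slow path: patches the branch sites, so call it sparingly. */
            if (on)
                    static_key_slow_inc(&demo_feature);
            else
                    static_key_slow_dec(&demo_feature);
    }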
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 95624bed87ef..35c8ffb0136f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -496,6 +496,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
496 496
497extern int hex_to_bin(char ch); 497extern int hex_to_bin(char ch);
498extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); 498extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
499extern char *bin2hex(char *dst, const void *src, size_t count);
499 500
500bool mac_pton(const char *s, u8 *mac); 501bool mac_pton(const char *s, u8 *mac);
501 502
@@ -715,23 +716,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
715 (void) (&_max1 == &_max2); \ 716 (void) (&_max1 == &_max2); \
716 _max1 > _max2 ? _max1 : _max2; }) 717 _max1 > _max2 ? _max1 : _max2; })
717 718
718#define min3(x, y, z) ({ \ 719#define min3(x, y, z) min((typeof(x))min(x, y), z)
719 typeof(x) _min1 = (x); \ 720#define max3(x, y, z) max((typeof(x))max(x, y), z)
720 typeof(y) _min2 = (y); \
721 typeof(z) _min3 = (z); \
722 (void) (&_min1 == &_min2); \
723 (void) (&_min1 == &_min3); \
724 _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
725 (_min2 < _min3 ? _min2 : _min3); })
726
727#define max3(x, y, z) ({ \
728 typeof(x) _max1 = (x); \
729 typeof(y) _max2 = (y); \
730 typeof(z) _max3 = (z); \
731 (void) (&_max1 == &_max2); \
732 (void) (&_max1 == &_max3); \
733 _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
734 (_max2 > _max3 ? _max2 : _max3); })
735 721
736/** 722/**
737 * min_not_zero - return the minimum that is _not_ zero, unless both are zero 723 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
@@ -746,20 +732,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
746/** 732/**
747 * clamp - return a value clamped to a given range with strict typechecking 733 * clamp - return a value clamped to a given range with strict typechecking
748 * @val: current value 734 * @val: current value
749 * @min: minimum allowable value 735 * @lo: lowest allowable value
750 * @max: maximum allowable value 736 * @hi: highest allowable value
751 * 737 *
752 * This macro does strict typechecking of min/max to make sure they are of the 738 * This macro does strict typechecking of lo/hi to make sure they are of the
753 * same type as val. See the unnecessary pointer comparisons. 739 * same type as val. See the unnecessary pointer comparisons.
754 */ 740 */
755#define clamp(val, min, max) ({ \ 741#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
756 typeof(val) __val = (val); \
757 typeof(min) __min = (min); \
758 typeof(max) __max = (max); \
759 (void) (&__val == &__min); \
760 (void) (&__val == &__max); \
761 __val = __val < __min ? __min: __val; \
762 __val > __max ? __max: __val; })
763 742
764/* 743/*
765 * ..and if you can't take the strict 744 * ..and if you can't take the strict
@@ -781,36 +760,26 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
781 * clamp_t - return a value clamped to a given range using a given type 760 * clamp_t - return a value clamped to a given range using a given type
782 * @type: the type of variable to use 761 * @type: the type of variable to use
783 * @val: current value 762 * @val: current value
784 * @min: minimum allowable value 763 * @lo: minimum allowable value
785 * @max: maximum allowable value 764 * @hi: maximum allowable value
786 * 765 *
787 * This macro does no typechecking and uses temporary variables of type 766 * This macro does no typechecking and uses temporary variables of type
788 * 'type' to make all the comparisons. 767 * 'type' to make all the comparisons.
789 */ 768 */
790#define clamp_t(type, val, min, max) ({ \ 769#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
791 type __val = (val); \
792 type __min = (min); \
793 type __max = (max); \
794 __val = __val < __min ? __min: __val; \
795 __val > __max ? __max: __val; })
796 770
797/** 771/**
798 * clamp_val - return a value clamped to a given range using val's type 772 * clamp_val - return a value clamped to a given range using val's type
799 * @val: current value 773 * @val: current value
800 * @min: minimum allowable value 774 * @lo: minimum allowable value
801 * @max: maximum allowable value 775 * @hi: maximum allowable value
802 * 776 *
803 * This macro does no typechecking and uses temporary variables of whatever 777 * This macro does no typechecking and uses temporary variables of whatever
804 * type the input argument 'val' is. This is useful when val is an unsigned 778 * type the input argument 'val' is. This is useful when val is an unsigned
805 * type and min and max are literals that will otherwise be assigned a signed 779 * type and min and max are literals that will otherwise be assigned a signed
806 * integer type. 780 * integer type.
807 */ 781 */
808#define clamp_val(val, min, max) ({ \ 782#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
809 typeof(val) __val = (val); \
810 typeof(val) __min = (min); \
811 typeof(val) __max = (max); \
812 __val = __val < __min ? __min: __val; \
813 __val > __max ? __max: __val; })
814 783
815 784
816/* 785/*
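min3()/max3() and the clamp family keep their strict-typing behaviour after the rewrite, just expressed as nested min()/max(). A small usage sketch (values and names are arbitrary):

    #include <linux/kernel.h>

    static unsigned int demo_pick_depth(unsigned int requested,
                                        unsigned int hw_limit)
    {
            /* Smallest of the three candidates; all operands share a type. */
            unsigned int depth = min3(requested, hw_limit, 256U);

            /* Keep the result in [1, hw_limit]; lo/hi must match val's type. */
            return clamp(depth, 1U, hw_limit);
    }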
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 44792ee649de..ff9f1d394235 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -53,6 +53,24 @@ typedef int (*request_key_actor_t)(struct key_construction *key,
53 const char *op, void *aux); 53 const char *op, void *aux);
54 54
55/* 55/*
56 * Preparsed matching criterion.
57 */
58struct key_match_data {
59 /* Comparison function, defaults to exact description match, but can be
60 * overridden by type->match_preparse(). Should return true if a match
61 * is found and false if not.
62 */
63 bool (*cmp)(const struct key *key,
64 const struct key_match_data *match_data);
65
66 const void *raw_data; /* Raw match data */
67 void *preparsed; /* For ->match_preparse() to stash stuff */
68 unsigned lookup_type; /* Type of lookup for this search. */
69#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
70#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
71};
72
73/*
56 * kernel managed key type definition 74 * kernel managed key type definition
57 */ 75 */
58struct key_type { 76struct key_type {
@@ -65,11 +83,6 @@ struct key_type {
65 */ 83 */
66 size_t def_datalen; 84 size_t def_datalen;
67 85
68 /* Default key search algorithm. */
69 unsigned def_lookup_type;
70#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
71#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
72
73 /* vet a description */ 86 /* vet a description */
74 int (*vet_description)(const char *description); 87 int (*vet_description)(const char *description);
75 88
@@ -96,8 +109,15 @@ struct key_type {
96 */ 109 */
97 int (*update)(struct key *key, struct key_preparsed_payload *prep); 110 int (*update)(struct key *key, struct key_preparsed_payload *prep);
98 111
99 /* match a key against a description */ 112 /* Preparse the data supplied to ->match() (optional). The
100 int (*match)(const struct key *key, const void *desc); 113 * data to be preparsed can be found in match_data->raw_data.
114 * The lookup type can also be set by this function.
115 */
116 int (*match_preparse)(struct key_match_data *match_data);
117
 118 /* Free preparsed match data (optional). This should be supplied if
119 * ->match_preparse() is supplied. */
120 void (*match_free)(struct key_match_data *match_data);
101 121
 102 /* clear some of the data from a key on revocation (optional) 122 /* clear some of the data from a key on revocation (optional)
103 * - the key's semaphore will be write-locked by the caller 123 * - the key's semaphore will be write-locked by the caller
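A key type opts into the new scheme by filling in a struct key_match_data from ->match_preparse(). A hedged sketch of a hypothetical type that matches on a description prefix (not an in-tree key type):

    #include <linux/key-type.h>
    #include <linux/string.h>

    static bool demo_key_cmp(const struct key *key,
                             const struct key_match_data *match_data)
    {
            const char *prefix = match_data->raw_data;

            return strncmp(key->description, prefix, strlen(prefix)) == 0;
    }

    static int demo_key_match_preparse(struct key_match_data *match_data)
    {
            match_data->cmp = demo_key_cmp;
            match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
            return 0;
    }

    static struct key_type key_type_demo = {
            .name           = "demo",
            .match_preparse = demo_key_match_preparse,
            /* no .match_free: nothing was allocated in preparse */
    };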
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 554fde3a3927..473b43678ad1 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -722,7 +722,7 @@ __kfifo_uint_must_check_helper( \
722/** 722/**
723 * kfifo_dma_out_finish - finish a DMA OUT operation 723 * kfifo_dma_out_finish - finish a DMA OUT operation
724 * @fifo: address of the fifo to be used 724 * @fifo: address of the fifo to be used
725 * @len: number of bytes transferrd 725 * @len: number of bytes transferred
726 * 726 *
727 * This macro finish a DMA OUT operation. The out counter will be updated by 727 * This macro finish a DMA OUT operation. The out counter will be updated by
728 * the len parameter. No error checking will be done. 728 * the len parameter. No error checking will be done.
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a4c33b34fe3f..28be31f49250 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -136,12 +136,11 @@ static inline bool is_error_page(struct page *page)
136#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22 136#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
137#define KVM_REQ_ENABLE_IBS 23 137#define KVM_REQ_ENABLE_IBS 23
138#define KVM_REQ_DISABLE_IBS 24 138#define KVM_REQ_DISABLE_IBS 24
139#define KVM_REQ_APIC_PAGE_RELOAD 25
139 140
140#define KVM_USERSPACE_IRQ_SOURCE_ID 0 141#define KVM_USERSPACE_IRQ_SOURCE_ID 0
141#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 142#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
142 143
143struct kvm;
144struct kvm_vcpu;
145extern struct kmem_cache *kvm_vcpu_cache; 144extern struct kmem_cache *kvm_vcpu_cache;
146 145
147extern spinlock_t kvm_lock; 146extern spinlock_t kvm_lock;
@@ -200,6 +199,17 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
200int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); 199int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
201#endif 200#endif
202 201
202/*
203 * Carry out a gup that requires IO. Allow the mm to relinquish the mmap
204 * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL
205 * controls whether we retry the gup one more time to completion in that case.
206 * Typically this is called after a FAULT_FLAG_RETRY_NOWAIT in the main tdp
207 * handler.
208 */
209int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
210 unsigned long addr, bool write_fault,
211 struct page **pagep);
212
203enum { 213enum {
204 OUTSIDE_GUEST_MODE, 214 OUTSIDE_GUEST_MODE,
205 IN_GUEST_MODE, 215 IN_GUEST_MODE,
@@ -325,8 +335,6 @@ struct kvm_kernel_irq_routing_entry {
325 struct hlist_node link; 335 struct hlist_node link;
326}; 336};
327 337
328struct kvm_irq_routing_table;
329
330#ifndef KVM_PRIVATE_MEM_SLOTS 338#ifndef KVM_PRIVATE_MEM_SLOTS
331#define KVM_PRIVATE_MEM_SLOTS 0 339#define KVM_PRIVATE_MEM_SLOTS 0
332#endif 340#endif
@@ -528,6 +536,8 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
528unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 536unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
529unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); 537unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
530unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); 538unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
539unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
540 bool *writable);
531void kvm_release_page_clean(struct page *page); 541void kvm_release_page_clean(struct page *page);
532void kvm_release_page_dirty(struct page *page); 542void kvm_release_page_dirty(struct page *page);
533void kvm_set_page_accessed(struct page *page); 543void kvm_set_page_accessed(struct page *page);
@@ -579,6 +589,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm);
579void kvm_reload_remote_mmus(struct kvm *kvm); 589void kvm_reload_remote_mmus(struct kvm *kvm);
580void kvm_make_mclock_inprogress_request(struct kvm *kvm); 590void kvm_make_mclock_inprogress_request(struct kvm *kvm);
581void kvm_make_scan_ioapic_request(struct kvm *kvm); 591void kvm_make_scan_ioapic_request(struct kvm *kvm);
592bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
582 593
583long kvm_arch_dev_ioctl(struct file *filp, 594long kvm_arch_dev_ioctl(struct file *filp,
584 unsigned int ioctl, unsigned long arg); 595 unsigned int ioctl, unsigned long arg);
@@ -624,6 +635,8 @@ void kvm_arch_exit(void);
624int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); 635int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
625void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); 636void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
626 637
638void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
639
627void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); 640void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
628void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); 641void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
629void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); 642void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
@@ -632,8 +645,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
632int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); 645int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
633void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); 646void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
634 647
635int kvm_arch_hardware_enable(void *garbage); 648int kvm_arch_hardware_enable(void);
636void kvm_arch_hardware_disable(void *garbage); 649void kvm_arch_hardware_disable(void);
637int kvm_arch_hardware_setup(void); 650int kvm_arch_hardware_setup(void);
638void kvm_arch_hardware_unsetup(void); 651void kvm_arch_hardware_unsetup(void);
639void kvm_arch_check_processor_compat(void *rtn); 652void kvm_arch_check_processor_compat(void *rtn);
@@ -1034,8 +1047,6 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
1034 1047
1035extern bool kvm_rebooting; 1048extern bool kvm_rebooting;
1036 1049
1037struct kvm_device_ops;
1038
1039struct kvm_device { 1050struct kvm_device {
1040 struct kvm_device_ops *ops; 1051 struct kvm_device_ops *ops;
1041 struct kvm *kvm; 1052 struct kvm *kvm;
@@ -1068,12 +1079,10 @@ struct kvm_device_ops {
1068void kvm_device_get(struct kvm_device *dev); 1079void kvm_device_get(struct kvm_device *dev);
1069void kvm_device_put(struct kvm_device *dev); 1080void kvm_device_put(struct kvm_device *dev);
1070struct kvm_device *kvm_device_from_filp(struct file *filp); 1081struct kvm_device *kvm_device_from_filp(struct file *filp);
1082int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
1071 1083
1072extern struct kvm_device_ops kvm_mpic_ops; 1084extern struct kvm_device_ops kvm_mpic_ops;
1073extern struct kvm_device_ops kvm_xics_ops; 1085extern struct kvm_device_ops kvm_xics_ops;
1074extern struct kvm_device_ops kvm_vfio_ops;
1075extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
1076extern struct kvm_device_ops kvm_flic_ops;
1077 1086
1078#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 1087#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
1079 1088
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index b0bcce0ddc95..b606bb689a3e 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -17,6 +17,20 @@
17#ifndef __KVM_TYPES_H__ 17#ifndef __KVM_TYPES_H__
18#define __KVM_TYPES_H__ 18#define __KVM_TYPES_H__
19 19
20struct kvm;
21struct kvm_async_pf;
22struct kvm_device_ops;
23struct kvm_interrupt;
24struct kvm_irq_routing_table;
25struct kvm_memory_slot;
26struct kvm_one_reg;
27struct kvm_run;
28struct kvm_userspace_memory_region;
29struct kvm_vcpu;
30struct kvm_vcpu_init;
31
32enum kvm_mr_change;
33
20#include <asm/types.h> 34#include <asm/types.h>
21 35
22/* 36/*
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6a599dce7f9d..e43686472197 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -15,6 +15,7 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/rwsem.h> 17#include <linux/rwsem.h>
18#include <linux/timer.h>
18#include <linux/workqueue.h> 19#include <linux/workqueue.h>
19 20
20struct device; 21struct device;
@@ -68,7 +69,7 @@ struct led_classdev {
68 const char *default_trigger; /* Trigger to use */ 69 const char *default_trigger; /* Trigger to use */
69 70
70 unsigned long blink_delay_on, blink_delay_off; 71 unsigned long blink_delay_on, blink_delay_off;
71 struct delayed_work blink_work; 72 struct timer_list blink_timer;
72 int blink_brightness; 73 int blink_brightness;
73 74
74 struct work_struct set_brightness_work; 75 struct work_struct set_brightness_work;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 92abb497ab14..bd5fefeaf548 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1404,14 +1404,14 @@ static inline int sata_srst_pmp(struct ata_link *link)
1404 * printk helpers 1404 * printk helpers
1405 */ 1405 */
1406__printf(3, 4) 1406__printf(3, 4)
1407int ata_port_printk(const struct ata_port *ap, const char *level, 1407void ata_port_printk(const struct ata_port *ap, const char *level,
1408 const char *fmt, ...); 1408 const char *fmt, ...);
1409__printf(3, 4) 1409__printf(3, 4)
1410int ata_link_printk(const struct ata_link *link, const char *level, 1410void ata_link_printk(const struct ata_link *link, const char *level,
1411 const char *fmt, ...); 1411 const char *fmt, ...);
1412__printf(3, 4) 1412__printf(3, 4)
1413int ata_dev_printk(const struct ata_device *dev, const char *level, 1413void ata_dev_printk(const struct ata_device *dev, const char *level,
1414 const char *fmt, ...); 1414 const char *fmt, ...);
1415 1415
1416#define ata_port_err(ap, fmt, ...) \ 1416#define ata_port_err(ap, fmt, ...) \
1417 ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) 1417 ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 219d79627c05..ff82a32871b5 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -178,7 +178,6 @@ struct nlm_block {
178 unsigned char b_granted; /* VFS granted lock */ 178 unsigned char b_granted; /* VFS granted lock */
179 struct nlm_file * b_file; /* file in question */ 179 struct nlm_file * b_file; /* file in question */
180 struct cache_req * b_cache_req; /* deferred request handling */ 180 struct cache_req * b_cache_req; /* deferred request handling */
181 struct file_lock * b_fl; /* set for GETLK */
182 struct cache_deferred_req * b_deferred_req; 181 struct cache_deferred_req * b_deferred_req;
183 unsigned int b_flags; /* block flags */ 182 unsigned int b_flags; /* block flags */
184#define B_QUEUED 1 /* lock queued */ 183#define B_QUEUED 1 /* lock queued */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 008388f920d7..74ab23176e9b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 4 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
6 * 6 *
7 * see Documentation/lockdep-design.txt for more details. 7 * see Documentation/locking/lockdep-design.txt for more details.
8 */ 8 */
9#ifndef __LINUX_LOCKDEP_H 9#ifndef __LINUX_LOCKDEP_H
10#define __LINUX_LOCKDEP_H 10#define __LINUX_LOCKDEP_H
@@ -362,6 +362,10 @@ extern void lockdep_trace_alloc(gfp_t mask);
362 WARN_ON(debug_locks && !lockdep_is_held(l)); \ 362 WARN_ON(debug_locks && !lockdep_is_held(l)); \
363 } while (0) 363 } while (0)
364 364
365#define lockdep_assert_held_once(l) do { \
366 WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
367 } while (0)
368
365#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) 369#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
366 370
367#else /* !CONFIG_LOCKDEP */ 371#else /* !CONFIG_LOCKDEP */
@@ -412,6 +416,7 @@ struct lock_class_key { };
412#define lockdep_depth(tsk) (0) 416#define lockdep_depth(tsk) (0)
413 417
414#define lockdep_assert_held(l) do { (void)(l); } while (0) 418#define lockdep_assert_held(l) do { (void)(l); } while (0)
419#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
415 420
416#define lockdep_recursing(tsk) (0) 421#define lockdep_recursing(tsk) (0)
417 422
@@ -505,6 +510,7 @@ static inline void print_irqtrace_events(struct task_struct *curr)
505 510
506#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) 511#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
507#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) 512#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
513#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
508#define lock_map_release(l) lock_release(l, 1, _THIS_IP_) 514#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
509 515
510#ifdef CONFIG_PROVE_LOCKING 516#ifdef CONFIG_PROVE_LOCKING
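lockdep_assert_held_once() is the WARN_ON_ONCE() counterpart of lockdep_assert_held(), useful on hot paths where a repeating warning would flood the log. A small sketch (the structure and field names are made up):

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    struct demo_dev {
            spinlock_t lock;
            unsigned long counter;
    };

    static void demo_update_counter(struct demo_dev *dd)
    {
            /* Warn at most once if a caller forgets to take dd->lock. */
            lockdep_assert_held_once(&dd->lock);

            dd->counter++;
    }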
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index d14af7b722ef..164aad1f9f12 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/uuid.h> 5#include <linux/uuid.h>
6#include <linux/mod_devicetable.h>
6 7
7struct mei_cl_device; 8struct mei_cl_device;
8 9
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e0752d204d9e..19df5d857411 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -440,11 +440,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
440 440
441int memcg_cache_id(struct mem_cgroup *memcg); 441int memcg_cache_id(struct mem_cgroup *memcg);
442 442
443int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
444 struct kmem_cache *root_cache);
445void memcg_free_cache_params(struct kmem_cache *s);
446
447int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
448void memcg_update_array_size(int num_groups); 443void memcg_update_array_size(int num_groups);
449 444
450struct kmem_cache * 445struct kmem_cache *
@@ -574,16 +569,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
574 return -1; 569 return -1;
575} 570}
576 571
577static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
578 struct kmem_cache *s, struct kmem_cache *root_cache)
579{
580 return 0;
581}
582
583static inline void memcg_free_cache_params(struct kmem_cache *s)
584{
585}
586
587static inline struct kmem_cache * 572static inline struct kmem_cache *
588memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) 573memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
589{ 574{
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index d9524c49d767..8f1a41951df9 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -84,6 +84,7 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
84extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); 84extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
85/* VM interface that may be used by firmware interface */ 85/* VM interface that may be used by firmware interface */
86extern int online_pages(unsigned long, unsigned long, int); 86extern int online_pages(unsigned long, unsigned long, int);
87extern int test_pages_in_a_zone(unsigned long, unsigned long);
87extern void __offline_isolated_pages(unsigned long, unsigned long); 88extern void __offline_isolated_pages(unsigned long, unsigned long);
88 89
89typedef void (*online_page_callback_t)(struct page *page); 90typedef void (*online_page_callback_t)(struct page *page);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index f230a978e6ba..3d385c81c153 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -134,9 +134,10 @@ void mpol_free_shared_policy(struct shared_policy *p);
134struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, 134struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
135 unsigned long idx); 135 unsigned long idx);
136 136
137struct mempolicy *get_vma_policy(struct task_struct *tsk, 137struct mempolicy *get_task_policy(struct task_struct *p);
138 struct vm_area_struct *vma, unsigned long addr); 138struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
139bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma); 139 unsigned long addr);
140bool vma_policy_mof(struct vm_area_struct *vma);
140 141
141extern void numa_default_policy(void); 142extern void numa_default_policy(void);
142extern void numa_policy_init(void); 143extern void numa_policy_init(void);
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index c466ff3e16b8..d0e578fd7053 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -251,6 +251,15 @@ enum max77693_haptic_reg {
251 MAX77693_HAPTIC_REG_END, 251 MAX77693_HAPTIC_REG_END,
252}; 252};
253 253
 254/* max77693-pmic LSCNFG configuration register */
255#define MAX77693_PMIC_LOW_SYS_MASK 0x80
256#define MAX77693_PMIC_LOW_SYS_SHIFT 7
257
258/* max77693-haptic configuration register */
259#define MAX77693_CONFIG2_MODE 7
260#define MAX77693_CONFIG2_MEN 6
261#define MAX77693_CONFIG2_HTYP 5
262
254enum max77693_irq_source { 263enum max77693_irq_source {
255 LED_INT = 0, 264 LED_INT = 0,
256 TOPSYS_INT, 265 TOPSYS_INT,
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index b5f73de81aad..1825edacbda7 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -14,6 +14,27 @@
14#ifndef __LINUX_MFD_SEC_CORE_H 14#ifndef __LINUX_MFD_SEC_CORE_H
15#define __LINUX_MFD_SEC_CORE_H 15#define __LINUX_MFD_SEC_CORE_H
16 16
17/* Macros to represent minimum voltages for LDO/BUCK */
18#define MIN_3000_MV 3000000
19#define MIN_2500_MV 2500000
20#define MIN_2000_MV 2000000
21#define MIN_1800_MV 1800000
22#define MIN_1500_MV 1500000
23#define MIN_1400_MV 1400000
24#define MIN_1000_MV 1000000
25
26#define MIN_900_MV 900000
27#define MIN_850_MV 850000
28#define MIN_800_MV 800000
29#define MIN_750_MV 750000
30#define MIN_600_MV 600000
31
32/* Macros to represent steps for LDO/BUCK */
33#define STEP_50_MV 50000
34#define STEP_25_MV 25000
35#define STEP_12_5_MV 12500
36#define STEP_6_25_MV 6250
37
17enum sec_device_type { 38enum sec_device_type {
18 S5M8751X, 39 S5M8751X,
19 S5M8763X, 40 S5M8763X,
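The shared MIN_xxx_MV / STEP_xx_MV macros centralise voltage constants that were previously duplicated per chip (the s2mpa01/s2mps11/s2mps14 headers below drop their private copies). A hedged sketch of how a regulator description might use them (register offset, mask, and ops are placeholders, not from this patch):

    #include <linux/mfd/samsung/core.h>
    #include <linux/module.h>
    #include <linux/regulator/driver.h>

    static const struct regulator_ops demo_ldo_ops;   /* left empty in this sketch */

    static const struct regulator_desc demo_ldo = {
            .name           = "demo-ldo",
            .id             = 0,
            .ops            = &demo_ldo_ops,
            .type           = REGULATOR_VOLTAGE,
            .owner          = THIS_MODULE,
            .min_uV         = MIN_800_MV,   /* 0.8 V floor */
            .uV_step        = STEP_25_MV,   /* 25 mV per selector step */
            .n_voltages     = 64,
            .vsel_reg       = 0x20,         /* made-up register and mask */
            .vsel_mask      = 0x3f,
    };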
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
index fbc63bc0d6a2..2766108bca2f 100644
--- a/include/linux/mfd/samsung/s2mpa01.h
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -155,18 +155,6 @@ enum s2mpa01_regulators {
155 S2MPA01_REGULATOR_MAX, 155 S2MPA01_REGULATOR_MAX,
156}; 156};
157 157
158#define S2MPA01_BUCK_MIN1 600000
159#define S2MPA01_BUCK_MIN2 800000
160#define S2MPA01_BUCK_MIN3 1000000
161#define S2MPA01_BUCK_MIN4 1500000
162#define S2MPA01_LDO_MIN 800000
163
164#define S2MPA01_BUCK_STEP1 6250
165#define S2MPA01_BUCK_STEP2 12500
166
167#define S2MPA01_LDO_STEP1 50000
168#define S2MPA01_LDO_STEP2 25000
169
170#define S2MPA01_LDO_VSEL_MASK 0x3F 158#define S2MPA01_LDO_VSEL_MASK 0x3F
171#define S2MPA01_BUCK_VSEL_MASK 0xFF 159#define S2MPA01_BUCK_VSEL_MASK 0xFF
172#define S2MPA01_ENABLE_MASK (0x03 << S2MPA01_ENABLE_SHIFT) 160#define S2MPA01_ENABLE_MASK (0x03 << S2MPA01_ENABLE_SHIFT)
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index b3ddf98dec37..7981a9d77d3f 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -171,15 +171,6 @@ enum s2mps11_regulators {
171 S2MPS11_REGULATOR_MAX, 171 S2MPS11_REGULATOR_MAX,
172}; 172};
173 173
174#define S2MPS11_BUCK_MIN1 600000
175#define S2MPS11_BUCK_MIN2 750000
176#define S2MPS11_BUCK_MIN3 3000000
177#define S2MPS11_LDO_MIN 800000
178#define S2MPS11_BUCK_STEP1 6250
179#define S2MPS11_BUCK_STEP2 12500
180#define S2MPS11_BUCK_STEP3 25000
181#define S2MPS11_LDO_STEP1 50000
182#define S2MPS11_LDO_STEP2 25000
183#define S2MPS11_LDO_VSEL_MASK 0x3F 174#define S2MPS11_LDO_VSEL_MASK 0x3F
184#define S2MPS11_BUCK_VSEL_MASK 0xFF 175#define S2MPS11_BUCK_VSEL_MASK 0xFF
185#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT) 176#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
index 900cd7a04314..c92f4782afb5 100644
--- a/include/linux/mfd/samsung/s2mps14.h
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -123,10 +123,6 @@ enum s2mps14_regulators {
123}; 123};
124 124
125/* Regulator constraints for BUCKx */ 125/* Regulator constraints for BUCKx */
126#define S2MPS14_BUCK1235_MIN_600MV 600000
127#define S2MPS14_BUCK4_MIN_1400MV 1400000
128#define S2MPS14_BUCK1235_STEP_6_25MV 6250
129#define S2MPS14_BUCK4_STEP_12_5MV 12500
130#define S2MPS14_BUCK1235_START_SEL 0x20 126#define S2MPS14_BUCK1235_START_SEL 0x20
131#define S2MPS14_BUCK4_START_SEL 0x40 127#define S2MPS14_BUCK4_START_SEL 0x40
132/* 128/*
@@ -136,12 +132,6 @@ enum s2mps14_regulators {
136 */ 132 */
137#define S2MPS14_BUCK_RAMP_DELAY 12500 133#define S2MPS14_BUCK_RAMP_DELAY 12500
138 134
139/* Regulator constraints for different types of LDOx */
140#define S2MPS14_LDO_MIN_800MV 800000
141#define S2MPS14_LDO_MIN_1800MV 1800000
142#define S2MPS14_LDO_STEP_12_5MV 12500
143#define S2MPS14_LDO_STEP_25MV 25000
144
145#define S2MPS14_LDO_VSEL_MASK 0x3F 135#define S2MPS14_LDO_VSEL_MASK 0x3F
146#define S2MPS14_BUCK_VSEL_MASK 0xFF 136#define S2MPS14_BUCK_VSEL_MASK 0xFF
147#define S2MPS14_ENABLE_MASK (0x03 << S2MPS14_ENABLE_SHIFT) 137#define S2MPS14_ENABLE_MASK (0x03 << S2MPS14_ENABLE_SHIFT)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 8f6f2e91e7ae..57388171610d 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -5,6 +5,7 @@
5#include <linux/fb.h> 5#include <linux/fb.h>
6#include <linux/io.h> 6#include <linux/io.h>
7#include <linux/jiffies.h> 7#include <linux/jiffies.h>
8#include <linux/mmc/card.h>
8#include <linux/platform_device.h> 9#include <linux/platform_device.h>
9#include <linux/pm_runtime.h> 10#include <linux/pm_runtime.h>
10 11
@@ -83,6 +84,27 @@
83 */ 84 */
84#define TMIO_MMC_HAVE_HIGH_REG (1 << 6) 85#define TMIO_MMC_HAVE_HIGH_REG (1 << 6)
85 86
87/*
 88 * Some controllers have a register that controls whether
 89 * CMD12 is issued automatically
90 */
91#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7)
92
93/*
 94 * Some controllers need to set 1 on SDIO status reserved bits
95 */
96#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
97
98/*
99 * Some controllers have DMA enable/disable register
100 */
101#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9)
102
103/*
104 * Some controllers allows to set SDx actual clock
105 */
106#define TMIO_MMC_CLK_ACTUAL (1 << 10)
107
86int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); 108int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
87int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); 109int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
88void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); 110void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
@@ -96,6 +118,7 @@ struct tmio_mmc_dma {
96 int slave_id_tx; 118 int slave_id_tx;
97 int slave_id_rx; 119 int slave_id_rx;
98 int alignment_shift; 120 int alignment_shift;
121 dma_addr_t dma_rx_offset;
99 bool (*filter)(struct dma_chan *chan, void *arg); 122 bool (*filter)(struct dma_chan *chan, void *arg);
100}; 123};
101 124
@@ -120,6 +143,8 @@ struct tmio_mmc_data {
120 /* clock management callbacks */ 143 /* clock management callbacks */
121 int (*clk_enable)(struct platform_device *pdev, unsigned int *f); 144 int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
122 void (*clk_disable)(struct platform_device *pdev); 145 void (*clk_disable)(struct platform_device *pdev);
146 int (*multi_io_quirk)(struct mmc_card *card,
147 unsigned int direction, int blk_size);
123}; 148};
124 149
125/* 150/*
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 2e5b194b9b19..53d33dee70e1 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -37,6 +37,7 @@
37 37
38/* struct phy_device dev_flags definitions */ 38/* struct phy_device dev_flags definitions */
39#define MICREL_PHY_50MHZ_CLK 0x00000001 39#define MICREL_PHY_50MHZ_CLK 0x00000001
40#define MICREL_PHY_25MHZ_CLK 0x00000002
40 41
41#define MICREL_KSZ9021_EXTREG_CTRL 0xB 42#define MICREL_KSZ9021_EXTREG_CTRL 0xB
42#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC 43#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a2901c414664..01aad3ed89ec 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private);
13 * Return values from addresss_space_operations.migratepage(): 13 * Return values from addresss_space_operations.migratepage():
14 * - negative errno on page migration failure; 14 * - negative errno on page migration failure;
15 * - zero on page migration success; 15 * - zero on page migration success;
16 *
17 * The balloon page migration introduces this special case where a 'distinct'
18 * return code is used to flag a successful page migration to unmap_and_move().
19 * This approach is necessary because page migration can race against balloon
20 * deflation procedure, and for such case we could introduce a nasty page leak
21 * if a successfully migrated balloon page gets released concurrently with
22 * migration's unmap_and_move() wrap-up steps.
23 */ 16 */
24#define MIGRATEPAGE_SUCCESS 0 17#define MIGRATEPAGE_SUCCESS 0
25#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page 18
26 * sucessful migration case.
27 */
28enum migrate_reason { 19enum migrate_reason {
29 MR_COMPACTION, 20 MR_COMPACTION,
30 MR_MEMORY_FAILURE, 21 MR_MEMORY_FAILURE,
@@ -82,9 +73,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
82 return -ENOSYS; 73 return -ENOSYS;
83} 74}
84 75
85/* Possible settings for the migrate_page() method in address_operations */
86#define migrate_page NULL
87
88#endif /* CONFIG_MIGRATION */ 76#endif /* CONFIG_MIGRATION */
89 77
90#ifdef CONFIG_NUMA_BALANCING 78#ifdef CONFIG_NUMA_BALANCING
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 071f6b234604..37e4404d0227 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -38,6 +38,7 @@
38#include <linux/completion.h> 38#include <linux/completion.h>
39#include <linux/radix-tree.h> 39#include <linux/radix-tree.h>
40#include <linux/cpu_rmap.h> 40#include <linux/cpu_rmap.h>
41#include <linux/crash_dump.h>
41 42
42#include <linux/atomic.h> 43#include <linux/atomic.h>
43 44
@@ -184,19 +185,24 @@ enum {
184 MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9, 185 MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9,
185 MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, 186 MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
186 MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, 187 MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
188 MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12,
189 MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13
187}; 190};
188 191
189enum { 192enum {
190 MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0, 193 MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0,
191 MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1 194 MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1,
195 MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2,
196 MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3
192}; 197};
193 198
194enum { 199enum {
195 MLX4_USER_DEV_CAP_64B_CQE = 1L << 0 200 MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
196}; 201};
197 202
198enum { 203enum {
199 MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0 204 MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
205 MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1
200}; 206};
201 207
202 208
@@ -209,6 +215,7 @@ enum {
209 MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, 215 MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
210 MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, 216 MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
211 MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, 217 MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
218 MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
212}; 219};
213 220
214enum mlx4_event { 221enum mlx4_event {
@@ -576,7 +583,7 @@ struct mlx4_uar {
576}; 583};
577 584
578struct mlx4_bf { 585struct mlx4_bf {
579 unsigned long offset; 586 unsigned int offset;
580 int buf_size; 587 int buf_size;
581 struct mlx4_uar *uar; 588 struct mlx4_uar *uar;
582 void __iomem *reg; 589 void __iomem *reg;
@@ -700,6 +707,7 @@ struct mlx4_dev {
700 u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; 707 u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
701 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; 708 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
702 struct mlx4_vf_dev *dev_vfs; 709 struct mlx4_vf_dev *dev_vfs;
710 int nvfs[MLX4_MAX_PORTS + 1];
703}; 711};
704 712
705struct mlx4_eqe { 713struct mlx4_eqe {
@@ -1196,6 +1204,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
1196 enum mlx4_net_trans_rule_id id); 1204 enum mlx4_net_trans_rule_id id);
1197int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); 1205int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
1198 1206
1207int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
1208 int port, int qpn, u16 prio, u64 *reg_id);
1209
1199void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, 1210void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
1200 int i, int val); 1211 int i, int val);
1201 1212
@@ -1275,7 +1286,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
1275/* Returns true if running in low memory profile (kdump kernel) */ 1286/* Returns true if running in low memory profile (kdump kernel) */
1276static inline bool mlx4_low_memory_profile(void) 1287static inline bool mlx4_low_memory_profile(void)
1277{ 1288{
1278 return reset_devices; 1289 return is_kdump_kernel();
1279} 1290}
1280 1291
1281#endif /* MLX4_DEVICE_H */ 1292#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 7040dc98ff8b..5f4e36cf0091 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -56,7 +56,8 @@ enum mlx4_qp_optpar {
56 MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, 56 MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13,
57 MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, 57 MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
58 MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16, 58 MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16,
59 MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20 59 MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20,
60 MLX4_QP_OPTPAR_VLAN_STRIPPING = 1 << 21,
60}; 61};
61 62
62enum mlx4_qp_state { 63enum mlx4_qp_state {
@@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg {
423 424
424enum mlx4_update_qp_attr { 425enum mlx4_update_qp_attr {
425 MLX4_UPDATE_QP_SMAC = 1 << 0, 426 MLX4_UPDATE_QP_SMAC = 1 << 0,
427 MLX4_UPDATE_QP_VSD = 1 << 2,
428 MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1
429};
430
431enum mlx4_update_qp_params_flags {
432 MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0,
426}; 433};
427 434
428struct mlx4_update_qp_params { 435struct mlx4_update_qp_params {
429 u8 smac_index; 436 u8 smac_index;
437 u32 flags;
430}; 438};
431 439
432int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, 440int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
433 enum mlx4_update_qp_attr attr, 441 enum mlx4_update_qp_attr attr,
434 struct mlx4_update_qp_params *params); 442 struct mlx4_update_qp_params *params);
435int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 443int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
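
mlx4_update_qp() now takes a raw QP number, and the update mask gains a VSD (VLAN stripping disable) attribute with a matching params flag. A minimal sketch of how a caller might request VSD for a QP, assuming dev and qpn come from the surrounding driver code and that MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE carries the desired VSD state:

	struct mlx4_update_qp_params params = {
		.flags = MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE,
	};
	int err;

	err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
	if (err)
		mlx4_warn(dev, "failed to update VSD for QP 0x%x\n", qpn);
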
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 334947151dfc..1d67fd32e71c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -44,6 +44,50 @@
44#error Host endianness not defined 44#error Host endianness not defined
45#endif 45#endif
46 46
47/* helper macros */
48#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
49#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
50#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
51#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
52#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
53#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
54#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
55#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
56#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
57
58#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
59#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
60#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
61#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
62#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
63
64/* insert a value to a struct */
65#define MLX5_SET(typ, p, fld, v) do { \
66 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
67 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
68 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
69 (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
70 << __mlx5_dw_bit_off(typ, fld))); \
71} while (0)
72
73#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
74__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
75__mlx5_mask(typ, fld))
76
77#define MLX5_GET_PR(typ, p, fld) ({ \
78 u32 ___t = MLX5_GET(typ, p, fld); \
79 pr_debug(#fld " = 0x%x\n", ___t); \
80 ___t; \
81})
82
83#define MLX5_SET64(typ, p, fld, v) do { \
84 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
85 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
86 *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
87} while (0)
88
89#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
90
47enum { 91enum {
48 MLX5_MAX_COMMANDS = 32, 92 MLX5_MAX_COMMANDS = 32,
49 MLX5_CMD_DATA_BLOCK_SIZE = 512, 93 MLX5_CMD_DATA_BLOCK_SIZE = 512,
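
The new MLX5_SET()/MLX5_GET() helpers read and write fields of the big-endian, bit-packed layouts described by the mlx5_ifc_*_bits structures (added below in mlx5_ifc.h), with the buffer sized via MLX5_ST_SZ_BYTES(). A minimal sketch of setting and reading back one field in a capability buffer; the specific field and value are illustrative only:

	u8 cap[MLX5_ST_SZ_BYTES(cmd_hca_cap)] = {0};
	int log_max_qp;

	MLX5_SET(cmd_hca_cap, cap, log_max_qp, 17);
	log_max_qp = MLX5_GET(cmd_hca_cap, cap, log_max_qp);	/* reads back 17 */
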
@@ -71,6 +115,11 @@ enum {
71}; 115};
72 116
73enum { 117enum {
118 MLX5_MIN_PKEY_TABLE_SIZE = 128,
119 MLX5_MAX_LOG_PKEY_TABLE = 5,
120};
121
122enum {
74 MLX5_PERM_LOCAL_READ = 1 << 2, 123 MLX5_PERM_LOCAL_READ = 1 << 2,
75 MLX5_PERM_LOCAL_WRITE = 1 << 3, 124 MLX5_PERM_LOCAL_WRITE = 1 << 3,
76 MLX5_PERM_REMOTE_READ = 1 << 4, 125 MLX5_PERM_REMOTE_READ = 1 << 4,
@@ -184,10 +233,10 @@ enum {
184 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, 233 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
185 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, 234 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
186 MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, 235 MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32,
236 MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
187 MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, 237 MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
188 MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, 238 MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
189 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, 239 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
190 MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
191 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, 240 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
192}; 241};
193 242
@@ -243,10 +292,14 @@ enum {
243}; 292};
244 293
245enum { 294enum {
246 MLX5_CAP_OFF_DCT = 41,
247 MLX5_CAP_OFF_CMDIF_CSUM = 46, 295 MLX5_CAP_OFF_CMDIF_CSUM = 46,
248}; 296};
249 297
298enum {
299 HCA_CAP_OPMOD_GET_MAX = 0,
300 HCA_CAP_OPMOD_GET_CUR = 1,
301};
302
250struct mlx5_inbox_hdr { 303struct mlx5_inbox_hdr {
251 __be16 opcode; 304 __be16 opcode;
252 u8 rsvd[4]; 305 u8 rsvd[4];
@@ -274,101 +327,6 @@ struct mlx5_cmd_query_adapter_mbox_out {
274 u8 vsd_psid[16]; 327 u8 vsd_psid[16];
275}; 328};
276 329
277struct mlx5_hca_cap {
278 u8 rsvd1[16];
279 u8 log_max_srq_sz;
280 u8 log_max_qp_sz;
281 u8 rsvd2;
282 u8 log_max_qp;
283 u8 log_max_strq_sz;
284 u8 log_max_srqs;
285 u8 rsvd4[2];
286 u8 rsvd5;
287 u8 log_max_cq_sz;
288 u8 rsvd6;
289 u8 log_max_cq;
290 u8 log_max_eq_sz;
291 u8 log_max_mkey;
292 u8 rsvd7;
293 u8 log_max_eq;
294 u8 max_indirection;
295 u8 log_max_mrw_sz;
296 u8 log_max_bsf_list_sz;
297 u8 log_max_klm_list_sz;
298 u8 rsvd_8_0;
299 u8 log_max_ra_req_dc;
300 u8 rsvd_8_1;
301 u8 log_max_ra_res_dc;
302 u8 rsvd9;
303 u8 log_max_ra_req_qp;
304 u8 rsvd10;
305 u8 log_max_ra_res_qp;
306 u8 rsvd11[4];
307 __be16 max_qp_count;
308 __be16 rsvd12;
309 u8 rsvd13;
310 u8 local_ca_ack_delay;
311 u8 rsvd14;
312 u8 num_ports;
313 u8 log_max_msg;
314 u8 rsvd15[3];
315 __be16 stat_rate_support;
316 u8 rsvd16[2];
317 __be64 flags;
318 u8 rsvd17;
319 u8 uar_sz;
320 u8 rsvd18;
321 u8 log_pg_sz;
322 __be16 bf_log_bf_reg_size;
323 u8 rsvd19[4];
324 __be16 max_desc_sz_sq;
325 u8 rsvd20[2];
326 __be16 max_desc_sz_rq;
327 u8 rsvd21[2];
328 __be16 max_desc_sz_sq_dc;
329 __be32 max_qp_mcg;
330 u8 rsvd22[3];
331 u8 log_max_mcg;
332 u8 rsvd23;
333 u8 log_max_pd;
334 u8 rsvd24;
335 u8 log_max_xrcd;
336 u8 rsvd25[42];
337 __be16 log_uar_page_sz;
338 u8 rsvd26[28];
339 u8 log_max_atomic_size_qp;
340 u8 rsvd27[2];
341 u8 log_max_atomic_size_dc;
342 u8 rsvd28[76];
343};
344
345
346struct mlx5_cmd_query_hca_cap_mbox_in {
347 struct mlx5_inbox_hdr hdr;
348 u8 rsvd[8];
349};
350
351
352struct mlx5_cmd_query_hca_cap_mbox_out {
353 struct mlx5_outbox_hdr hdr;
354 u8 rsvd0[8];
355 struct mlx5_hca_cap hca_cap;
356};
357
358
359struct mlx5_cmd_set_hca_cap_mbox_in {
360 struct mlx5_inbox_hdr hdr;
361 u8 rsvd[8];
362 struct mlx5_hca_cap hca_cap;
363};
364
365
366struct mlx5_cmd_set_hca_cap_mbox_out {
367 struct mlx5_outbox_hdr hdr;
368 u8 rsvd0[8];
369};
370
371
372struct mlx5_cmd_init_hca_mbox_in { 330struct mlx5_cmd_init_hca_mbox_in {
373 struct mlx5_inbox_hdr hdr; 331 struct mlx5_inbox_hdr hdr;
374 u8 rsvd0[2]; 332 u8 rsvd0[2];
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b88e9b46d957..246310dc8bef 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -44,6 +44,7 @@
44 44
45#include <linux/mlx5/device.h> 45#include <linux/mlx5/device.h>
46#include <linux/mlx5/doorbell.h> 46#include <linux/mlx5/doorbell.h>
47#include <linux/mlx5/mlx5_ifc.h>
47 48
48enum { 49enum {
49 MLX5_BOARD_ID_LEN = 64, 50 MLX5_BOARD_ID_LEN = 64,
@@ -99,81 +100,6 @@ enum {
99}; 100};
100 101
101enum { 102enum {
102 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
103 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
104 MLX5_CMD_OP_INIT_HCA = 0x102,
105 MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
106 MLX5_CMD_OP_ENABLE_HCA = 0x104,
107 MLX5_CMD_OP_DISABLE_HCA = 0x105,
108 MLX5_CMD_OP_QUERY_PAGES = 0x107,
109 MLX5_CMD_OP_MANAGE_PAGES = 0x108,
110 MLX5_CMD_OP_SET_HCA_CAP = 0x109,
111
112 MLX5_CMD_OP_CREATE_MKEY = 0x200,
113 MLX5_CMD_OP_QUERY_MKEY = 0x201,
114 MLX5_CMD_OP_DESTROY_MKEY = 0x202,
115 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
116
117 MLX5_CMD_OP_CREATE_EQ = 0x301,
118 MLX5_CMD_OP_DESTROY_EQ = 0x302,
119 MLX5_CMD_OP_QUERY_EQ = 0x303,
120
121 MLX5_CMD_OP_CREATE_CQ = 0x400,
122 MLX5_CMD_OP_DESTROY_CQ = 0x401,
123 MLX5_CMD_OP_QUERY_CQ = 0x402,
124 MLX5_CMD_OP_MODIFY_CQ = 0x403,
125
126 MLX5_CMD_OP_CREATE_QP = 0x500,
127 MLX5_CMD_OP_DESTROY_QP = 0x501,
128 MLX5_CMD_OP_RST2INIT_QP = 0x502,
129 MLX5_CMD_OP_INIT2RTR_QP = 0x503,
130 MLX5_CMD_OP_RTR2RTS_QP = 0x504,
131 MLX5_CMD_OP_RTS2RTS_QP = 0x505,
132 MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
133 MLX5_CMD_OP_2ERR_QP = 0x507,
134 MLX5_CMD_OP_RTS2SQD_QP = 0x508,
135 MLX5_CMD_OP_SQD2RTS_QP = 0x509,
136 MLX5_CMD_OP_2RST_QP = 0x50a,
137 MLX5_CMD_OP_QUERY_QP = 0x50b,
138 MLX5_CMD_OP_CONF_SQP = 0x50c,
139 MLX5_CMD_OP_MAD_IFC = 0x50d,
140 MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
141 MLX5_CMD_OP_SUSPEND_QP = 0x50f,
142 MLX5_CMD_OP_UNSUSPEND_QP = 0x510,
143 MLX5_CMD_OP_SQD2SQD_QP = 0x511,
144 MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512,
145 MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513,
146 MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514,
147
148 MLX5_CMD_OP_CREATE_PSV = 0x600,
149 MLX5_CMD_OP_DESTROY_PSV = 0x601,
150 MLX5_CMD_OP_QUERY_PSV = 0x602,
151 MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603,
152 MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604,
153
154 MLX5_CMD_OP_CREATE_SRQ = 0x700,
155 MLX5_CMD_OP_DESTROY_SRQ = 0x701,
156 MLX5_CMD_OP_QUERY_SRQ = 0x702,
157 MLX5_CMD_OP_ARM_RQ = 0x703,
158 MLX5_CMD_OP_RESIZE_SRQ = 0x704,
159
160 MLX5_CMD_OP_ALLOC_PD = 0x800,
161 MLX5_CMD_OP_DEALLOC_PD = 0x801,
162 MLX5_CMD_OP_ALLOC_UAR = 0x802,
163 MLX5_CMD_OP_DEALLOC_UAR = 0x803,
164
165 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
166 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
167
168
169 MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
170 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
171
172 MLX5_CMD_OP_ACCESS_REG = 0x805,
173 MLX5_CMD_OP_MAX = 0x810,
174};
175
176enum {
177 MLX5_REG_PCAP = 0x5001, 103 MLX5_REG_PCAP = 0x5001,
178 MLX5_REG_PMTU = 0x5003, 104 MLX5_REG_PMTU = 0x5003,
179 MLX5_REG_PTYS = 0x5004, 105 MLX5_REG_PTYS = 0x5004,
@@ -335,23 +261,30 @@ struct mlx5_port_caps {
335 int pkey_table_len; 261 int pkey_table_len;
336}; 262};
337 263
338struct mlx5_caps { 264struct mlx5_general_caps {
339 u8 log_max_eq; 265 u8 log_max_eq;
340 u8 log_max_cq; 266 u8 log_max_cq;
341 u8 log_max_qp; 267 u8 log_max_qp;
342 u8 log_max_mkey; 268 u8 log_max_mkey;
343 u8 log_max_pd; 269 u8 log_max_pd;
344 u8 log_max_srq; 270 u8 log_max_srq;
271 u8 log_max_strq;
272 u8 log_max_mrw_sz;
273 u8 log_max_bsf_list_size;
274 u8 log_max_klm_list_size;
345 u32 max_cqes; 275 u32 max_cqes;
346 int max_wqes; 276 int max_wqes;
277 u32 max_eqes;
278 u32 max_indirection;
347 int max_sq_desc_sz; 279 int max_sq_desc_sz;
348 int max_rq_desc_sz; 280 int max_rq_desc_sz;
281 int max_dc_sq_desc_sz;
349 u64 flags; 282 u64 flags;
350 u16 stat_rate_support; 283 u16 stat_rate_support;
351 int log_max_msg; 284 int log_max_msg;
352 int num_ports; 285 int num_ports;
353 int max_ra_res_qp; 286 u8 log_max_ra_res_qp;
354 int max_ra_req_qp; 287 u8 log_max_ra_req_qp;
355 int max_srq_wqes; 288 int max_srq_wqes;
356 int bf_reg_size; 289 int bf_reg_size;
357 int bf_regs_per_page; 290 int bf_regs_per_page;
@@ -363,6 +296,19 @@ struct mlx5_caps {
363 u8 log_max_mcg; 296 u8 log_max_mcg;
364 u32 max_qp_mcg; 297 u32 max_qp_mcg;
365 int min_page_sz; 298 int min_page_sz;
299 int pd_cap;
300 u32 max_qp_counters;
301 u32 pkey_table_size;
302 u8 log_max_ra_req_dc;
303 u8 log_max_ra_res_dc;
304 u32 uar_sz;
305 u8 min_log_pg_sz;
306 u8 log_max_xrcd;
307 u16 log_uar_page_sz;
308};
309
310struct mlx5_caps {
311 struct mlx5_general_caps gen;
366}; 312};
367 313
368struct mlx5_cmd_mailbox { 314struct mlx5_cmd_mailbox {
@@ -429,6 +375,16 @@ struct mlx5_core_mr {
429 u32 pd; 375 u32 pd;
430}; 376};
431 377
378enum mlx5_res_type {
379 MLX5_RES_QP,
380};
381
382struct mlx5_core_rsc_common {
383 enum mlx5_res_type res;
384 atomic_t refcount;
385 struct completion free;
386};
387
432struct mlx5_core_srq { 388struct mlx5_core_srq {
433 u32 srqn; 389 u32 srqn;
434 int max; 390 int max;
@@ -695,6 +651,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
695void mlx5_cmd_use_events(struct mlx5_core_dev *dev); 651void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
696void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 652void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
697int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); 653int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
654int mlx5_cmd_status_to_err_v2(void *ptr);
655int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
656 u16 opmod);
698int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 657int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
699 int out_size); 658 int out_size);
700int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 659int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -751,7 +710,7 @@ int mlx5_eq_init(struct mlx5_core_dev *dev);
751void mlx5_eq_cleanup(struct mlx5_core_dev *dev); 710void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
752void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); 711void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
753void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); 712void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
754void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type); 713void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
755void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); 714void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
756struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); 715struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
757void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); 716void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
@@ -788,6 +747,7 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
788int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, 747int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
789 int npsvs, u32 *sig_index); 748 int npsvs, u32 *sig_index);
790int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); 749int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
750void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
791 751
792static inline u32 mlx5_mkey_to_idx(u32 mkey) 752static inline u32 mlx5_mkey_to_idx(u32 mkey)
793{ 753{
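
With the HCA capabilities regrouped under a mlx5_general_caps substructure, mlx5_core_get_caps() takes an op_mod selecting maximum versus currently enabled capabilities. A minimal sketch, assuming dev is an initialized mlx5_core_dev supplied by the caller:

	struct mlx5_caps caps;
	int err;

	err = mlx5_core_get_caps(dev, &caps, HCA_CAP_OPMOD_GET_CUR);
	if (!err)
		pr_debug("log_max_qp %d, num_ports %d\n",
			 caps.gen.log_max_qp, caps.gen.num_ports);
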
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
new file mode 100644
index 000000000000..5f48b8f592c5
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -0,0 +1,349 @@
1/*
2 * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_IFC_H
34#define MLX5_IFC_H
35
36enum {
37 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
38 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
39 MLX5_CMD_OP_INIT_HCA = 0x102,
40 MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
41 MLX5_CMD_OP_ENABLE_HCA = 0x104,
42 MLX5_CMD_OP_DISABLE_HCA = 0x105,
43 MLX5_CMD_OP_QUERY_PAGES = 0x107,
44 MLX5_CMD_OP_MANAGE_PAGES = 0x108,
45 MLX5_CMD_OP_SET_HCA_CAP = 0x109,
46 MLX5_CMD_OP_CREATE_MKEY = 0x200,
47 MLX5_CMD_OP_QUERY_MKEY = 0x201,
48 MLX5_CMD_OP_DESTROY_MKEY = 0x202,
49 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
50 MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
51 MLX5_CMD_OP_CREATE_EQ = 0x301,
52 MLX5_CMD_OP_DESTROY_EQ = 0x302,
53 MLX5_CMD_OP_QUERY_EQ = 0x303,
54 MLX5_CMD_OP_GEN_EQE = 0x304,
55 MLX5_CMD_OP_CREATE_CQ = 0x400,
56 MLX5_CMD_OP_DESTROY_CQ = 0x401,
57 MLX5_CMD_OP_QUERY_CQ = 0x402,
58 MLX5_CMD_OP_MODIFY_CQ = 0x403,
59 MLX5_CMD_OP_CREATE_QP = 0x500,
60 MLX5_CMD_OP_DESTROY_QP = 0x501,
61 MLX5_CMD_OP_RST2INIT_QP = 0x502,
62 MLX5_CMD_OP_INIT2RTR_QP = 0x503,
63 MLX5_CMD_OP_RTR2RTS_QP = 0x504,
64 MLX5_CMD_OP_RTS2RTS_QP = 0x505,
65 MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
66 MLX5_CMD_OP_2ERR_QP = 0x507,
67 MLX5_CMD_OP_2RST_QP = 0x50a,
68 MLX5_CMD_OP_QUERY_QP = 0x50b,
69 MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
70 MLX5_CMD_OP_CREATE_PSV = 0x600,
71 MLX5_CMD_OP_DESTROY_PSV = 0x601,
72 MLX5_CMD_OP_CREATE_SRQ = 0x700,
73 MLX5_CMD_OP_DESTROY_SRQ = 0x701,
74 MLX5_CMD_OP_QUERY_SRQ = 0x702,
75 MLX5_CMD_OP_ARM_RQ = 0x703,
76 MLX5_CMD_OP_RESIZE_SRQ = 0x704,
77 MLX5_CMD_OP_CREATE_DCT = 0x710,
78 MLX5_CMD_OP_DESTROY_DCT = 0x711,
79 MLX5_CMD_OP_DRAIN_DCT = 0x712,
80 MLX5_CMD_OP_QUERY_DCT = 0x713,
81 MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714,
82 MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
83 MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
84 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
85 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
86 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
87 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
 88	MLX5_CMD_OP_QUERY_ROCE_ADDRESS         = 0x760,
89 MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
90 MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
91 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
92 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
93 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
94 MLX5_CMD_OP_ALLOC_PD = 0x800,
95 MLX5_CMD_OP_DEALLOC_PD = 0x801,
96 MLX5_CMD_OP_ALLOC_UAR = 0x802,
97 MLX5_CMD_OP_DEALLOC_UAR = 0x803,
98 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
99 MLX5_CMD_OP_ACCESS_REG = 0x805,
100 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
101 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
102 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
103 MLX5_CMD_OP_MAD_IFC = 0x50d,
104 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
105 MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c,
106 MLX5_CMD_OP_NOP = 0x80d,
107 MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
108 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
109 MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
 110	MLX5_CMD_OP_QUERY_BURST_SIZE           = 0x813,
111 MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
112 MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
113 MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820,
114 MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821,
115 MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822,
116 MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823,
117 MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824,
118 MLX5_CMD_OP_CREATE_TIR = 0x900,
119 MLX5_CMD_OP_MODIFY_TIR = 0x901,
120 MLX5_CMD_OP_DESTROY_TIR = 0x902,
121 MLX5_CMD_OP_QUERY_TIR = 0x903,
122 MLX5_CMD_OP_CREATE_TIS = 0x912,
123 MLX5_CMD_OP_MODIFY_TIS = 0x913,
124 MLX5_CMD_OP_DESTROY_TIS = 0x914,
125 MLX5_CMD_OP_QUERY_TIS = 0x915,
126 MLX5_CMD_OP_CREATE_SQ = 0x904,
127 MLX5_CMD_OP_MODIFY_SQ = 0x905,
128 MLX5_CMD_OP_DESTROY_SQ = 0x906,
129 MLX5_CMD_OP_QUERY_SQ = 0x907,
130 MLX5_CMD_OP_CREATE_RQ = 0x908,
131 MLX5_CMD_OP_MODIFY_RQ = 0x909,
132 MLX5_CMD_OP_DESTROY_RQ = 0x90a,
133 MLX5_CMD_OP_QUERY_RQ = 0x90b,
134 MLX5_CMD_OP_CREATE_RMP = 0x90c,
135 MLX5_CMD_OP_MODIFY_RMP = 0x90d,
136 MLX5_CMD_OP_DESTROY_RMP = 0x90e,
137 MLX5_CMD_OP_QUERY_RMP = 0x90f,
138 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910,
139 MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911,
140 MLX5_CMD_OP_MAX = 0x911
141};
142
143struct mlx5_ifc_cmd_hca_cap_bits {
144 u8 reserved_0[0x80];
145
146 u8 log_max_srq_sz[0x8];
147 u8 log_max_qp_sz[0x8];
148 u8 reserved_1[0xb];
149 u8 log_max_qp[0x5];
150
151 u8 log_max_strq_sz[0x8];
152 u8 reserved_2[0x3];
153 u8 log_max_srqs[0x5];
154 u8 reserved_3[0x10];
155
156 u8 reserved_4[0x8];
157 u8 log_max_cq_sz[0x8];
158 u8 reserved_5[0xb];
159 u8 log_max_cq[0x5];
160
161 u8 log_max_eq_sz[0x8];
162 u8 reserved_6[0x2];
163 u8 log_max_mkey[0x6];
164 u8 reserved_7[0xc];
165 u8 log_max_eq[0x4];
166
167 u8 max_indirection[0x8];
168 u8 reserved_8[0x1];
169 u8 log_max_mrw_sz[0x7];
170 u8 reserved_9[0x2];
171 u8 log_max_bsf_list_size[0x6];
172 u8 reserved_10[0x2];
173 u8 log_max_klm_list_size[0x6];
174
175 u8 reserved_11[0xa];
176 u8 log_max_ra_req_dc[0x6];
177 u8 reserved_12[0xa];
178 u8 log_max_ra_res_dc[0x6];
179
180 u8 reserved_13[0xa];
181 u8 log_max_ra_req_qp[0x6];
182 u8 reserved_14[0xa];
183 u8 log_max_ra_res_qp[0x6];
184
185 u8 pad_cap[0x1];
186 u8 cc_query_allowed[0x1];
187 u8 cc_modify_allowed[0x1];
188 u8 reserved_15[0x1d];
189
190 u8 reserved_16[0x6];
191 u8 max_qp_cnt[0xa];
192 u8 pkey_table_size[0x10];
193
194 u8 eswitch_owner[0x1];
195 u8 reserved_17[0xa];
196 u8 local_ca_ack_delay[0x5];
197 u8 reserved_18[0x8];
198 u8 num_ports[0x8];
199
200 u8 reserved_19[0x3];
201 u8 log_max_msg[0x5];
202 u8 reserved_20[0x18];
203
204 u8 stat_rate_support[0x10];
205 u8 reserved_21[0x10];
206
207 u8 reserved_22[0x10];
208 u8 cmdif_checksum[0x2];
209 u8 sigerr_cqe[0x1];
210 u8 reserved_23[0x1];
211 u8 wq_signature[0x1];
212 u8 sctr_data_cqe[0x1];
213 u8 reserved_24[0x1];
214 u8 sho[0x1];
215 u8 tph[0x1];
216 u8 rf[0x1];
217 u8 dc[0x1];
218 u8 reserved_25[0x2];
219 u8 roce[0x1];
220 u8 atomic[0x1];
221 u8 rsz_srq[0x1];
222
223 u8 cq_oi[0x1];
224 u8 cq_resize[0x1];
225 u8 cq_moderation[0x1];
226 u8 sniffer_rule_flow[0x1];
227 u8 sniffer_rule_vport[0x1];
228 u8 sniffer_rule_phy[0x1];
229 u8 reserved_26[0x1];
230 u8 pg[0x1];
231 u8 block_lb_mc[0x1];
232 u8 reserved_27[0x3];
233 u8 cd[0x1];
234 u8 reserved_28[0x1];
235 u8 apm[0x1];
236 u8 reserved_29[0x7];
237 u8 qkv[0x1];
238 u8 pkv[0x1];
239 u8 reserved_30[0x4];
240 u8 xrc[0x1];
241 u8 ud[0x1];
242 u8 uc[0x1];
243 u8 rc[0x1];
244
245 u8 reserved_31[0xa];
246 u8 uar_sz[0x6];
247 u8 reserved_32[0x8];
248 u8 log_pg_sz[0x8];
249
250 u8 bf[0x1];
251 u8 reserved_33[0xa];
252 u8 log_bf_reg_size[0x5];
253 u8 reserved_34[0x10];
254
255 u8 reserved_35[0x10];
256 u8 max_wqe_sz_sq[0x10];
257
258 u8 reserved_36[0x10];
259 u8 max_wqe_sz_rq[0x10];
260
261 u8 reserved_37[0x10];
262 u8 max_wqe_sz_sq_dc[0x10];
263
264 u8 reserved_38[0x7];
265 u8 max_qp_mcg[0x19];
266
267 u8 reserved_39[0x18];
268 u8 log_max_mcg[0x8];
269
270 u8 reserved_40[0xb];
271 u8 log_max_pd[0x5];
272 u8 reserved_41[0xb];
273 u8 log_max_xrcd[0x5];
274
275 u8 reserved_42[0x20];
276
277 u8 reserved_43[0x3];
278 u8 log_max_rq[0x5];
279 u8 reserved_44[0x3];
280 u8 log_max_sq[0x5];
281 u8 reserved_45[0x3];
282 u8 log_max_tir[0x5];
283 u8 reserved_46[0x3];
284 u8 log_max_tis[0x5];
285
286 u8 reserved_47[0x13];
287 u8 log_max_rq_per_tir[0x5];
288 u8 reserved_48[0x3];
289 u8 log_max_tis_per_sq[0x5];
290
291 u8 reserved_49[0xe0];
292
293 u8 reserved_50[0x10];
294 u8 log_uar_page_sz[0x10];
295
296 u8 reserved_51[0x100];
297
298 u8 reserved_52[0x1f];
299 u8 cqe_zip[0x1];
300
301 u8 cqe_zip_timeout[0x10];
302 u8 cqe_zip_max_num[0x10];
303
304 u8 reserved_53[0x220];
305};
306
307struct mlx5_ifc_set_hca_cap_in_bits {
308 u8 opcode[0x10];
309 u8 reserved_0[0x10];
310
311 u8 reserved_1[0x10];
312 u8 op_mod[0x10];
313
314 u8 reserved_2[0x40];
315
316 struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
317};
318
319struct mlx5_ifc_query_hca_cap_in_bits {
320 u8 opcode[0x10];
321 u8 reserved_0[0x10];
322
323 u8 reserved_1[0x10];
324 u8 op_mod[0x10];
325
326 u8 reserved_2[0x40];
327};
328
329struct mlx5_ifc_query_hca_cap_out_bits {
330 u8 status[0x8];
331 u8 reserved_0[0x18];
332
333 u8 syndrome[0x20];
334
335 u8 reserved_1[0x40];
336
337 u8 capability_struct[256][0x8];
338};
339
340struct mlx5_ifc_set_hca_cap_out_bits {
341 u8 status[0x8];
342 u8 reserved_0[0x18];
343
344 u8 syndrome[0x20];
345
346 u8 reserved_1[0x40];
347};
348
349#endif /* MLX5_IFC_H */
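
The new interface file pairs with the MLX5_SET()/MLX5_GET() helpers above: command mailboxes can be sized with MLX5_ST_SZ_BYTES() and filled by field name. A minimal sketch of issuing QUERY_HCA_CAP through mlx5_cmd_exec(); dev is assumed to come from the caller, and parsing of the returned capability block is elided:

	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {0};
	u8 out[MLX5_ST_SZ_BYTES(query_hca_cap_out)];
	int err;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_CUR);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;
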
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 9709b30e2d69..7c4c0f1f5805 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -342,10 +342,9 @@ struct mlx5_stride_block_ctrl_seg {
342}; 342};
343 343
344struct mlx5_core_qp { 344struct mlx5_core_qp {
345 struct mlx5_core_rsc_common common; /* must be first */
345 void (*event) (struct mlx5_core_qp *, int); 346 void (*event) (struct mlx5_core_qp *, int);
346 int qpn; 347 int qpn;
347 atomic_t refcount;
348 struct completion free;
349 struct mlx5_rsc_debug *dbg; 348 struct mlx5_rsc_debug *dbg;
350 int pid; 349 int pid;
351}; 350};
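
With the refcounted mlx5_core_rsc_common embedded as the first member of mlx5_core_qp, asynchronous events can be delivered per resource number and the QP recovered from the common header. A minimal sketch of the lookup side; mlx5_get_rsc() is a hypothetical lookup helper used only for illustration:

	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn); /* hypothetical lookup */
	struct mlx5_core_qp *qp;

	if (common && common->res == MLX5_RES_QP) {
		qp = container_of(common, struct mlx5_core_qp, common);
		if (qp->event)
			qp->event(qp, event_type);
	}
	if (common)
		mlx5_core_put_rsc(common);	/* drop the reference taken by the lookup */
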
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8981cc882ed2..fa0d74e06428 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -18,6 +18,7 @@
18#include <linux/pfn.h> 18#include <linux/pfn.h>
19#include <linux/bit_spinlock.h> 19#include <linux/bit_spinlock.h>
20#include <linux/shrinker.h> 20#include <linux/shrinker.h>
21#include <linux/resource.h>
21 22
22struct mempolicy; 23struct mempolicy;
23struct anon_vma; 24struct anon_vma;
@@ -553,6 +554,25 @@ static inline void __ClearPageBuddy(struct page *page)
553 atomic_set(&page->_mapcount, -1); 554 atomic_set(&page->_mapcount, -1);
554} 555}
555 556
557#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
558
559static inline int PageBalloon(struct page *page)
560{
561 return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
562}
563
564static inline void __SetPageBalloon(struct page *page)
565{
566 VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
567 atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
568}
569
570static inline void __ClearPageBalloon(struct page *page)
571{
572 VM_BUG_ON_PAGE(!PageBalloon(page), page);
573 atomic_set(&page->_mapcount, -1);
574}
575
556void put_page(struct page *page); 576void put_page(struct page *page);
557void put_pages_list(struct list_head *pages); 577void put_pages_list(struct list_head *pages);
558 578
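
The balloon-page markers reuse page->_mapcount with a dedicated sentinel so compaction and migration code can recognize ballooned pages without spending a page flag. A minimal sketch of how a balloon driver might tag pages while they sit on its list; the my_balloon_*() helpers are illustrative names, not an existing API:

	static void my_balloon_enqueue(struct list_head *pages, struct page *page)
	{
		__SetPageBalloon(page);		/* _mapcount becomes the balloon sentinel */
		list_add(&page->lru, pages);
	}

	static struct page *my_balloon_dequeue(struct list_head *pages)
	{
		struct page *page = list_first_entry_or_null(pages, struct page, lru);

		if (page) {
			list_del(&page->lru);
			__ClearPageBalloon(page);	/* back to a normal, unmapped page */
		}
		return page;
	}
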
@@ -1247,8 +1267,8 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
1247 !vma_growsup(vma->vm_next, addr); 1267 !vma_growsup(vma->vm_next, addr);
1248} 1268}
1249 1269
1250extern pid_t 1270extern struct task_struct *task_of_stack(struct task_struct *task,
1251vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); 1271 struct vm_area_struct *vma, bool in_group);
1252 1272
1253extern unsigned long move_page_tables(struct vm_area_struct *vma, 1273extern unsigned long move_page_tables(struct vm_area_struct *vma,
1254 unsigned long old_addr, struct vm_area_struct *new_vma, 1274 unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -1780,6 +1800,20 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1780 bool *need_rmap_locks); 1800 bool *need_rmap_locks);
1781extern void exit_mmap(struct mm_struct *); 1801extern void exit_mmap(struct mm_struct *);
1782 1802
1803static inline int check_data_rlimit(unsigned long rlim,
1804 unsigned long new,
1805 unsigned long start,
1806 unsigned long end_data,
1807 unsigned long start_data)
1808{
1809 if (rlim < RLIM_INFINITY) {
1810 if (((new - start) + (end_data - start_data)) > rlim)
1811 return -ENOSPC;
1812 }
1813
1814 return 0;
1815}
1816
1783extern int mm_take_all_locks(struct mm_struct *mm); 1817extern int mm_take_all_locks(struct mm_struct *mm);
1784extern void mm_drop_all_locks(struct mm_struct *mm); 1818extern void mm_drop_all_locks(struct mm_struct *mm);
1785 1819
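
check_data_rlimit() centralizes the RLIMIT_DATA check applied when growing the data segment. A minimal sketch of a brk-style caller, assuming mm, newbrk and the out label come from the surrounding code:

	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;	/* request would exceed RLIMIT_DATA */
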
@@ -1985,6 +2019,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
1985#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ 2019#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
1986#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ 2020#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
1987#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ 2021#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
2022#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
1988 2023
1989typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2024typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
1990 void *data); 2025 void *data);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d424b9de3aff..b0692d28f8e6 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -42,7 +42,8 @@ struct mmc_csd {
42 unsigned int read_partial:1, 42 unsigned int read_partial:1,
43 read_misalign:1, 43 read_misalign:1,
44 write_partial:1, 44 write_partial:1,
45 write_misalign:1; 45 write_misalign:1,
46 dsr_imp:1;
46}; 47};
47 48
48struct mmc_ext_csd { 49struct mmc_ext_csd {
@@ -74,7 +75,7 @@ struct mmc_ext_csd {
74 unsigned int sec_trim_mult; /* Secure trim multiplier */ 75 unsigned int sec_trim_mult; /* Secure trim multiplier */
75 unsigned int sec_erase_mult; /* Secure erase multiplier */ 76 unsigned int sec_erase_mult; /* Secure erase multiplier */
76 unsigned int trim_timeout; /* In milliseconds */ 77 unsigned int trim_timeout; /* In milliseconds */
77 bool enhanced_area_en; /* enable bit */ 78 bool partition_setting_completed; /* enable bit */
78 unsigned long long enhanced_area_offset; /* Units: Byte */ 79 unsigned long long enhanced_area_offset; /* Units: Byte */
79 unsigned int enhanced_area_size; /* Units: KB */ 80 unsigned int enhanced_area_size; /* Units: KB */
80 unsigned int cache_size; /* Units: KB */ 81 unsigned int cache_size; /* Units: KB */
@@ -214,11 +215,12 @@ enum mmc_blk_status {
214}; 215};
215 216
216/* The number of MMC physical partitions. These consist of: 217/* The number of MMC physical partitions. These consist of:
217 * boot partitions (2), general purpose partitions (4) in MMC v4.4. 218 * boot partitions (2), general purpose partitions (4) and
219 * RPMB partition (1) in MMC v4.4.
218 */ 220 */
219#define MMC_NUM_BOOT_PARTITION 2 221#define MMC_NUM_BOOT_PARTITION 2
220#define MMC_NUM_GP_PARTITION 4 222#define MMC_NUM_GP_PARTITION 4
221#define MMC_NUM_PHY_PARTITION 6 223#define MMC_NUM_PHY_PARTITION 7
222#define MAX_MMC_PART_NAME_LEN 20 224#define MAX_MMC_PART_NAME_LEN 20
223 225
224/* 226/*
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 29ce014ab421..001366927cf4 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -26,6 +26,8 @@ enum dw_mci_state {
26 STATE_DATA_BUSY, 26 STATE_DATA_BUSY,
27 STATE_SENDING_STOP, 27 STATE_SENDING_STOP,
28 STATE_DATA_ERROR, 28 STATE_DATA_ERROR,
29 STATE_SENDING_CMD11,
30 STATE_WAITING_CMD11_DONE,
29}; 31};
30 32
31enum { 33enum {
@@ -188,7 +190,7 @@ struct dw_mci {
188 /* Workaround flags */ 190 /* Workaround flags */
189 u32 quirks; 191 u32 quirks;
190 192
191 struct regulator *vmmc; /* Power regulator */ 193 bool vqmmc_enabled;
192 unsigned long irq_flags; /* IRQ flags */ 194 unsigned long irq_flags; /* IRQ flags */
193 int irq; 195 int irq;
194}; 196};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7960424d0bc0..df0c15396bbf 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -42,6 +42,7 @@ struct mmc_ios {
42#define MMC_POWER_OFF 0 42#define MMC_POWER_OFF 0
43#define MMC_POWER_UP 1 43#define MMC_POWER_UP 1
44#define MMC_POWER_ON 2 44#define MMC_POWER_ON 2
45#define MMC_POWER_UNDEFINED 3
45 46
46 unsigned char bus_width; /* data bus width */ 47 unsigned char bus_width; /* data bus width */
47 48
@@ -139,6 +140,13 @@ struct mmc_host_ops {
139 int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); 140 int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
140 void (*hw_reset)(struct mmc_host *host); 141 void (*hw_reset)(struct mmc_host *host);
141 void (*card_event)(struct mmc_host *host); 142 void (*card_event)(struct mmc_host *host);
143
144 /*
145 * Optional callback to support controllers with HW issues for multiple
146 * I/O. Returns the number of supported blocks for the request.
147 */
148 int (*multi_io_quirk)(struct mmc_card *card,
149 unsigned int direction, int blk_size);
142}; 150};
143 151
144struct mmc_card; 152struct mmc_card;
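
The optional multi_io_quirk() callback lets a host driver cap the block count of a multi-block request when the controller has hardware issues with certain transfers. A minimal sketch of a host driver implementation, assuming the core passes MMC_DATA_READ/MMC_DATA_WRITE as the direction; the single-block-read restriction is purely illustrative:

	static int myhost_multi_io_quirk(struct mmc_card *card,
					 unsigned int direction, int blk_size)
	{
		/* hypothetical controller: multi-block reads are unreliable */
		if (direction == MMC_DATA_READ)
			return 1;		/* force single-block reads */

		return blk_size;		/* writes proceed as requested */
	}

	static const struct mmc_host_ops myhost_ops = {
		/* ... other callbacks ... */
		.multi_io_quirk	= myhost_multi_io_quirk,
	};
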
@@ -265,7 +273,6 @@ struct mmc_host {
265 273
266#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ 274#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
267#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ 275#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
268#define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */
269#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ 276#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
270#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ 277#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
271#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ 278#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
@@ -365,6 +372,9 @@ struct mmc_host {
365 372
366 unsigned int slotno; /* used for sdio acpi binding */ 373 unsigned int slotno; /* used for sdio acpi binding */
367 374
375 int dsr_req; /* DSR value is valid */
376 u32 dsr; /* optional driver stage (DSR) value */
377
368 unsigned long private[0] ____cacheline_aligned; 378 unsigned long private[0] ____cacheline_aligned;
369}; 379};
370 380
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 64ec963ed347..1cd00b3a75b9 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -53,6 +53,11 @@
53#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */ 53#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
54#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */ 54#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
55 55
56#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64
57#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128
58extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE];
59extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE];
60
56 /* class 3 */ 61 /* class 3 */
57#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ 62#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
58 63
@@ -281,6 +286,7 @@ struct _mmc_csd {
281#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */ 286#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
282#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ 287#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
283#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ 288#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */
289#define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */
284#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ 290#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
285#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ 291#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
286#define EXT_CSD_HPI_MGMT 161 /* R/W */ 292#define EXT_CSD_HPI_MGMT 161 /* R/W */
@@ -349,6 +355,7 @@ struct _mmc_csd {
349#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3) 355#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3)
350#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4) 356#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4)
351 357
358#define EXT_CSD_PART_SETTING_COMPLETED (0x1)
352#define EXT_CSD_PART_SUPPORT_PART_EN (0x1) 359#define EXT_CSD_PART_SUPPORT_PART_EN (0x1)
353 360
354#define EXT_CSD_CMD_SET_NORMAL (1<<0) 361#define EXT_CSD_CMD_SET_NORMAL (1<<0)
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 09ebe57d5ce9..dba793e3a331 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -98,6 +98,8 @@ struct sdhci_host {
98#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6) 98#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
99/* Controller does not support DDR50 */ 99/* Controller does not support DDR50 */
100#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) 100#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7)
101/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
102#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8)
101 103
102 int irq; /* Device IRQ */ 104 int irq; /* Device IRQ */
103 void __iomem *ioaddr; /* Mapped address */ 105 void __iomem *ioaddr; /* Mapped address */
@@ -146,6 +148,7 @@ struct sdhci_host {
146 struct mmc_command *cmd; /* Current command */ 148 struct mmc_command *cmd; /* Current command */
147 struct mmc_data *data; /* Current data request */ 149 struct mmc_data *data; /* Current data request */
148 unsigned int data_early:1; /* Data finished before cmd */ 150 unsigned int data_early:1; /* Data finished before cmd */
151 unsigned int busy_handle:1; /* Handling the order of Busy-end */
149 152
150 struct sg_mapping_iter sg_miter; /* SG state for PIO */ 153 struct sg_mapping_iter sg_miter; /* SG state for PIO */
151 unsigned int blocks; /* remaining PIO blocks */ 154 unsigned int blocks; /* remaining PIO blocks */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index d2433381e828..e56fa24c9322 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -24,7 +24,10 @@ void mmc_gpio_free_cd(struct mmc_host *host);
24 24
25int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, 25int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
26 unsigned int idx, bool override_active_level, 26 unsigned int idx, bool override_active_level,
27 unsigned int debounce); 27 unsigned int debounce, bool *gpio_invert);
28int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
29 unsigned int idx, bool override_active_level,
30 unsigned int debounce, bool *gpio_invert);
28void mmc_gpiod_free_cd(struct mmc_host *host); 31void mmc_gpiod_free_cd(struct mmc_host *host);
29void mmc_gpiod_request_cd_irq(struct mmc_host *host); 32void mmc_gpiod_request_cd_irq(struct mmc_host *host);
30 33
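
Both GPIO descriptor request helpers now report whether the line is active-low through the new gpio_invert out-parameter, so slot drivers can mirror the polarity into their capability flags. A minimal sketch, assuming host comes from the caller, the "cd"/"wp" connection IDs match the platform description, and the MMC_CAP2_*_ACTIVE_HIGH handling is one possible use of the reported polarity:

	bool cd_invert = false, ro_invert = false;
	int ret;

	ret = mmc_gpiod_request_cd(host, "cd", 0, false, 0, &cd_invert);
	if (ret && ret != -ENOENT)
		return ret;

	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_invert);
	if (ret && ret != -ENOENT)
		return ret;

	if (cd_invert)
		host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	if (ro_invert)
		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
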
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 2f348d02f640..877ef226f90f 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -4,10 +4,14 @@
4#include <linux/stringify.h> 4#include <linux/stringify.h>
5 5
6struct page; 6struct page;
7struct vm_area_struct;
8struct mm_struct;
7 9
8extern void dump_page(struct page *page, const char *reason); 10extern void dump_page(struct page *page, const char *reason);
9extern void dump_page_badflags(struct page *page, const char *reason, 11extern void dump_page_badflags(struct page *page, const char *reason,
10 unsigned long badflags); 12 unsigned long badflags);
13void dump_vma(const struct vm_area_struct *vma);
14void dump_mm(const struct mm_struct *mm);
11 15
12#ifdef CONFIG_DEBUG_VM 16#ifdef CONFIG_DEBUG_VM
13#define VM_BUG_ON(cond) BUG_ON(cond) 17#define VM_BUG_ON(cond) BUG_ON(cond)
@@ -18,12 +22,28 @@ extern void dump_page_badflags(struct page *page, const char *reason,
18 BUG(); \ 22 BUG(); \
19 } \ 23 } \
20 } while (0) 24 } while (0)
25#define VM_BUG_ON_VMA(cond, vma) \
26 do { \
27 if (unlikely(cond)) { \
28 dump_vma(vma); \
29 BUG(); \
30 } \
31 } while (0)
32#define VM_BUG_ON_MM(cond, mm) \
33 do { \
34 if (unlikely(cond)) { \
35 dump_mm(mm); \
36 BUG(); \
37 } \
38 } while (0)
21#define VM_WARN_ON(cond) WARN_ON(cond) 39#define VM_WARN_ON(cond) WARN_ON(cond)
22#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) 40#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
23#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) 41#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
24#else 42#else
25#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) 43#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
26#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) 44#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
45#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
46#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
27#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) 47#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
28#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) 48#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
29#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) 49#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
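
VM_BUG_ON_VMA() and VM_BUG_ON_MM() behave like VM_BUG_ON() but dump the offending VMA or mm_struct before hitting BUG(), and compile down to BUILD_BUG_ON_INVALID() without CONFIG_DEBUG_VM. A minimal usage sketch, with vma, address and mm assumed from the surrounding code:

	/* sanity-check that the address really falls inside the VMA */
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);

	/* sanity-check that mmap_sem is held for this mm */
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
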
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 27288692241e..88787bb4b3b9 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -57,10 +57,13 @@ struct mmu_notifier_ops {
57 * pte. This way the VM will provide proper aging to the 57 * pte. This way the VM will provide proper aging to the
58 * accesses to the page through the secondary MMUs and not 58 * accesses to the page through the secondary MMUs and not
59 * only to the ones through the Linux pte. 59 * only to the ones through the Linux pte.
60 * Start-end is necessary in case the secondary MMU is mapping the page
61 * at a smaller granularity than the primary MMU.
60 */ 62 */
61 int (*clear_flush_young)(struct mmu_notifier *mn, 63 int (*clear_flush_young)(struct mmu_notifier *mn,
62 struct mm_struct *mm, 64 struct mm_struct *mm,
63 unsigned long address); 65 unsigned long start,
66 unsigned long end);
64 67
65 /* 68 /*
66 * test_young is called to check the young/accessed bitflag in 69 * test_young is called to check the young/accessed bitflag in
@@ -175,7 +178,8 @@ extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
175extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); 178extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
176extern void __mmu_notifier_release(struct mm_struct *mm); 179extern void __mmu_notifier_release(struct mm_struct *mm);
177extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, 180extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
178 unsigned long address); 181 unsigned long start,
182 unsigned long end);
179extern int __mmu_notifier_test_young(struct mm_struct *mm, 183extern int __mmu_notifier_test_young(struct mm_struct *mm,
180 unsigned long address); 184 unsigned long address);
181extern void __mmu_notifier_change_pte(struct mm_struct *mm, 185extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -194,10 +198,11 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
194} 198}
195 199
196static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, 200static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
197 unsigned long address) 201 unsigned long start,
202 unsigned long end)
198{ 203{
199 if (mm_has_notifiers(mm)) 204 if (mm_has_notifiers(mm))
200 return __mmu_notifier_clear_flush_young(mm, address); 205 return __mmu_notifier_clear_flush_young(mm, start, end);
201 return 0; 206 return 0;
202} 207}
203 208
@@ -255,7 +260,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
255 unsigned long ___address = __address; \ 260 unsigned long ___address = __address; \
256 __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ 261 __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
257 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ 262 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
258 ___address); \ 263 ___address, \
264 ___address + \
265 PAGE_SIZE); \
259 __young; \ 266 __young; \
260}) 267})
261 268
@@ -266,7 +273,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
266 unsigned long ___address = __address; \ 273 unsigned long ___address = __address; \
267 __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \ 274 __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
268 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ 275 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
269 ___address); \ 276 ___address, \
277 ___address + \
278 PMD_SIZE); \
270 __young; \ 279 __young; \
271}) 280})
272 281
@@ -301,7 +310,8 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
301} 310}
302 311
303static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, 312static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
304 unsigned long address) 313 unsigned long start,
314 unsigned long end)
305{ 315{
306 return 0; 316 return 0;
307} 317}
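
clear_flush_young() now receives a [start, end) range so a secondary MMU that maps at a finer granularity than the primary MMU can age every covered mapping. A minimal sketch of a notifier implementation; my_unit_test_and_clear_young() stands in for whatever per-address aging hook the secondary MMU provides and is hypothetical:

	static int my_mn_clear_flush_young(struct mmu_notifier *mn,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
	{
		unsigned long addr;
		int young = 0;

		for (addr = start; addr < end; addr += PAGE_SIZE)
			young |= my_unit_test_and_clear_young(mn, addr); /* hypothetical */

		return young;
	}

	static const struct mmu_notifier_ops my_mn_ops = {
		.clear_flush_young = my_mn_clear_flush_young,
		/* ... */
	};
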
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 318df7051850..48bf12ef6620 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -521,13 +521,13 @@ struct zone {
521 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; 521 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
522} ____cacheline_internodealigned_in_smp; 522} ____cacheline_internodealigned_in_smp;
523 523
524typedef enum { 524enum zone_flags {
525 ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ 525 ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
526 ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ 526 ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
527 ZONE_CONGESTED, /* zone has many dirty pages backed by 527 ZONE_CONGESTED, /* zone has many dirty pages backed by
528 * a congested BDI 528 * a congested BDI
529 */ 529 */
530 ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found 530 ZONE_DIRTY, /* reclaim scanning has recently found
531 * many dirty file pages at the tail 531 * many dirty file pages at the tail
532 * of the LRU. 532 * of the LRU.
533 */ 533 */
@@ -535,52 +535,7 @@ typedef enum {
535 * many pages under writeback 535 * many pages under writeback
536 */ 536 */
537 ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */ 537 ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
538} zone_flags_t; 538};
539
540static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
541{
542 set_bit(flag, &zone->flags);
543}
544
545static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
546{
547 return test_and_set_bit(flag, &zone->flags);
548}
549
550static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
551{
552 clear_bit(flag, &zone->flags);
553}
554
555static inline int zone_is_reclaim_congested(const struct zone *zone)
556{
557 return test_bit(ZONE_CONGESTED, &zone->flags);
558}
559
560static inline int zone_is_reclaim_dirty(const struct zone *zone)
561{
562 return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
563}
564
565static inline int zone_is_reclaim_writeback(const struct zone *zone)
566{
567 return test_bit(ZONE_WRITEBACK, &zone->flags);
568}
569
570static inline int zone_is_reclaim_locked(const struct zone *zone)
571{
572 return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
573}
574
575static inline int zone_is_fair_depleted(const struct zone *zone)
576{
577 return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
578}
579
580static inline int zone_is_oom_locked(const struct zone *zone)
581{
582 return test_bit(ZONE_OOM_LOCKED, &zone->flags);
583}
584 539
585static inline unsigned long zone_end_pfn(const struct zone *zone) 540static inline unsigned long zone_end_pfn(const struct zone *zone)
586{ 541{
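
The zone_set_flag()/zone_test_and_set_flag()/zone_is_*() wrappers are gone; with the zone_flags_t typedef dropped in favour of plain enum zone_flags, callers use the generic bitops on zone->flags directly. A minimal sketch of the replacement pattern in reclaim-style code (the wait_iff_congested() reaction is illustrative):

	/* was: zone_set_flag(zone, ZONE_CONGESTED); */
	set_bit(ZONE_CONGESTED, &zone->flags);

	/* was: zone_is_reclaim_dirty(zone); note the ZONE_TAIL_LRU_DIRTY -> ZONE_DIRTY rename */
	if (test_bit(ZONE_DIRTY, &zone->flags))
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);

	/* was: zone_clear_flag(zone, ZONE_WRITEBACK); */
	clear_bit(ZONE_WRITEBACK, &zone->flags);
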
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 494f99e852da..b43f4752304e 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -42,7 +42,7 @@ struct kernel_param;
42 * NOARG - the parameter allows for no argument (foo instead of foo=1) 42 * NOARG - the parameter allows for no argument (foo instead of foo=1)
43 */ 43 */
44enum { 44enum {
45 KERNEL_PARAM_FL_NOARG = (1 << 0) 45 KERNEL_PARAM_OPS_FL_NOARG = (1 << 0)
46}; 46};
47 47
48struct kernel_param_ops { 48struct kernel_param_ops {
@@ -56,11 +56,21 @@ struct kernel_param_ops {
56 void (*free)(void *arg); 56 void (*free)(void *arg);
57}; 57};
58 58
59/*
60 * Flags available for kernel_param
61 *
62 * UNSAFE - the parameter is dangerous and setting it will taint the kernel
63 */
64enum {
65 KERNEL_PARAM_FL_UNSAFE = (1 << 0)
66};
67
59struct kernel_param { 68struct kernel_param {
60 const char *name; 69 const char *name;
61 const struct kernel_param_ops *ops; 70 const struct kernel_param_ops *ops;
62 u16 perm; 71 u16 perm;
63 s16 level; 72 s8 level;
73 u8 flags;
64 union { 74 union {
65 void *arg; 75 void *arg;
66 const struct kparam_string *str; 76 const struct kparam_string *str;
@@ -113,6 +123,12 @@ struct kparam_array
113 module_param_named(name, name, type, perm) 123 module_param_named(name, name, type, perm)
114 124
115/** 125/**
126 * module_param_unsafe - same as module_param but taints kernel
127 */
128#define module_param_unsafe(name, type, perm) \
129 module_param_named_unsafe(name, name, type, perm)
130
131/**
116 * module_param_named - typesafe helper for a renamed module/cmdline parameter 132 * module_param_named - typesafe helper for a renamed module/cmdline parameter
117 * @name: a valid C identifier which is the parameter name. 133 * @name: a valid C identifier which is the parameter name.
118 * @value: the actual lvalue to alter. 134 * @value: the actual lvalue to alter.
@@ -129,6 +145,14 @@ struct kparam_array
129 __MODULE_PARM_TYPE(name, #type) 145 __MODULE_PARM_TYPE(name, #type)
130 146
131/** 147/**
148 * module_param_named_unsafe - same as module_param_named but taints kernel
149 */
150#define module_param_named_unsafe(name, value, type, perm) \
151 param_check_##type(name, &(value)); \
152 module_param_cb_unsafe(name, &param_ops_##type, &value, perm); \
153 __MODULE_PARM_TYPE(name, #type)
154
155/**
132 * module_param_cb - general callback for a module/cmdline parameter 156 * module_param_cb - general callback for a module/cmdline parameter
133 * @name: a valid C identifier which is the parameter name. 157 * @name: a valid C identifier which is the parameter name.
134 * @ops: the set & get operations for this parameter. 158 * @ops: the set & get operations for this parameter.
@@ -137,7 +161,11 @@ struct kparam_array
137 * The ops can have NULL set or get functions. 161 * The ops can have NULL set or get functions.
138 */ 162 */
139#define module_param_cb(name, ops, arg, perm) \ 163#define module_param_cb(name, ops, arg, perm) \
140 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1) 164 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, 0)
165
166#define module_param_cb_unsafe(name, ops, arg, perm) \
167 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \
168 KERNEL_PARAM_FL_UNSAFE)
141 169
142/** 170/**
143 * <level>_param_cb - general callback for a module/cmdline parameter 171 * <level>_param_cb - general callback for a module/cmdline parameter
@@ -149,7 +177,7 @@ struct kparam_array
149 * The ops can have NULL set or get functions. 177 * The ops can have NULL set or get functions.
150 */ 178 */
151#define __level_param_cb(name, ops, arg, perm, level) \ 179#define __level_param_cb(name, ops, arg, perm, level) \
152 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level) 180 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
153 181
154#define core_param_cb(name, ops, arg, perm) \ 182#define core_param_cb(name, ops, arg, perm) \
155 __level_param_cb(name, ops, arg, perm, 1) 183 __level_param_cb(name, ops, arg, perm, 1)
@@ -184,22 +212,22 @@ struct kparam_array
184 212
185/* This is the fundamental function for registering boot/module 213/* This is the fundamental function for registering boot/module
186 parameters. */ 214 parameters. */
187#define __module_param_call(prefix, name, ops, arg, perm, level) \ 215#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
188 /* Default value instead of permissions? */ \ 216 /* Default value instead of permissions? */ \
189 static const char __param_str_##name[] = prefix #name; \ 217 static const char __param_str_##name[] = prefix #name; \
190 static struct kernel_param __moduleparam_const __param_##name \ 218 static struct kernel_param __moduleparam_const __param_##name \
191 __used \ 219 __used \
192 __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ 220 __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
193 = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \ 221 = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \
194 level, { arg } } 222 level, flags, { arg } }
195 223
196/* Obsolete - use module_param_cb() */ 224/* Obsolete - use module_param_cb() */
197#define module_param_call(name, set, get, arg, perm) \ 225#define module_param_call(name, set, get, arg, perm) \
198 static struct kernel_param_ops __param_ops_##name = \ 226 static struct kernel_param_ops __param_ops_##name = \
199 { 0, (void *)set, (void *)get }; \ 227 { .flags = 0, (void *)set, (void *)get }; \
200 __module_param_call(MODULE_PARAM_PREFIX, \ 228 __module_param_call(MODULE_PARAM_PREFIX, \
201 name, &__param_ops_##name, arg, \ 229 name, &__param_ops_##name, arg, \
202 (perm) + sizeof(__check_old_set_param(set))*0, -1) 230 (perm) + sizeof(__check_old_set_param(set))*0, -1, 0)
203 231
204/* We don't get oldget: it's often a new-style param_get_uint, etc. */ 232/* We don't get oldget: it's often a new-style param_get_uint, etc. */
205static inline int 233static inline int
@@ -279,7 +307,7 @@ static inline void __kernel_param_unlock(void)
279 */ 307 */
280#define core_param(name, var, type, perm) \ 308#define core_param(name, var, type, perm) \
281 param_check_##type(name, &(var)); \ 309 param_check_##type(name, &(var)); \
282 __module_param_call("", name, &param_ops_##type, &var, perm, -1) 310 __module_param_call("", name, &param_ops_##type, &var, perm, -1, 0)
283#endif /* !MODULE */ 311#endif /* !MODULE */
284 312
285/** 313/**
@@ -297,7 +325,7 @@ static inline void __kernel_param_unlock(void)
297 = { len, string }; \ 325 = { len, string }; \
298 __module_param_call(MODULE_PARAM_PREFIX, name, \ 326 __module_param_call(MODULE_PARAM_PREFIX, name, \
299 &param_ops_string, \ 327 &param_ops_string, \
300 .str = &__param_string_##name, perm, -1); \ 328 .str = &__param_string_##name, perm, -1, 0);\
301 __MODULE_PARM_TYPE(name, "string") 329 __MODULE_PARM_TYPE(name, "string")
302 330
303/** 331/**
@@ -444,7 +472,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
444 __module_param_call(MODULE_PARAM_PREFIX, name, \ 472 __module_param_call(MODULE_PARAM_PREFIX, name, \
445 &param_array_ops, \ 473 &param_array_ops, \
446 .arr = &__param_arr_##name, \ 474 .arr = &__param_arr_##name, \
447 perm, -1); \ 475 perm, -1, 0); \
448 __MODULE_PARM_TYPE(name, "array of " #type) 476 __MODULE_PARM_TYPE(name, "array of " #type)
449 477
450extern struct kernel_param_ops param_array_ops; 478extern struct kernel_param_ops param_array_ops;
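
module_param_unsafe() and module_param_named_unsafe() declare parameters exactly like their plain counterparts, but setting them taints the kernel via the new KERNEL_PARAM_FL_UNSAFE flag. A minimal sketch; the parameter name and meaning are illustrative only:

	static bool bypass_hw_checks;
	module_param_unsafe(bypass_hw_checks, bool, 0644);
	MODULE_PARM_DESC(bypass_hw_checks,
			 "Skip hardware sanity checks (setting this taints the kernel)");
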
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8103f32f6d87..44f4746d033b 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -29,7 +29,6 @@ struct msi_desc {
29 __u8 multi_cap : 3; /* log2 num of messages supported */ 29 __u8 multi_cap : 3; /* log2 num of messages supported */
30 __u8 maskbit : 1; /* mask-pending bit supported ? */ 30 __u8 maskbit : 1; /* mask-pending bit supported ? */
31 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 31 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
32 __u8 pos; /* Location of the msi capability */
33 __u16 entry_nr; /* specific enabled entry */ 32 __u16 entry_nr; /* specific enabled entry */
34 unsigned default_irq; /* default pre-assigned irq */ 33 unsigned default_irq; /* default pre-assigned irq */
35 } msi_attrib; 34 } msi_attrib;
@@ -47,8 +46,6 @@ struct msi_desc {
47 46
48 /* Last set MSI message */ 47 /* Last set MSI message */
49 struct msi_msg msg; 48 struct msi_msg msg;
50
51 struct kobject kobj;
52}; 49};
53 50
54/* 51/*
@@ -60,7 +57,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
60void arch_teardown_msi_irq(unsigned int irq); 57void arch_teardown_msi_irq(unsigned int irq);
61int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 58int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
62void arch_teardown_msi_irqs(struct pci_dev *dev); 59void arch_teardown_msi_irqs(struct pci_dev *dev);
63int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
64void arch_restore_msi_irqs(struct pci_dev *dev); 60void arch_restore_msi_irqs(struct pci_dev *dev);
65 61
66void default_teardown_msi_irqs(struct pci_dev *dev); 62void default_teardown_msi_irqs(struct pci_dev *dev);
@@ -77,8 +73,6 @@ struct msi_chip {
77 int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, 73 int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev,
78 struct msi_desc *desc); 74 struct msi_desc *desc);
79 void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); 75 void (*teardown_irq)(struct msi_chip *chip, unsigned int irq);
80 int (*check_device)(struct msi_chip *chip, struct pci_dev *dev,
81 int nvec, int type);
82}; 76};
83 77
84#endif /* LINUX_MSI_H */ 78#endif /* LINUX_MSI_H */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 3083c53e0270..c300db3ae285 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -949,7 +949,7 @@ static inline int jedec_feature(struct nand_chip *chip)
949 : 0; 949 : 0;
950} 950}
951 951
952/** 952/*
953 * struct nand_sdr_timings - SDR NAND chip timings 953 * struct nand_sdr_timings - SDR NAND chip timings
954 * 954 *
955 * This struct defines the timing requirements of a SDR NAND chip. 955 * This struct defines the timing requirements of a SDR NAND chip.
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 8d5535c58cc2..cc31498fc526 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -52,7 +52,7 @@ struct mutex {
52 atomic_t count; 52 atomic_t count;
53 spinlock_t wait_lock; 53 spinlock_t wait_lock;
54 struct list_head wait_list; 54 struct list_head wait_list;
55#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) 55#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
56 struct task_struct *owner; 56 struct task_struct *owner;
57#endif 57#endif
58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -133,7 +133,7 @@ static inline int mutex_is_locked(struct mutex *lock)
133 133
134/* 134/*
135 * See kernel/locking/mutex.c for detailed documentation of these APIs. 135 * See kernel/locking/mutex.c for detailed documentation of these APIs.
136 * Also see Documentation/mutex-design.txt. 136 * Also see Documentation/locking/mutex-design.txt.
137 */ 137 */
138#ifdef CONFIG_DEBUG_LOCK_ALLOC 138#ifdef CONFIG_DEBUG_LOCK_ALLOC
139extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); 139extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 38377392d082..838407aea705 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -30,6 +30,7 @@
30#include <linux/bug.h> 30#include <linux/bug.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/atomic.h> 32#include <linux/atomic.h>
33#include <linux/prefetch.h>
33#include <asm/cache.h> 34#include <asm/cache.h>
34#include <asm/byteorder.h> 35#include <asm/byteorder.h>
35 36
@@ -543,7 +544,7 @@ struct netdev_queue {
543 * read mostly part 544 * read mostly part
544 */ 545 */
545 struct net_device *dev; 546 struct net_device *dev;
546 struct Qdisc *qdisc; 547 struct Qdisc __rcu *qdisc;
547 struct Qdisc *qdisc_sleeping; 548 struct Qdisc *qdisc_sleeping;
548#ifdef CONFIG_SYSFS 549#ifdef CONFIG_SYSFS
549 struct kobject kobj; 550 struct kobject kobj;
@@ -1206,6 +1207,7 @@ enum netdev_priv_flags {
1206 IFF_SUPP_NOFCS = 1<<19, 1207 IFF_SUPP_NOFCS = 1<<19,
1207 IFF_LIVE_ADDR_CHANGE = 1<<20, 1208 IFF_LIVE_ADDR_CHANGE = 1<<20,
1208 IFF_MACVLAN = 1<<21, 1209 IFF_MACVLAN = 1<<21,
1210 IFF_XMIT_DST_RELEASE_PERM = 1<<22,
1209}; 1211};
1210 1212
1211#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1213#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1230,6 +1232,7 @@ enum netdev_priv_flags {
1230#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS 1232#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1231#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE 1233#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1232#define IFF_MACVLAN IFF_MACVLAN 1234#define IFF_MACVLAN IFF_MACVLAN
1235#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1233 1236
1234/** 1237/**
1235 * struct net_device - The DEVICE structure. 1238 * struct net_device - The DEVICE structure.
@@ -1416,6 +1419,8 @@ enum netdev_priv_flags {
1416 * @gso_max_size: Maximum size of generic segmentation offload 1419 * @gso_max_size: Maximum size of generic segmentation offload
1417 * @gso_max_segs: Maximum number of segments that can be passed to the 1420 * @gso_max_segs: Maximum number of segments that can be passed to the
1418 * NIC for GSO 1421 * NIC for GSO
1422 * @gso_min_segs: Minimum number of segments that can be passed to the
1423 * NIC for GSO
1419 * 1424 *
1420 * @dcbnl_ops: Data Center Bridging netlink ops 1425 * @dcbnl_ops: Data Center Bridging netlink ops
1421 * @num_tc: Number of traffic classes in the net device 1426 * @num_tc: Number of traffic classes in the net device
@@ -1666,7 +1671,7 @@ struct net_device {
1666 unsigned int gso_max_size; 1671 unsigned int gso_max_size;
1667#define GSO_MAX_SEGS 65535 1672#define GSO_MAX_SEGS 65535
1668 u16 gso_max_segs; 1673 u16 gso_max_segs;
1669 1674 u16 gso_min_segs;
1670#ifdef CONFIG_DCB 1675#ifdef CONFIG_DCB
1671 const struct dcbnl_rtnl_ops *dcbnl_ops; 1676 const struct dcbnl_rtnl_ops *dcbnl_ops;
1672#endif 1677#endif
@@ -1747,6 +1752,12 @@ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1747 return &dev->_tx[index]; 1752 return &dev->_tx[index];
1748} 1753}
1749 1754
1755static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
1756 const struct sk_buff *skb)
1757{
1758 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
1759}
1760
1750static inline void netdev_for_each_tx_queue(struct net_device *dev, 1761static inline void netdev_for_each_tx_queue(struct net_device *dev,
1751 void (*f)(struct net_device *, 1762 void (*f)(struct net_device *,
1752 struct netdev_queue *, 1763 struct netdev_queue *,
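A minimal sketch of the new helper from a driver transmit path; it just folds in the skb_get_queue_mapping() lookup that callers used to open-code. The mydrv_xmit() function is hypothetical.

#include <linux/netdevice.h>

static netdev_tx_t mydrv_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* Same as netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)) */
        struct netdev_queue *txq = skb_get_tx_queue(dev, skb);

        if (netif_xmit_stopped(txq))
                return NETDEV_TX_BUSY;

        /* ... post the descriptor, account it with netdev_tx_sent_queue() ... */
        return NETDEV_TX_OK;
}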
@@ -1781,24 +1792,13 @@ void dev_net_set(struct net_device *dev, struct net *net)
1781#endif 1792#endif
1782} 1793}
1783 1794
1784static inline bool netdev_uses_dsa_tags(struct net_device *dev) 1795static inline bool netdev_uses_dsa(struct net_device *dev)
1785{ 1796{
1786#ifdef CONFIG_NET_DSA_TAG_DSA 1797#if IS_ENABLED(CONFIG_NET_DSA)
1787 if (dev->dsa_ptr != NULL)
1788 return dsa_uses_dsa_tags(dev->dsa_ptr);
1789#endif
1790
1791 return 0;
1792}
1793
1794static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1795{
1796#ifdef CONFIG_NET_DSA_TAG_TRAILER
1797 if (dev->dsa_ptr != NULL) 1798 if (dev->dsa_ptr != NULL)
1798 return dsa_uses_trailer_tags(dev->dsa_ptr); 1799 return dsa_uses_tagged_protocol(dev->dsa_ptr);
1799#endif 1800#endif
1800 1801 return false;
1801 return 0;
1802} 1802}
1803 1803
1804/** 1804/**
@@ -1879,11 +1879,20 @@ struct napi_gro_cb {
1879 /* jiffies when first packet was created/queued */ 1879 /* jiffies when first packet was created/queued */
1880 unsigned long age; 1880 unsigned long age;
1881 1881
1882 /* Used in ipv6_gro_receive() */ 1882 /* Used in ipv6_gro_receive() and foo-over-udp */
1883 u16 proto; 1883 u16 proto;
1884 1884
1885 /* Used in udp_gro_receive */ 1885 /* Used in udp_gro_receive */
1886 u16 udp_mark; 1886 u8 udp_mark:1;
1887
1888 /* GRO checksum is valid */
1889 u8 csum_valid:1;
1890
1891 /* Number of checksums via CHECKSUM_UNNECESSARY */
1892 u8 csum_cnt:3;
1893
1894 /* Used in foo-over-udp, set in udp[46]_gro_receive */
1895 u8 is_ipv6:1;
1887 1896
1888 /* used to support CHECKSUM_COMPLETE for tunneling protocols */ 1897 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
1889 __wsum csum; 1898 __wsum csum;
@@ -1910,7 +1919,6 @@ struct packet_type {
1910struct offload_callbacks { 1919struct offload_callbacks {
1911 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 1920 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1912 netdev_features_t features); 1921 netdev_features_t features);
1913 int (*gso_send_check)(struct sk_buff *skb);
1914 struct sk_buff **(*gro_receive)(struct sk_buff **head, 1922 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1915 struct sk_buff *skb); 1923 struct sk_buff *skb);
1916 int (*gro_complete)(struct sk_buff *skb, int nhoff); 1924 int (*gro_complete)(struct sk_buff *skb, int nhoff);
@@ -1924,6 +1932,7 @@ struct packet_offload {
1924 1932
1925struct udp_offload { 1933struct udp_offload {
1926 __be16 port; 1934 __be16 port;
1935 u8 ipproto;
1927 struct offload_callbacks callbacks; 1936 struct offload_callbacks callbacks;
1928}; 1937};
1929 1938
@@ -1982,6 +1991,7 @@ struct pcpu_sw_netstats {
1982#define NETDEV_CHANGEUPPER 0x0015 1991#define NETDEV_CHANGEUPPER 0x0015
1983#define NETDEV_RESEND_IGMP 0x0016 1992#define NETDEV_RESEND_IGMP 0x0016
1984#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ 1993#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
1994#define NETDEV_CHANGEINFODATA 0x0018
1985 1995
1986int register_netdevice_notifier(struct notifier_block *nb); 1996int register_netdevice_notifier(struct notifier_block *nb);
1987int unregister_netdevice_notifier(struct notifier_block *nb); 1997int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2074,8 +2084,8 @@ void __dev_remove_pack(struct packet_type *pt);
2074void dev_add_offload(struct packet_offload *po); 2084void dev_add_offload(struct packet_offload *po);
2075void dev_remove_offload(struct packet_offload *po); 2085void dev_remove_offload(struct packet_offload *po);
2076 2086
2077struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, 2087struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2078 unsigned short mask); 2088 unsigned short mask);
2079struct net_device *dev_get_by_name(struct net *net, const char *name); 2089struct net_device *dev_get_by_name(struct net *net, const char *name);
2080struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 2090struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2081struct net_device *__dev_get_by_name(struct net *net, const char *name); 2091struct net_device *__dev_get_by_name(struct net *net, const char *name);
@@ -2153,11 +2163,97 @@ static inline void *skb_gro_network_header(struct sk_buff *skb)
2153static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, 2163static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2154 const void *start, unsigned int len) 2164 const void *start, unsigned int len)
2155{ 2165{
2156 if (skb->ip_summed == CHECKSUM_COMPLETE) 2166 if (NAPI_GRO_CB(skb)->csum_valid)
2157 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, 2167 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2158 csum_partial(start, len, 0)); 2168 csum_partial(start, len, 0));
2159} 2169}
2160 2170
2171/* GRO checksum functions. These are logical equivalents of the normal
2172 * checksum functions (in skbuff.h) except that they operate on the GRO
2173 * offsets and fields in sk_buff.
2174 */
2175
2176__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2177
2178static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2179 bool zero_okay,
2180 __sum16 check)
2181{
2182 return (skb->ip_summed != CHECKSUM_PARTIAL &&
2183 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2184 (!zero_okay || check));
2185}
2186
2187static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2188 __wsum psum)
2189{
2190 if (NAPI_GRO_CB(skb)->csum_valid &&
2191 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2192 return 0;
2193
2194 NAPI_GRO_CB(skb)->csum = psum;
2195
2196 return __skb_gro_checksum_complete(skb);
2197}
2198
2199static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2200{
2201 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2202 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2203 NAPI_GRO_CB(skb)->csum_cnt--;
2204 } else {
2205 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2206 * verified a new top level checksum or an encapsulated one
2207 * during GRO. This saves work if we fallback to normal path.
2208 */
2209 __skb_incr_checksum_unnecessary(skb);
2210 }
2211}
2212
2213#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2214 compute_pseudo) \
2215({ \
2216 __sum16 __ret = 0; \
2217 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2218 __ret = __skb_gro_checksum_validate_complete(skb, \
2219 compute_pseudo(skb, proto)); \
2220 if (__ret) \
2221 __skb_mark_checksum_bad(skb); \
2222 else \
2223 skb_gro_incr_csum_unnecessary(skb); \
2224 __ret; \
2225})
2226
2227#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2228 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2229
2230#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2231 compute_pseudo) \
2232 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2233
2234#define skb_gro_checksum_simple_validate(skb) \
2235 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2236
2237static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2238{
2239 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2240 !NAPI_GRO_CB(skb)->csum_valid);
2241}
2242
2243static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2244 __sum16 check, __wsum pseudo)
2245{
2246 NAPI_GRO_CB(skb)->csum = ~pseudo;
2247 NAPI_GRO_CB(skb)->csum_valid = 1;
2248}
2249
2250#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2251do { \
2252 if (__skb_gro_checksum_convert_check(skb)) \
2253 __skb_gro_checksum_convert(skb, check, \
2254 compute_pseudo(skb, proto)); \
2255} while (0)
2256
2161static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 2257static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2162 unsigned short type, 2258 unsigned short type,
2163 const void *daddr, const void *saddr, 2259 const void *daddr, const void *saddr,
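A hedged sketch of how an encapsulation protocol's gro_receive callback might use the simple validator defined above; myproto_gro_receive() is hypothetical and the flow-matching logic is elided.

#include <linux/netdevice.h>

static struct sk_buff **myproto_gro_receive(struct sk_buff **head,
                                            struct sk_buff *skb)
{
        /* Consume one CHECKSUM_UNNECESSARY level, or verify the checksum
         * over the GRO offsets; on failure, flush instead of merging.
         */
        if (skb_gro_checksum_simple_validate(skb))
                goto flush;

        /* ... look up a matching flow on 'head' and merge ... */
        return NULL;

flush:
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}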
@@ -2261,12 +2357,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2261DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 2357DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2262 2358
2263void __netif_schedule(struct Qdisc *q); 2359void __netif_schedule(struct Qdisc *q);
2264 2360void netif_schedule_queue(struct netdev_queue *txq);
2265static inline void netif_schedule_queue(struct netdev_queue *txq)
2266{
2267 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
2268 __netif_schedule(txq->qdisc);
2269}
2270 2361
2271static inline void netif_tx_schedule_all(struct net_device *dev) 2362static inline void netif_tx_schedule_all(struct net_device *dev)
2272{ 2363{
@@ -2302,11 +2393,7 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
2302 } 2393 }
2303} 2394}
2304 2395
2305static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) 2396void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2306{
2307 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
2308 __netif_schedule(dev_queue->qdisc);
2309}
2310 2397
2311/** 2398/**
2312 * netif_wake_queue - restart transmit 2399 * netif_wake_queue - restart transmit
@@ -2394,6 +2481,34 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2394 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; 2481 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2395} 2482}
2396 2483
2484/**
2485 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
2486 * @dev_queue: pointer to transmit queue
2487 *
2488 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
2489 * to give appropriate hint to the cpu.
2490 */
2491static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
2492{
2493#ifdef CONFIG_BQL
2494 prefetchw(&dev_queue->dql.num_queued);
2495#endif
2496}
2497
2498/**
2499 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
2500 * @dev_queue: pointer to transmit queue
2501 *
2502 * BQL enabled drivers might use this helper in their TX completion path,
2503 * to give appropriate hint to the cpu.
2504 */
2505static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
2506{
2507#ifdef CONFIG_BQL
2508 prefetchw(&dev_queue->dql.limit);
2509#endif
2510}
2511
2397static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 2512static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2398 unsigned int bytes) 2513 unsigned int bytes)
2399{ 2514{
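A hedged sketch of where a BQL-aware driver might place these hints; the surrounding driver logic is hypothetical.

#include <linux/netdevice.h>

static netdev_tx_t mydrv_xmit_bql(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq = skb_get_tx_queue(dev, skb);

        /* Warm the BQL enqueue counters we are about to update. */
        netdev_txq_bql_enqueue_prefetchw(txq);

        /* ... post the descriptor to hardware ... */
        netdev_tx_sent_queue(txq, skb->len);
        return NETDEV_TX_OK;
}

static void mydrv_tx_clean(struct netdev_queue *txq)
{
        unsigned int pkts = 0, bytes = 0;

        /* Warm the BQL completion fields early in the clean loop. */
        netdev_txq_bql_complete_prefetchw(txq);

        /* ... walk completed descriptors, accumulating pkts and bytes ... */
        netdev_tx_completed_queue(txq, pkts, bytes);
}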
@@ -2578,19 +2693,7 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
2578 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 2693 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2579} 2694}
2580 2695
2581/** 2696void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
2582 * netif_wake_subqueue - allow sending packets on subqueue
2583 * @dev: network device
2584 * @queue_index: sub queue index
2585 *
2586 * Resume individual transmit queue of a device with multiple transmit queues.
2587 */
2588static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2589{
2590 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2591 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
2592 __netif_schedule(txq->qdisc);
2593}
2594 2697
2595#ifdef CONFIG_XPS 2698#ifdef CONFIG_XPS
2596int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2699int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
@@ -2754,8 +2857,9 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *);
2754int dev_change_carrier(struct net_device *, bool new_carrier); 2857int dev_change_carrier(struct net_device *, bool new_carrier);
2755int dev_get_phys_port_id(struct net_device *dev, 2858int dev_get_phys_port_id(struct net_device *dev,
2756 struct netdev_phys_port_id *ppid); 2859 struct netdev_phys_port_id *ppid);
2757int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2860struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
2758 struct netdev_queue *txq); 2861struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2862 struct netdev_queue *txq, int *ret);
2759int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 2863int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2760int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 2864int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2761bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); 2865bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
@@ -3176,7 +3280,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
3176} 3280}
3177 3281
3178/** 3282/**
3179 * __dev_uc_unsync - Remove synchonized addresses from device 3283 * __dev_uc_unsync - Remove synchronized addresses from device
3180 * @dev: device to sync 3284 * @dev: device to sync
3181 * @unsync: function to call if address should be removed 3285 * @unsync: function to call if address should be removed
3182 * 3286 *
@@ -3220,7 +3324,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
3220} 3324}
3221 3325
3222/** 3326/**
3223 * __dev_mc_unsync - Remove synchonized addresses from device 3327 * __dev_mc_unsync - Remove synchronized addresses from device
3224 * @dev: device to sync 3328 * @dev: device to sync
3225 * @unsync: function to call if address should be removed 3329 * @unsync: function to call if address should be removed
3226 * 3330 *
@@ -3357,6 +3461,27 @@ int __init dev_proc_init(void);
3357#define dev_proc_init() 0 3461#define dev_proc_init() 0
3358#endif 3462#endif
3359 3463
3464static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
3465 struct sk_buff *skb, struct net_device *dev,
3466 bool more)
3467{
3468 skb->xmit_more = more ? 1 : 0;
3469 return ops->ndo_start_xmit(skb, dev);
3470}
3471
3472static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
3473 struct netdev_queue *txq, bool more)
3474{
3475 const struct net_device_ops *ops = dev->netdev_ops;
3476 int rc;
3477
3478 rc = __netdev_start_xmit(ops, skb, dev, more);
3479 if (rc == NETDEV_TX_OK)
3480 txq_trans_update(txq);
3481
3482 return rc;
3483}
3484
3360int netdev_class_create_file_ns(struct class_attribute *class_attr, 3485int netdev_class_create_file_ns(struct class_attribute *class_attr,
3361 const void *ns); 3486 const void *ns);
3362void netdev_class_remove_file_ns(struct class_attribute *class_attr, 3487void netdev_class_remove_file_ns(struct class_attribute *class_attr,
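Core code (and callers such as pktgen or packet sockets) is expected to go through this helper rather than calling ndo_start_xmit() directly, so that xmit_more and the trans timestamp stay consistent. A hedged sketch of the calling pattern, loosely mirroring what dev_hard_start_xmit() does; the wrapper function is hypothetical.

#include <linux/netdevice.h>

/* Assumes the caller runs with bottom halves disabled. */
static netdev_tx_t example_direct_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
{
        struct netdev_queue *txq = skb_get_tx_queue(dev, skb);
        netdev_tx_t ret = NETDEV_TX_BUSY;

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_drv_stopped(txq))
                /* false: nothing else is queued behind this skb */
                ret = netdev_start_xmit(skb, dev, txq, false);
        HARD_TX_UNLOCK(dev, txq);

        if (!dev_xmit_complete(ret))
                kfree_skb(skb);
        return ret;
}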
@@ -3494,6 +3619,12 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
3494 return dev->priv_flags & IFF_SUPP_NOFCS; 3619 return dev->priv_flags & IFF_SUPP_NOFCS;
3495} 3620}
3496 3621
3622/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
3623static inline void netif_keep_dst(struct net_device *dev)
3624{
3625 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
3626}
3627
3497extern struct pernet_operations __net_initdata loopback_net_ops; 3628extern struct pernet_operations __net_initdata loopback_net_ops;
3498 3629
3499/* Logging, debugging and troubleshooting/diagnostic helpers. */ 3630/* Logging, debugging and troubleshooting/diagnostic helpers. */
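Tunnel-style drivers whose transmit path dereferences skb_dst() call this once at setup time rather than clearing IFF_XMIT_DST_RELEASE by hand. A minimal, hypothetical example:

#include <linux/netdevice.h>

static void mytun_setup(struct net_device *dev)
{
        /* ndo_start_xmit() looks at skb_dst(), so keep the dst attached. */
        netif_keep_dst(dev);
}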
@@ -3523,22 +3654,22 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
3523} 3654}
3524 3655
3525__printf(3, 4) 3656__printf(3, 4)
3526int netdev_printk(const char *level, const struct net_device *dev, 3657void netdev_printk(const char *level, const struct net_device *dev,
3527 const char *format, ...); 3658 const char *format, ...);
3528__printf(2, 3) 3659__printf(2, 3)
3529int netdev_emerg(const struct net_device *dev, const char *format, ...); 3660void netdev_emerg(const struct net_device *dev, const char *format, ...);
3530__printf(2, 3) 3661__printf(2, 3)
3531int netdev_alert(const struct net_device *dev, const char *format, ...); 3662void netdev_alert(const struct net_device *dev, const char *format, ...);
3532__printf(2, 3) 3663__printf(2, 3)
3533int netdev_crit(const struct net_device *dev, const char *format, ...); 3664void netdev_crit(const struct net_device *dev, const char *format, ...);
3534__printf(2, 3) 3665__printf(2, 3)
3535int netdev_err(const struct net_device *dev, const char *format, ...); 3666void netdev_err(const struct net_device *dev, const char *format, ...);
3536__printf(2, 3) 3667__printf(2, 3)
3537int netdev_warn(const struct net_device *dev, const char *format, ...); 3668void netdev_warn(const struct net_device *dev, const char *format, ...);
3538__printf(2, 3) 3669__printf(2, 3)
3539int netdev_notice(const struct net_device *dev, const char *format, ...); 3670void netdev_notice(const struct net_device *dev, const char *format, ...);
3540__printf(2, 3) 3671__printf(2, 3)
3541int netdev_info(const struct net_device *dev, const char *format, ...); 3672void netdev_info(const struct net_device *dev, const char *format, ...);
3542 3673
3543#define MODULE_ALIAS_NETDEV(device) \ 3674#define MODULE_ALIAS_NETDEV(device) \
3544 MODULE_ALIAS("netdev-" device) 3675 MODULE_ALIAS("netdev-" device)
@@ -3556,7 +3687,6 @@ do { \
3556({ \ 3687({ \
3557 if (0) \ 3688 if (0) \
3558 netdev_printk(KERN_DEBUG, __dev, format, ##args); \ 3689 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
3559 0; \
3560}) 3690})
3561#endif 3691#endif
3562 3692
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2077489f9887..2517ece98820 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -9,6 +9,7 @@
9#include <linux/in6.h> 9#include <linux/in6.h>
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/static_key.h>
12#include <uapi/linux/netfilter.h> 13#include <uapi/linux/netfilter.h>
13#ifdef CONFIG_NETFILTER 14#ifdef CONFIG_NETFILTER
14static inline int NF_DROP_GETERR(int verdict) 15static inline int NF_DROP_GETERR(int verdict)
@@ -99,9 +100,9 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
99 100
100extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 101extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
101 102
102#if defined(CONFIG_JUMP_LABEL) 103#ifdef HAVE_JUMP_LABEL
103#include <linux/static_key.h>
104extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 104extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
105
105static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) 106static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
106{ 107{
107 if (__builtin_constant_p(pf) && 108 if (__builtin_constant_p(pf) &&
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 96afc29184be..f1606fa6132d 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -57,6 +57,8 @@ enum ip_set_extension {
57 IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER), 57 IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
58 IPSET_EXT_BIT_COMMENT = 2, 58 IPSET_EXT_BIT_COMMENT = 2,
59 IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT), 59 IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
60 IPSET_EXT_BIT_SKBINFO = 3,
61 IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO),
60 /* Mark set with an extension which needs to call destroy */ 62 /* Mark set with an extension which needs to call destroy */
61 IPSET_EXT_BIT_DESTROY = 7, 63 IPSET_EXT_BIT_DESTROY = 7,
62 IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY), 64 IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
@@ -65,12 +67,14 @@ enum ip_set_extension {
65#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT) 67#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT)
66#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER) 68#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER)
67#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT) 69#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT)
70#define SET_WITH_SKBINFO(s) ((s)->extensions & IPSET_EXT_SKBINFO)
68#define SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD) 71#define SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD)
69 72
70/* Extension id, in size order */ 73/* Extension id, in size order */
71enum ip_set_ext_id { 74enum ip_set_ext_id {
72 IPSET_EXT_ID_COUNTER = 0, 75 IPSET_EXT_ID_COUNTER = 0,
73 IPSET_EXT_ID_TIMEOUT, 76 IPSET_EXT_ID_TIMEOUT,
77 IPSET_EXT_ID_SKBINFO,
74 IPSET_EXT_ID_COMMENT, 78 IPSET_EXT_ID_COMMENT,
75 IPSET_EXT_ID_MAX, 79 IPSET_EXT_ID_MAX,
76}; 80};
@@ -92,6 +96,10 @@ struct ip_set_ext {
92 u64 packets; 96 u64 packets;
93 u64 bytes; 97 u64 bytes;
94 u32 timeout; 98 u32 timeout;
99 u32 skbmark;
100 u32 skbmarkmask;
101 u32 skbprio;
102 u16 skbqueue;
95 char *comment; 103 char *comment;
96}; 104};
97 105
@@ -104,6 +112,13 @@ struct ip_set_comment {
104 char *str; 112 char *str;
105}; 113};
106 114
115struct ip_set_skbinfo {
116 u32 skbmark;
117 u32 skbmarkmask;
118 u32 skbprio;
119 u16 skbqueue;
120};
121
107struct ip_set; 122struct ip_set;
108 123
109#define ext_timeout(e, s) \ 124#define ext_timeout(e, s) \
@@ -112,7 +127,8 @@ struct ip_set;
112(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]) 127(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
113#define ext_comment(e, s) \ 128#define ext_comment(e, s) \
114(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]) 129(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
115 130#define ext_skbinfo(e, s) \
131(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
116 132
117typedef int (*ipset_adtfn)(struct ip_set *set, void *value, 133typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
118 const struct ip_set_ext *ext, 134 const struct ip_set_ext *ext,
@@ -256,6 +272,8 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
256 cadt_flags |= IPSET_FLAG_WITH_COUNTERS; 272 cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
257 if (SET_WITH_COMMENT(set)) 273 if (SET_WITH_COMMENT(set))
258 cadt_flags |= IPSET_FLAG_WITH_COMMENT; 274 cadt_flags |= IPSET_FLAG_WITH_COMMENT;
275 if (SET_WITH_SKBINFO(set))
276 cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
259 if (SET_WITH_FORCEADD(set)) 277 if (SET_WITH_FORCEADD(set))
260 cadt_flags |= IPSET_FLAG_WITH_FORCEADD; 278 cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
261 279
@@ -304,6 +322,43 @@ ip_set_update_counter(struct ip_set_counter *counter,
304 } 322 }
305} 323}
306 324
325static inline void
326ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
327 const struct ip_set_ext *ext,
328 struct ip_set_ext *mext, u32 flags)
329{
330 mext->skbmark = skbinfo->skbmark;
331 mext->skbmarkmask = skbinfo->skbmarkmask;
332 mext->skbprio = skbinfo->skbprio;
333 mext->skbqueue = skbinfo->skbqueue;
334}
335static inline bool
336ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
337{
338 /* Send nonzero parameters only */
339 return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
340 nla_put_net64(skb, IPSET_ATTR_SKBMARK,
341 cpu_to_be64((u64)skbinfo->skbmark << 32 |
342 skbinfo->skbmarkmask))) ||
343 (skbinfo->skbprio &&
344 nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
345 cpu_to_be32(skbinfo->skbprio))) ||
346 (skbinfo->skbqueue &&
347 nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
348 cpu_to_be16(skbinfo->skbqueue)));
349
350}
351
352static inline void
353ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
354 const struct ip_set_ext *ext)
355{
356 skbinfo->skbmark = ext->skbmark;
357 skbinfo->skbmarkmask = ext->skbmarkmask;
358 skbinfo->skbprio = ext->skbprio;
359 skbinfo->skbqueue = ext->skbqueue;
360}
361
307static inline bool 362static inline bool
308ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) 363ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
309{ 364{
@@ -497,6 +552,9 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
497 if (SET_WITH_COMMENT(set) && 552 if (SET_WITH_COMMENT(set) &&
498 ip_set_put_comment(skb, ext_comment(e, set))) 553 ip_set_put_comment(skb, ext_comment(e, set)))
499 return -EMSGSIZE; 554 return -EMSGSIZE;
555 if (SET_WITH_SKBINFO(set) &&
556 ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
557 return -EMSGSIZE;
500 return 0; 558 return 0;
501} 559}
502 560
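A hedged sketch of how a set type's add path would wire in the new extension, mirroring the existing counter and comment handling; the e/set/ext names follow the convention used by the set types.

#include <linux/netfilter/ipset/ip_set.h>

/* Hypothetical fragment of a set type's ->adt add handler: 'e' is the
 * freshly stored element, 'ext' holds the extensions parsed from netlink.
 */
static void example_add_skbinfo(struct ip_set *set, void *e,
                                const struct ip_set_ext *ext)
{
        if (SET_WITH_SKBINFO(set))
                ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
}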
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
index 68c2aea897f5..fe2622a00151 100644
--- a/include/linux/netfilter/ipset/ip_set_list.h
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -6,5 +6,6 @@
6 6
7#define IP_SET_LIST_DEFAULT_SIZE 8 7#define IP_SET_LIST_DEFAULT_SIZE 8
8#define IP_SET_LIST_MIN_SIZE 4 8#define IP_SET_LIST_MIN_SIZE 4
9#define IP_SET_LIST_MAX_SIZE 65536
9 10
10#endif /* __IP_SET_LIST_H */ 11#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 8ab1c278b66d..c755e4971fa3 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -15,7 +15,7 @@ enum nf_br_hook_priorities {
15 NF_BR_PRI_LAST = INT_MAX, 15 NF_BR_PRI_LAST = INT_MAX,
16}; 16};
17 17
18#ifdef CONFIG_BRIDGE_NETFILTER 18#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
19 19
20#define BRNF_PKT_TYPE 0x01 20#define BRNF_PKT_TYPE 0x01
21#define BRNF_BRIDGED_DNAT 0x02 21#define BRNF_BRIDGED_DNAT 0x02
@@ -24,16 +24,6 @@ enum nf_br_hook_priorities {
24#define BRNF_8021Q 0x10 24#define BRNF_8021Q 0x10
25#define BRNF_PPPoE 0x20 25#define BRNF_PPPoE 0x20
26 26
27/* Only used in br_forward.c */
28int nf_bridge_copy_header(struct sk_buff *skb);
29static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
30{
31 if (skb->nf_bridge &&
32 skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
33 return nf_bridge_copy_header(skb);
34 return 0;
35}
36
37static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) 27static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
38{ 28{
39 switch (skb->protocol) { 29 switch (skb->protocol) {
@@ -46,6 +36,44 @@ static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
46 } 36 }
47} 37}
48 38
39static inline void nf_bridge_update_protocol(struct sk_buff *skb)
40{
41 if (skb->nf_bridge->mask & BRNF_8021Q)
42 skb->protocol = htons(ETH_P_8021Q);
43 else if (skb->nf_bridge->mask & BRNF_PPPoE)
44 skb->protocol = htons(ETH_P_PPP_SES);
45}
46
47/* Fill in the header for fragmented IP packets handled by
48 * the IPv4 connection tracking code.
49 *
50 * Only used in br_forward.c
51 */
52static inline int nf_bridge_copy_header(struct sk_buff *skb)
53{
54 int err;
55 unsigned int header_size;
56
57 nf_bridge_update_protocol(skb);
58 header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
59 err = skb_cow_head(skb, header_size);
60 if (err)
61 return err;
62
63 skb_copy_to_linear_data_offset(skb, -header_size,
64 skb->nf_bridge->data, header_size);
65 __skb_push(skb, nf_bridge_encap_header_len(skb));
66 return 0;
67}
68
69static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
70{
71 if (skb->nf_bridge &&
72 skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
73 return nf_bridge_copy_header(skb);
74 return 0;
75}
76
49static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) 77static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
50{ 78{
51 if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE)) 79 if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
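A hedged sketch of the forwarding-path caller the comment above refers to (with bridge netfilter enabled); the function name is hypothetical and the real bridge transmit details are elided.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/skbuff.h>

static int example_push_xmit(struct sk_buff *skb)
{
        /* Restore the encapsulation header saved by br_netfilter before
         * the (possibly refragmented) frame leaves the bridge.
         */
        if (nf_bridge_maybe_copy_header(skb)) {
                kfree_skb(skb);
                return 0;
        }

        skb_push(skb, ETH_HLEN);
        dev_queue_xmit(skb);
        return 0;
}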
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index a1e3064a8d99..026b0c042c40 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -110,6 +110,20 @@ enum nfs_opnum4 {
110 OP_DESTROY_CLIENTID = 57, 110 OP_DESTROY_CLIENTID = 57,
111 OP_RECLAIM_COMPLETE = 58, 111 OP_RECLAIM_COMPLETE = 58,
112 112
113 /* nfs42 */
114 OP_ALLOCATE = 59,
115 OP_COPY = 60,
116 OP_COPY_NOTIFY = 61,
117 OP_DEALLOCATE = 62,
118 OP_IO_ADVISE = 63,
119 OP_LAYOUTERROR = 64,
120 OP_LAYOUTSTATS = 65,
121 OP_OFFLOAD_CANCEL = 66,
122 OP_OFFLOAD_STATUS = 67,
123 OP_READ_PLUS = 68,
124 OP_SEEK = 69,
125 OP_WRITE_SAME = 70,
126
113 OP_ILLEGAL = 10044, 127 OP_ILLEGAL = 10044,
114}; 128};
115 129
@@ -117,10 +131,10 @@ enum nfs_opnum4 {
117Needs to be updated if more operations are defined in future.*/ 131Needs to be updated if more operations are defined in future.*/
118 132
119#define FIRST_NFS4_OP OP_ACCESS 133#define FIRST_NFS4_OP OP_ACCESS
120#define LAST_NFS4_OP OP_RECLAIM_COMPLETE 134#define LAST_NFS4_OP OP_WRITE_SAME
121#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER 135#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
122#define LAST_NFS41_OP OP_RECLAIM_COMPLETE 136#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
123#define LAST_NFS42_OP OP_RECLAIM_COMPLETE 137#define LAST_NFS42_OP OP_WRITE_SAME
124 138
125enum nfsstat4 { 139enum nfsstat4 {
126 NFS4_OK = 0, 140 NFS4_OK = 0,
@@ -235,10 +249,11 @@ enum nfsstat4 {
235 /* nfs42 */ 249 /* nfs42 */
236 NFS4ERR_PARTNER_NOTSUPP = 10088, 250 NFS4ERR_PARTNER_NOTSUPP = 10088,
237 NFS4ERR_PARTNER_NO_AUTH = 10089, 251 NFS4ERR_PARTNER_NO_AUTH = 10089,
238 NFS4ERR_METADATA_NOTSUPP = 10090, 252 NFS4ERR_UNION_NOTSUPP = 10090,
239 NFS4ERR_OFFLOAD_DENIED = 10091, 253 NFS4ERR_OFFLOAD_DENIED = 10091,
240 NFS4ERR_WRONG_LFS = 10092, 254 NFS4ERR_WRONG_LFS = 10092,
241 NFS4ERR_BADLABEL = 10093, 255 NFS4ERR_BADLABEL = 10093,
256 NFS4ERR_OFFLOAD_NO_REQS = 10094,
242}; 257};
243 258
244static inline bool seqid_mutating_err(u32 err) 259static inline bool seqid_mutating_err(u32 err)
@@ -535,4 +550,9 @@ struct nfs4_deviceid {
535 char data[NFS4_DEVICEID4_SIZE]; 550 char data[NFS4_DEVICEID4_SIZE];
536}; 551};
537 552
553enum data_content4 {
554 NFS4_CONTENT_DATA = 0,
555 NFS4_CONTENT_HOLE = 1,
556};
557
538#endif 558#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 5180a7ededec..28d649054d5f 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -443,22 +443,6 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
443} 443}
444 444
445/* 445/*
446 * linux/fs/nfs/xattr.c
447 */
448#ifdef CONFIG_NFS_V3_ACL
449extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t);
450extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t);
451extern int nfs3_setxattr(struct dentry *, const char *,
452 const void *, size_t, int);
453extern int nfs3_removexattr (struct dentry *, const char *name);
454#else
455# define nfs3_listxattr NULL
456# define nfs3_getxattr NULL
457# define nfs3_setxattr NULL
458# define nfs3_removexattr NULL
459#endif
460
461/*
462 * linux/fs/nfs/direct.c 446 * linux/fs/nfs/direct.c
463 */ 447 */
464extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t); 448extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
@@ -529,17 +513,9 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
529extern int nfs_wb_all(struct inode *inode); 513extern int nfs_wb_all(struct inode *inode);
530extern int nfs_wb_page(struct inode *inode, struct page* page); 514extern int nfs_wb_page(struct inode *inode, struct page* page);
531extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); 515extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
532#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
533extern int nfs_commit_inode(struct inode *, int); 516extern int nfs_commit_inode(struct inode *, int);
534extern struct nfs_commit_data *nfs_commitdata_alloc(void); 517extern struct nfs_commit_data *nfs_commitdata_alloc(void);
535extern void nfs_commit_free(struct nfs_commit_data *data); 518extern void nfs_commit_free(struct nfs_commit_data *data);
536#else
537static inline int
538nfs_commit_inode(struct inode *inode, int how)
539{
540 return 0;
541}
542#endif
543 519
544static inline int 520static inline int
545nfs_have_writebacks(struct inode *inode) 521nfs_have_writebacks(struct inode *inode)
@@ -557,23 +533,6 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
557 struct page *); 533 struct page *);
558 534
559/* 535/*
560 * linux/fs/nfs3proc.c
561 */
562#ifdef CONFIG_NFS_V3_ACL
563extern struct posix_acl *nfs3_get_acl(struct inode *inode, int type);
564extern int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type);
565extern int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
566 struct posix_acl *dfacl);
567extern const struct xattr_handler *nfs3_xattr_handlers[];
568#else
569static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
570 struct posix_acl *dfacl)
571{
572 return 0;
573}
574#endif /* CONFIG_NFS_V3_ACL */
575
576/*
577 * inline functions 536 * inline functions
578 */ 537 */
579 538
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 0040629894df..6951c7d9097d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -252,17 +252,6 @@ struct nfs4_layoutget {
252 gfp_t gfp_flags; 252 gfp_t gfp_flags;
253}; 253};
254 254
255struct nfs4_getdevicelist_args {
256 struct nfs4_sequence_args seq_args;
257 const struct nfs_fh *fh;
258 u32 layoutclass;
259};
260
261struct nfs4_getdevicelist_res {
262 struct nfs4_sequence_res seq_res;
263 struct pnfs_devicelist *devlist;
264};
265
266struct nfs4_getdeviceinfo_args { 255struct nfs4_getdeviceinfo_args {
267 struct nfs4_sequence_args seq_args; 256 struct nfs4_sequence_args seq_args;
268 struct pnfs_device *pdev; 257 struct pnfs_device *pdev;
@@ -279,6 +268,9 @@ struct nfs4_layoutcommit_args {
279 __u64 lastbytewritten; 268 __u64 lastbytewritten;
280 struct inode *inode; 269 struct inode *inode;
281 const u32 *bitmask; 270 const u32 *bitmask;
271 size_t layoutupdate_len;
272 struct page *layoutupdate_page;
273 struct page **layoutupdate_pages;
282}; 274};
283 275
284struct nfs4_layoutcommit_res { 276struct nfs4_layoutcommit_res {
@@ -1328,6 +1320,7 @@ struct nfs_commit_data {
1328 struct pnfs_layout_segment *lseg; 1320 struct pnfs_layout_segment *lseg;
1329 struct nfs_client *ds_clp; /* pNFS data server */ 1321 struct nfs_client *ds_clp; /* pNFS data server */
1330 int ds_commit_index; 1322 int ds_commit_index;
1323 loff_t lwb;
1331 const struct rpc_call_ops *mds_ops; 1324 const struct rpc_call_ops *mds_ops;
1332 const struct nfs_commit_completion_ops *completion_ops; 1325 const struct nfs_commit_completion_ops *completion_ops;
1333 int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data); 1326 int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
@@ -1346,6 +1339,7 @@ struct nfs_unlinkdata {
1346 struct inode *dir; 1339 struct inode *dir;
1347 struct rpc_cred *cred; 1340 struct rpc_cred *cred;
1348 struct nfs_fattr dir_attr; 1341 struct nfs_fattr dir_attr;
1342 long timeout;
1349}; 1343};
1350 1344
1351struct nfs_renamedata { 1345struct nfs_renamedata {
@@ -1359,6 +1353,7 @@ struct nfs_renamedata {
1359 struct dentry *new_dentry; 1353 struct dentry *new_dentry;
1360 struct nfs_fattr new_fattr; 1354 struct nfs_fattr new_fattr;
1361 void (*complete)(struct rpc_task *, struct nfs_renamedata *); 1355 void (*complete)(struct rpc_task *, struct nfs_renamedata *);
1356 long timeout;
1362}; 1357};
1363 1358
1364struct nfs_access_entry; 1359struct nfs_access_entry;
diff --git a/include/linux/of.h b/include/linux/of.h
index 6c4363b8ddc3..6545e7aec7bb 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -863,4 +863,7 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
863} 863}
864#endif 864#endif
865 865
866/* CONFIG_OF_RESOLVE api */
867extern int of_resolve_phandles(struct device_node *tree);
868
866#endif /* _LINUX_OF_H */ 869#endif /* _LINUX_OF_H */
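A hedged sketch of the intended caller: overlay-style code resolves phandle references in a detached subtree before turning it into a changeset. The function and variable names are hypothetical.

#include <linux/of.h>

static int example_apply_overlay(struct device_node *overlay)
{
        int err;

        /* Fix up phandle references inside the detached tree first. */
        err = of_resolve_phandles(overlay);
        if (err)
                return err;

        /* ... build and apply an of_changeset from 'overlay' ... */
        return 0;
}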
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index fb7b7221e063..8cb14eb393d6 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -23,17 +23,6 @@ struct of_pci_range {
23#define for_each_of_pci_range(parser, range) \ 23#define for_each_of_pci_range(parser, range) \
24 for (; of_pci_range_parser_one(parser, range);) 24 for (; of_pci_range_parser_one(parser, range);)
25 25
26static inline void of_pci_range_to_resource(struct of_pci_range *range,
27 struct device_node *np,
28 struct resource *res)
29{
30 res->flags = range->flags;
31 res->start = range->cpu_addr;
32 res->end = range->cpu_addr + range->size - 1;
33 res->parent = res->child = res->sibling = NULL;
34 res->name = np->full_name;
35}
36
37/* Translate a DMA address from device space to CPU space */ 26/* Translate a DMA address from device space to CPU space */
38extern u64 of_translate_dma_address(struct device_node *dev, 27extern u64 of_translate_dma_address(struct device_node *dev,
39 const __be32 *in_addr); 28 const __be32 *in_addr);
@@ -55,7 +44,9 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
55extern const __be32 *of_get_address(struct device_node *dev, int index, 44extern const __be32 *of_get_address(struct device_node *dev, int index,
56 u64 *size, unsigned int *flags); 45 u64 *size, unsigned int *flags);
57 46
47extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
58extern unsigned long pci_address_to_pio(phys_addr_t addr); 48extern unsigned long pci_address_to_pio(phys_addr_t addr);
49extern phys_addr_t pci_pio_to_address(unsigned long pio);
59 50
60extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, 51extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
61 struct device_node *node); 52 struct device_node *node);
@@ -80,6 +71,11 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
80 return NULL; 71 return NULL;
81} 72}
82 73
74static inline phys_addr_t pci_pio_to_address(unsigned long pio)
75{
76 return 0;
77}
78
83static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, 79static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
84 struct device_node *node) 80 struct device_node *node)
85{ 81{
@@ -138,6 +134,9 @@ extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
138 u64 *size, unsigned int *flags); 134 u64 *size, unsigned int *flags);
139extern int of_pci_address_to_resource(struct device_node *dev, int bar, 135extern int of_pci_address_to_resource(struct device_node *dev, int bar,
140 struct resource *r); 136 struct resource *r);
137extern int of_pci_range_to_resource(struct of_pci_range *range,
138 struct device_node *np,
139 struct resource *res);
141#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ 140#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
142static inline int of_pci_address_to_resource(struct device_node *dev, int bar, 141static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
143 struct resource *r) 142 struct resource *r)
@@ -150,6 +149,12 @@ static inline const __be32 *of_get_pci_address(struct device_node *dev,
150{ 149{
151 return NULL; 150 return NULL;
152} 151}
152static inline int of_pci_range_to_resource(struct of_pci_range *range,
153 struct device_node *np,
154 struct resource *res)
155{
156 return -ENOSYS;
157}
153#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ 158#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
154 159
155#endif /* __OF_ADDRESS_H */ 160#endif /* __OF_ADDRESS_H */
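A hedged sketch of a host controller driver walking its devicetree ranges with the converter, which is now out of line and reports errors (for example for I/O ranges that cannot be mapped into PIO space). The function name is hypothetical.

#include <linux/of_address.h>

static int example_parse_ranges(struct device_node *np)
{
        struct of_pci_range_parser parser;
        struct of_pci_range range;
        struct resource res;
        int err;

        err = of_pci_range_parser_init(&parser, np);
        if (err)
                return err;

        for_each_of_pci_range(&parser, &range) {
                err = of_pci_range_to_resource(&range, np, &res);
                if (err)
                        continue;       /* skip ranges we cannot translate */
                /* ... request and record 'res' ... */
        }
        return 0;
}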
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index dde3a4a0fa5d..1fd207e7a847 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -15,6 +15,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
15int of_pci_get_devfn(struct device_node *np); 15int of_pci_get_devfn(struct device_node *np);
16int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); 16int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
17int of_pci_parse_bus_range(struct device_node *node, struct resource *res); 17int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
18int of_get_pci_domain_nr(struct device_node *node);
18#else 19#else
19static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) 20static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
20{ 21{
@@ -43,6 +44,18 @@ of_pci_parse_bus_range(struct device_node *node, struct resource *res)
43{ 44{
44 return -EINVAL; 45 return -EINVAL;
45} 46}
47
48static inline int
49of_get_pci_domain_nr(struct device_node *node)
50{
51 return -1;
52}
53#endif
54
55#if defined(CONFIG_OF_ADDRESS)
56int of_pci_get_host_bridge_resources(struct device_node *dev,
57 unsigned char busno, unsigned char bus_max,
58 struct list_head *resources, resource_size_t *io_base);
46#endif 59#endif
47 60
48#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) 61#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
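A hedged sketch of a generic host bridge probe using the new helper to turn the devicetree "ranges" property into a resource list; the function name and bus limits are illustrative.

#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/of_pci.h>

static int example_host_probe(struct device_node *np)
{
        resource_size_t io_base;
        LIST_HEAD(res);
        int err;

        err = of_pci_get_host_bridge_resources(np, 0, 255, &res, &io_base);
        if (err)
                return err;

        /* ... hand 'res' to pci_scan_root_bus() or equivalent ... */
        return 0;
}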
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 6f06f8bc612c..e5a70132a240 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -306,15 +306,12 @@ extern void omap_set_dma_transfer_params(int lch, int data_type,
306 int elem_count, int frame_count, 306 int elem_count, int frame_count,
307 int sync_mode, 307 int sync_mode,
308 int dma_trigger, int src_or_dst_synch); 308 int dma_trigger, int src_or_dst_synch);
309extern void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode,
310 u32 color);
311extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode); 309extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode);
312extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode); 310extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode);
313 311
314extern void omap_set_dma_src_params(int lch, int src_port, int src_amode, 312extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
315 unsigned long src_start, 313 unsigned long src_start,
316 int src_ei, int src_fi); 314 int src_ei, int src_fi);
317extern void omap_set_dma_src_index(int lch, int eidx, int fidx);
318extern void omap_set_dma_src_data_pack(int lch, int enable); 315extern void omap_set_dma_src_data_pack(int lch, int enable);
319extern void omap_set_dma_src_burst_mode(int lch, 316extern void omap_set_dma_src_burst_mode(int lch,
320 enum omap_dma_burst_mode burst_mode); 317 enum omap_dma_burst_mode burst_mode);
@@ -322,7 +319,6 @@ extern void omap_set_dma_src_burst_mode(int lch,
322extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode, 319extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
323 unsigned long dest_start, 320 unsigned long dest_start,
324 int dst_ei, int dst_fi); 321 int dst_ei, int dst_fi);
325extern void omap_set_dma_dest_index(int lch, int eidx, int fidx);
326extern void omap_set_dma_dest_data_pack(int lch, int enable); 322extern void omap_set_dma_dest_data_pack(int lch, int enable);
327extern void omap_set_dma_dest_burst_mode(int lch, 323extern void omap_set_dma_dest_burst_mode(int lch,
328 enum omap_dma_burst_mode burst_mode); 324 enum omap_dma_burst_mode burst_mode);
@@ -331,52 +327,19 @@ extern void omap_set_dma_params(int lch,
331 struct omap_dma_channel_params *params); 327 struct omap_dma_channel_params *params);
332 328
333extern void omap_dma_link_lch(int lch_head, int lch_queue); 329extern void omap_dma_link_lch(int lch_head, int lch_queue);
334extern void omap_dma_unlink_lch(int lch_head, int lch_queue);
335 330
336extern int omap_set_dma_callback(int lch, 331extern int omap_set_dma_callback(int lch,
337 void (*callback)(int lch, u16 ch_status, void *data), 332 void (*callback)(int lch, u16 ch_status, void *data),
338 void *data); 333 void *data);
339extern dma_addr_t omap_get_dma_src_pos(int lch); 334extern dma_addr_t omap_get_dma_src_pos(int lch);
340extern dma_addr_t omap_get_dma_dst_pos(int lch); 335extern dma_addr_t omap_get_dma_dst_pos(int lch);
341extern void omap_clear_dma(int lch);
342extern int omap_get_dma_active_status(int lch); 336extern int omap_get_dma_active_status(int lch);
343extern int omap_dma_running(void); 337extern int omap_dma_running(void);
344extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth, 338extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth,
345 int tparams); 339 int tparams);
346extern int omap_dma_set_prio_lch(int lch, unsigned char read_prio,
347 unsigned char write_prio);
348extern void omap_set_dma_dst_endian_type(int lch, enum end_type etype);
349extern void omap_set_dma_src_endian_type(int lch, enum end_type etype);
350extern int omap_get_dma_index(int lch, int *ei, int *fi);
351
352void omap_dma_global_context_save(void); 340void omap_dma_global_context_save(void);
353void omap_dma_global_context_restore(void); 341void omap_dma_global_context_restore(void);
354 342
355extern void omap_dma_disable_irq(int lch);
356
357/* Chaining APIs */
358#ifndef CONFIG_ARCH_OMAP1
359extern int omap_request_dma_chain(int dev_id, const char *dev_name,
360 void (*callback) (int lch, u16 ch_status,
361 void *data),
362 int *chain_id, int no_of_chans,
363 int chain_mode,
364 struct omap_dma_channel_params params);
365extern int omap_free_dma_chain(int chain_id);
366extern int omap_dma_chain_a_transfer(int chain_id, int src_start,
367 int dest_start, int elem_count,
368 int frame_count, void *callbk_data);
369extern int omap_start_dma_chain_transfers(int chain_id);
370extern int omap_stop_dma_chain_transfers(int chain_id);
371extern int omap_get_dma_chain_index(int chain_id, int *ei, int *fi);
372extern int omap_get_dma_chain_dst_pos(int chain_id);
373extern int omap_get_dma_chain_src_pos(int chain_id);
374
375extern int omap_modify_dma_chain_params(int chain_id,
376 struct omap_dma_channel_params params);
377extern int omap_dma_chain_status(int chain_id);
378#endif
379
380#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP) 343#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
381#include <mach/lcd_dma.h> 344#include <mach/lcd_dma.h>
382#else 345#else
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 3df8c7db7a4e..7ea069cd3257 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -24,8 +24,7 @@ enum mapping_flags {
24 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ 24 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
25 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ 25 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
26 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ 26 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
27 AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ 27 AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
28 AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */
29}; 28};
30 29
31static inline void mapping_set_error(struct address_space *mapping, int error) 30static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -55,21 +54,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
55 return !!mapping; 54 return !!mapping;
56} 55}
57 56
58static inline void mapping_set_balloon(struct address_space *mapping)
59{
60 set_bit(AS_BALLOON_MAP, &mapping->flags);
61}
62
63static inline void mapping_clear_balloon(struct address_space *mapping)
64{
65 clear_bit(AS_BALLOON_MAP, &mapping->flags);
66}
67
68static inline int mapping_balloon(struct address_space *mapping)
69{
70 return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
71}
72
73static inline void mapping_set_exiting(struct address_space *mapping) 57static inline void mapping_set_exiting(struct address_space *mapping)
74{ 58{
75 set_bit(AS_EXITING, &mapping->flags); 59 set_bit(AS_EXITING, &mapping->flags);
@@ -96,7 +80,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
96} 80}
97 81
98/* 82/*
99 * The page cache can done in larger chunks than 83 * The page cache can be done in larger chunks than
100 * one page, because it allows for more efficient 84 * one page, because it allows for more efficient
101 * throughput (it can then be mapped into user 85 * throughput (it can then be mapped into user
102 * space in smaller chunks for same flexibility). 86 * space in smaller chunks for same flexibility).
@@ -496,12 +480,14 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
496} 480}
497 481
498/* 482/*
499 * This is exported only for wait_on_page_locked/wait_on_page_writeback. 483 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
500 * Never use this directly! 484 * and for filesystems which need to wait on PG_private.
501 */ 485 */
502extern void wait_on_page_bit(struct page *page, int bit_nr); 486extern void wait_on_page_bit(struct page *page, int bit_nr);
503 487
504extern int wait_on_page_bit_killable(struct page *page, int bit_nr); 488extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
489extern int wait_on_page_bit_killable_timeout(struct page *page,
490 int bit_nr, unsigned long timeout);
505 491
506static inline int wait_on_page_locked_killable(struct page *page) 492static inline int wait_on_page_locked_killable(struct page *page)
507{ 493{
@@ -510,6 +496,12 @@ static inline int wait_on_page_locked_killable(struct page *page)
510 return 0; 496 return 0;
511} 497}
512 498
499extern wait_queue_head_t *page_waitqueue(struct page *page);
500static inline void wake_up_page(struct page *page, int bit)
501{
502 __wake_up_bit(page_waitqueue(page), &page->flags, bit);
503}
504
513/* 505/*
514 * Wait for a page to be unlocked. 506 * Wait for a page to be unlocked.
515 * 507 *
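A hedged sketch of the filesystem-side wait that the updated comment alludes to; the bit and timeout chosen here are purely illustrative.

#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Wait up to five seconds, or until a fatal signal, for another context
 * to release PG_private on this page.
 */
static int example_wait_private(struct page *page)
{
        if (!PagePrivate(page))
                return 0;
        return wait_on_page_bit_killable_timeout(page, PG_private,
                                                 msecs_to_jiffies(5000));
}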
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 61978a460841..5be8db45e368 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -45,7 +45,7 @@
45 * In the interest of not exposing interfaces to user-space unnecessarily, 45 * In the interest of not exposing interfaces to user-space unnecessarily,
46 * the following kernel-only defines are being added here. 46 * the following kernel-only defines are being added here.
47 */ 47 */
48#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn) 48#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
49/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ 49/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
50#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) 50#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
51 51
@@ -303,6 +303,7 @@ struct pci_dev {
303 D3cold, not set for devices 303 D3cold, not set for devices
304 powered on/off by the 304 powered on/off by the
305 corresponding bridge */ 305 corresponding bridge */
306 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
306 unsigned int d3_delay; /* D3->D0 transition time in ms */ 307 unsigned int d3_delay; /* D3->D0 transition time in ms */
307 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ 308 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
308 309
@@ -456,6 +457,9 @@ struct pci_bus {
456 unsigned char primary; /* number of primary bridge */ 457 unsigned char primary; /* number of primary bridge */
457 unsigned char max_bus_speed; /* enum pci_bus_speed */ 458 unsigned char max_bus_speed; /* enum pci_bus_speed */
458 unsigned char cur_bus_speed; /* enum pci_bus_speed */ 459 unsigned char cur_bus_speed; /* enum pci_bus_speed */
460#ifdef CONFIG_PCI_DOMAINS_GENERIC
461 int domain_nr;
462#endif
459 463
460 char name[48]; 464 char name[48];
461 465
@@ -1021,6 +1025,11 @@ bool pci_dev_run_wake(struct pci_dev *dev);
1021bool pci_check_pme_status(struct pci_dev *dev); 1025bool pci_check_pme_status(struct pci_dev *dev);
1022void pci_pme_wakeup_bus(struct pci_bus *bus); 1026void pci_pme_wakeup_bus(struct pci_bus *bus);
1023 1027
1028static inline void pci_ignore_hotplug(struct pci_dev *dev)
1029{
1030 dev->ignore_hotplug = 1;
1031}
1032
1024static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 1033static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1025 bool enable) 1034 bool enable)
1026{ 1035{
@@ -1097,6 +1106,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1097 resource_size_t), 1106 resource_size_t),
1098 void *alignf_data); 1107 void *alignf_data);
1099 1108
1109
1110int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1111
1100static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) 1112static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1101{ 1113{
1102 struct pci_bus_region region; 1114 struct pci_bus_region region;
@@ -1282,12 +1294,32 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
1282 */ 1294 */
1283#ifdef CONFIG_PCI_DOMAINS 1295#ifdef CONFIG_PCI_DOMAINS
1284extern int pci_domains_supported; 1296extern int pci_domains_supported;
1297int pci_get_new_domain_nr(void);
1285#else 1298#else
1286enum { pci_domains_supported = 0 }; 1299enum { pci_domains_supported = 0 };
1287static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1300static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1288static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } 1301static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1302static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1289#endif /* CONFIG_PCI_DOMAINS */ 1303#endif /* CONFIG_PCI_DOMAINS */
1290 1304
1305/*
1306 * Generic implementation for PCI domain support. If your
1307 * architecture does not need custom management of PCI
1308 * domains then this implementation will be used
1309 */
1310#ifdef CONFIG_PCI_DOMAINS_GENERIC
1311static inline int pci_domain_nr(struct pci_bus *bus)
1312{
1313 return bus->domain_nr;
1314}
1315void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
1316#else
1317static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
1318 struct device *parent)
1319{
1320}
1321#endif
1322
1291/* some architectures require additional setup to direct VGA traffic */ 1323/* some architectures require additional setup to direct VGA traffic */
1292typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1324typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1293 unsigned int command_bits, u32 flags); 1325 unsigned int command_bits, u32 flags);
@@ -1396,6 +1428,7 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
1396 1428
1397static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1429static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1398static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 1430static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
1431static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1399 1432
1400#define dev_is_pci(d) (false) 1433#define dev_is_pci(d) (false)
1401#define dev_is_pf(d) (false) 1434#define dev_is_pf(d) (false)
@@ -1557,16 +1590,11 @@ enum pci_fixup_pass {
1557 1590
1558#ifdef CONFIG_PCI_QUIRKS 1591#ifdef CONFIG_PCI_QUIRKS
1559void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 1592void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
1560struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
1561int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); 1593int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
1562void pci_dev_specific_enable_acs(struct pci_dev *dev); 1594void pci_dev_specific_enable_acs(struct pci_dev *dev);
1563#else 1595#else
1564static inline void pci_fixup_device(enum pci_fixup_pass pass, 1596static inline void pci_fixup_device(enum pci_fixup_pass pass,
1565 struct pci_dev *dev) { } 1597 struct pci_dev *dev) { }
1566static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
1567{
1568 return pci_dev_get(dev);
1569}
1570static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, 1598static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
1571 u16 acs_flags) 1599 u16 acs_flags)
1572{ 1600{
@@ -1701,7 +1729,7 @@ bool pci_acs_path_enabled(struct pci_dev *start,
1701 struct pci_dev *end, u16 acs_flags); 1729 struct pci_dev *end, u16 acs_flags);
1702 1730
1703#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ 1731#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
1704#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT) 1732#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
1705 1733
1706/* Large Resource Data Type Tag Item Names */ 1734/* Large Resource Data Type Tag Item Names */
1707#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ 1735#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
@@ -1828,15 +1856,17 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
1828 int (*fn)(struct pci_dev *pdev, 1856 int (*fn)(struct pci_dev *pdev,
1829 u16 alias, void *data), void *data); 1857 u16 alias, void *data), void *data);
1830 1858
1831/** 1859/* helper functions for operation of device flag */
1832 * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device 1860static inline void pci_set_dev_assigned(struct pci_dev *pdev)
1833 * @pdev: the PCI device 1861{
1834 * 1862 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
1835 * if the device is PCIE, return NULL 1863}
1836 * if the device isn't connected to a PCIe bridge (that is its parent is a 1864static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
1837 * legacy PCI bridge and the bridge is directly connected to bus 0), return its 1865{
1838 * parent 1866 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
1839 */ 1867}
1840struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); 1868static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
1841 1869{
1870 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
1871}
1842#endif /* LINUX_PCI_H */ 1872#endif /* LINUX_PCI_H */
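The pci.h changes above add pci_ignore_hotplug() plus the PCI_DEV_FLAGS_ASSIGNED helpers, and drop pci_get_dma_source()/pci_find_upstream_pcie_bridge(). A sketch of how a device-assignment path might use the new helpers; the example_* function names are hypothetical and only the pci_* calls come from the patch:

#include <linux/pci.h>

/* Hypothetical assignment hooks for a pass-through style driver. */
static void example_assign_pci_dev(struct pci_dev *pdev)
{
        /* Suppress hotplug handling while the device is owned elsewhere. */
        pci_ignore_hotplug(pdev);

        if (!pci_is_dev_assigned(pdev))
                pci_set_dev_assigned(pdev);
}

static void example_unassign_pci_dev(struct pci_dev *pdev)
{
        if (pci_is_dev_assigned(pdev))
                pci_clear_dev_assigned(pdev);
}
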
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 5f2e559af6b0..2706ee9a4327 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -187,6 +187,4 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
187 return -ENODEV; 187 return -ENODEV;
188} 188}
189#endif 189#endif
190
191void pci_configure_slot(struct pci_dev *dev);
192#endif 190#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index aa0d39073e9c..24f97bf74266 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2245,6 +2245,8 @@
2245#define PCI_VENDOR_ID_MORETON 0x15aa 2245#define PCI_VENDOR_ID_MORETON 0x15aa
2246#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 2246#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
2247 2247
2248#define PCI_VENDOR_ID_VMWARE 0x15ad
2249
2248#define PCI_VENDOR_ID_ZOLTRIX 0x15b0 2250#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
2249#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 2251#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
2250 2252
@@ -2536,6 +2538,7 @@
2536#define PCI_DEVICE_ID_INTEL_EESSC 0x0008 2538#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
2537#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 2539#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
2538#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 2540#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
2541#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
2539#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 2542#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
2540#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 2543#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
2541#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 2544#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
@@ -2818,7 +2821,22 @@
2818#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43 2821#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
2819#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44 2822#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
2820#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45 2823#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
2824#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
2825#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
2826#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
2827#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
2828#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
2829#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
2830#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
2831#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
2832#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
2833#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
2834#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
2835#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
2821#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0 2836#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
2837#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
2838#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
2839#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
2822#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f 2840#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
2823#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 2841#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
2824#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3 2842#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 3dfbf237cd8f..d5c89e0dd0e6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
13 * 13 *
14 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less 14 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
15 * than an atomic_t - this is because of the way shutdown works, see 15 * than an atomic_t - this is because of the way shutdown works, see
16 * percpu_ref_kill()/PCPU_COUNT_BIAS. 16 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
17 * 17 *
18 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the 18 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
19 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill() 19 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -29,7 +29,7 @@
29 * calls io_destroy() or the process exits. 29 * calls io_destroy() or the process exits.
30 * 30 *
31 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it 31 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
32 * calls percpu_ref_kill(), then hlist_del_rcu() and sychronize_rcu() to remove 32 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
33 * the kioctx from the proccess's list of kioctxs - after that, there can't be 33 * the kioctx from the proccess's list of kioctxs - after that, there can't be
34 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop 34 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
35 * the initial ref with percpu_ref_put(). 35 * the initial ref with percpu_ref_put().
@@ -49,28 +49,60 @@
49#include <linux/kernel.h> 49#include <linux/kernel.h>
50#include <linux/percpu.h> 50#include <linux/percpu.h>
51#include <linux/rcupdate.h> 51#include <linux/rcupdate.h>
52#include <linux/gfp.h>
52 53
53struct percpu_ref; 54struct percpu_ref;
54typedef void (percpu_ref_func_t)(struct percpu_ref *); 55typedef void (percpu_ref_func_t)(struct percpu_ref *);
55 56
57/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
58enum {
59 __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
60 __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */
61 __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
62
63 __PERCPU_REF_FLAG_BITS = 2,
64};
65
66/* @flags for percpu_ref_init() */
67enum {
68 /*
69 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
70 * operation using percpu_ref_switch_to_percpu(). If initialized
71 * with this flag, the ref will stay in atomic mode until
72 * percpu_ref_switch_to_percpu() is invoked on it.
73 */
74 PERCPU_REF_INIT_ATOMIC = 1 << 0,
75
76 /*
77 * Start dead w/ ref == 0 in atomic mode. Must be revived with
78 * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
79 */
80 PERCPU_REF_INIT_DEAD = 1 << 1,
81};
82
56struct percpu_ref { 83struct percpu_ref {
57 atomic_t count; 84 atomic_long_t count;
58 /* 85 /*
59 * The low bit of the pointer indicates whether the ref is in percpu 86 * The low bit of the pointer indicates whether the ref is in percpu
60 * mode; if set, then get/put will manipulate the atomic_t. 87 * mode; if set, then get/put will manipulate the atomic_t.
61 */ 88 */
62 unsigned long pcpu_count_ptr; 89 unsigned long percpu_count_ptr;
63 percpu_ref_func_t *release; 90 percpu_ref_func_t *release;
64 percpu_ref_func_t *confirm_kill; 91 percpu_ref_func_t *confirm_switch;
92 bool force_atomic:1;
65 struct rcu_head rcu; 93 struct rcu_head rcu;
66}; 94};
67 95
68int __must_check percpu_ref_init(struct percpu_ref *ref, 96int __must_check percpu_ref_init(struct percpu_ref *ref,
69 percpu_ref_func_t *release); 97 percpu_ref_func_t *release, unsigned int flags,
70void percpu_ref_reinit(struct percpu_ref *ref); 98 gfp_t gfp);
71void percpu_ref_exit(struct percpu_ref *ref); 99void percpu_ref_exit(struct percpu_ref *ref);
100void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
101 percpu_ref_func_t *confirm_switch);
102void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
72void percpu_ref_kill_and_confirm(struct percpu_ref *ref, 103void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
73 percpu_ref_func_t *confirm_kill); 104 percpu_ref_func_t *confirm_kill);
105void percpu_ref_reinit(struct percpu_ref *ref);
74 106
75/** 107/**
76 * percpu_ref_kill - drop the initial ref 108 * percpu_ref_kill - drop the initial ref
@@ -87,26 +119,24 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
87 return percpu_ref_kill_and_confirm(ref, NULL); 119 return percpu_ref_kill_and_confirm(ref, NULL);
88} 120}
89 121
90#define PCPU_REF_DEAD 1
91
92/* 122/*
93 * Internal helper. Don't use outside percpu-refcount proper. The 123 * Internal helper. Don't use outside percpu-refcount proper. The
94 * function doesn't return the pointer and let the caller test it for NULL 124 * function doesn't return the pointer and let the caller test it for NULL
95 * because doing so forces the compiler to generate two conditional 125 * because doing so forces the compiler to generate two conditional
96 * branches as it can't assume that @ref->pcpu_count is not NULL. 126 * branches as it can't assume that @ref->percpu_count is not NULL.
97 */ 127 */
98static inline bool __pcpu_ref_alive(struct percpu_ref *ref, 128static inline bool __ref_is_percpu(struct percpu_ref *ref,
99 unsigned __percpu **pcpu_countp) 129 unsigned long __percpu **percpu_countp)
100{ 130{
101 unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr); 131 unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
102 132
103 /* paired with smp_store_release() in percpu_ref_reinit() */ 133 /* paired with smp_store_release() in percpu_ref_reinit() */
104 smp_read_barrier_depends(); 134 smp_read_barrier_depends();
105 135
106 if (unlikely(pcpu_ptr & PCPU_REF_DEAD)) 136 if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
107 return false; 137 return false;
108 138
109 *pcpu_countp = (unsigned __percpu *)pcpu_ptr; 139 *percpu_countp = (unsigned long __percpu *)percpu_ptr;
110 return true; 140 return true;
111} 141}
112 142
@@ -114,18 +144,20 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
114 * percpu_ref_get - increment a percpu refcount 144 * percpu_ref_get - increment a percpu refcount
115 * @ref: percpu_ref to get 145 * @ref: percpu_ref to get
116 * 146 *
117 * Analagous to atomic_inc(). 147 * Analagous to atomic_long_inc().
118 */ 148 *
149 * This function is safe to call as long as @ref is between init and exit.
150 */
119static inline void percpu_ref_get(struct percpu_ref *ref) 151static inline void percpu_ref_get(struct percpu_ref *ref)
120{ 152{
121 unsigned __percpu *pcpu_count; 153 unsigned long __percpu *percpu_count;
122 154
123 rcu_read_lock_sched(); 155 rcu_read_lock_sched();
124 156
125 if (__pcpu_ref_alive(ref, &pcpu_count)) 157 if (__ref_is_percpu(ref, &percpu_count))
126 this_cpu_inc(*pcpu_count); 158 this_cpu_inc(*percpu_count);
127 else 159 else
128 atomic_inc(&ref->count); 160 atomic_long_inc(&ref->count);
129 161
130 rcu_read_unlock_sched(); 162 rcu_read_unlock_sched();
131} 163}
@@ -137,20 +169,20 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
137 * Increment a percpu refcount unless its count already reached zero. 169 * Increment a percpu refcount unless its count already reached zero.
138 * Returns %true on success; %false on failure. 170 * Returns %true on success; %false on failure.
139 * 171 *
140 * The caller is responsible for ensuring that @ref stays accessible. 172 * This function is safe to call as long as @ref is between init and exit.
141 */ 173 */
142static inline bool percpu_ref_tryget(struct percpu_ref *ref) 174static inline bool percpu_ref_tryget(struct percpu_ref *ref)
143{ 175{
144 unsigned __percpu *pcpu_count; 176 unsigned long __percpu *percpu_count;
145 int ret = false; 177 int ret;
146 178
147 rcu_read_lock_sched(); 179 rcu_read_lock_sched();
148 180
149 if (__pcpu_ref_alive(ref, &pcpu_count)) { 181 if (__ref_is_percpu(ref, &percpu_count)) {
150 this_cpu_inc(*pcpu_count); 182 this_cpu_inc(*percpu_count);
151 ret = true; 183 ret = true;
152 } else { 184 } else {
153 ret = atomic_inc_not_zero(&ref->count); 185 ret = atomic_long_inc_not_zero(&ref->count);
154 } 186 }
155 187
156 rcu_read_unlock_sched(); 188 rcu_read_unlock_sched();
@@ -165,23 +197,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
165 * Increment a percpu refcount unless it has already been killed. Returns 197 * Increment a percpu refcount unless it has already been killed. Returns
166 * %true on success; %false on failure. 198 * %true on success; %false on failure.
167 * 199 *
168 * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget 200 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
169 * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be 201 * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
170 * used. After the confirm_kill callback is invoked, it's guaranteed that 202 * should be used. After the confirm_kill callback is invoked, it's
171 * no new reference will be given out by percpu_ref_tryget(). 203 * guaranteed that no new reference will be given out by
204 * percpu_ref_tryget_live().
172 * 205 *
173 * The caller is responsible for ensuring that @ref stays accessible. 206 * This function is safe to call as long as @ref is between init and exit.
174 */ 207 */
175static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) 208static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
176{ 209{
177 unsigned __percpu *pcpu_count; 210 unsigned long __percpu *percpu_count;
178 int ret = false; 211 int ret = false;
179 212
180 rcu_read_lock_sched(); 213 rcu_read_lock_sched();
181 214
182 if (__pcpu_ref_alive(ref, &pcpu_count)) { 215 if (__ref_is_percpu(ref, &percpu_count)) {
183 this_cpu_inc(*pcpu_count); 216 this_cpu_inc(*percpu_count);
184 ret = true; 217 ret = true;
218 } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
219 ret = atomic_long_inc_not_zero(&ref->count);
185 } 220 }
186 221
187 rcu_read_unlock_sched(); 222 rcu_read_unlock_sched();
@@ -195,16 +230,18 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
195 * 230 *
196 * Decrement the refcount, and if 0, call the release function (which was passed 231 * Decrement the refcount, and if 0, call the release function (which was passed
197 * to percpu_ref_init()) 232 * to percpu_ref_init())
233 *
234 * This function is safe to call as long as @ref is between init and exit.
198 */ 235 */
199static inline void percpu_ref_put(struct percpu_ref *ref) 236static inline void percpu_ref_put(struct percpu_ref *ref)
200{ 237{
201 unsigned __percpu *pcpu_count; 238 unsigned long __percpu *percpu_count;
202 239
203 rcu_read_lock_sched(); 240 rcu_read_lock_sched();
204 241
205 if (__pcpu_ref_alive(ref, &pcpu_count)) 242 if (__ref_is_percpu(ref, &percpu_count))
206 this_cpu_dec(*pcpu_count); 243 this_cpu_dec(*percpu_count);
207 else if (unlikely(atomic_dec_and_test(&ref->count))) 244 else if (unlikely(atomic_long_dec_and_test(&ref->count)))
208 ref->release(ref); 245 ref->release(ref);
209 246
210 rcu_read_unlock_sched(); 247 rcu_read_unlock_sched();
@@ -215,14 +252,16 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
215 * @ref: percpu_ref to test 252 * @ref: percpu_ref to test
216 * 253 *
217 * Returns %true if @ref reached zero. 254 * Returns %true if @ref reached zero.
255 *
256 * This function is safe to call as long as @ref is between init and exit.
218 */ 257 */
219static inline bool percpu_ref_is_zero(struct percpu_ref *ref) 258static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
220{ 259{
221 unsigned __percpu *pcpu_count; 260 unsigned long __percpu *percpu_count;
222 261
223 if (__pcpu_ref_alive(ref, &pcpu_count)) 262 if (__ref_is_percpu(ref, &percpu_count))
224 return false; 263 return false;
225 return !atomic_read(&ref->count); 264 return !atomic_long_read(&ref->count);
226} 265}
227 266
228#endif 267#endif
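percpu_ref_init() above gains @flags (PERCPU_REF_INIT_ATOMIC, PERCPU_REF_INIT_DEAD) and @gfp arguments, and a ref can now be switched between atomic and percpu mode. A usage sketch against the new signature; the context structure, release callback and error handling are illustrative:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct example_ctx {
        struct percpu_ref ref;
        /* ... payload ... */
};

static void example_ctx_release(struct percpu_ref *ref)
{
        struct example_ctx *ctx = container_of(ref, struct example_ctx, ref);

        kfree(ctx);
}

static struct example_ctx *example_ctx_create(void)
{
        struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return NULL;

        /* Start in atomic mode; percpu_ref_switch_to_percpu() can follow later. */
        if (percpu_ref_init(&ctx->ref, example_ctx_release,
                            PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) {
                kfree(ctx);
                return NULL;
        }
        return ctx;
}

Teardown still goes through percpu_ref_kill() and percpu_ref_exit() as before.
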
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 6f61b61b7996..a3aa63e47637 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,9 +48,9 @@
48 * intelligent way to determine this would be nice. 48 * intelligent way to determine this would be nice.
49 */ 49 */
50#if BITS_PER_LONG > 32 50#if BITS_PER_LONG > 32
51#define PERCPU_DYNAMIC_RESERVE (20 << 10) 51#define PERCPU_DYNAMIC_RESERVE (28 << 10)
52#else 52#else
53#define PERCPU_DYNAMIC_RESERVE (12 << 10) 53#define PERCPU_DYNAMIC_RESERVE (20 << 10)
54#endif 54#endif
55 55
56extern void *pcpu_base_addr; 56extern void *pcpu_base_addr;
@@ -122,11 +122,16 @@ extern void __init setup_per_cpu_areas(void);
122#endif 122#endif
123extern void __init percpu_init_late(void); 123extern void __init percpu_init_late(void);
124 124
125extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
125extern void __percpu *__alloc_percpu(size_t size, size_t align); 126extern void __percpu *__alloc_percpu(size_t size, size_t align);
126extern void free_percpu(void __percpu *__pdata); 127extern void free_percpu(void __percpu *__pdata);
127extern phys_addr_t per_cpu_ptr_to_phys(void *addr); 128extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
128 129
129#define alloc_percpu(type) \ 130#define alloc_percpu_gfp(type, gfp) \
130 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) 131 (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \
132 __alignof__(type), gfp)
133#define alloc_percpu(type) \
134 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
135 __alignof__(type))
131 136
132#endif /* __LINUX_PERCPU_H */ 137#endif /* __LINUX_PERCPU_H */
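alloc_percpu() above gains a GFP-aware variant. A sketch of allocating per-CPU storage from a context that cannot sleep; the counter name is illustrative:

#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static unsigned long __percpu *example_counters;

static int example_alloc_counters_atomic(void)
{
        /* alloc_percpu() implies GFP_KERNEL; use the _gfp variant when sleeping is not allowed. */
        example_counters = alloc_percpu_gfp(unsigned long, GFP_NOWAIT);
        if (!example_counters)
                return -ENOMEM;
        return 0;
}
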
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index d5dd4657c8d6..50e50095c8d1 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -12,6 +12,7 @@
12#include <linux/threads.h> 12#include <linux/threads.h>
13#include <linux/percpu.h> 13#include <linux/percpu.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/gfp.h>
15 16
16#ifdef CONFIG_SMP 17#ifdef CONFIG_SMP
17 18
@@ -26,14 +27,14 @@ struct percpu_counter {
26 27
27extern int percpu_counter_batch; 28extern int percpu_counter_batch;
28 29
29int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, 30int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
30 struct lock_class_key *key); 31 struct lock_class_key *key);
31 32
32#define percpu_counter_init(fbc, value) \ 33#define percpu_counter_init(fbc, value, gfp) \
33 ({ \ 34 ({ \
34 static struct lock_class_key __key; \ 35 static struct lock_class_key __key; \
35 \ 36 \
36 __percpu_counter_init(fbc, value, &__key); \ 37 __percpu_counter_init(fbc, value, gfp, &__key); \
37 }) 38 })
38 39
39void percpu_counter_destroy(struct percpu_counter *fbc); 40void percpu_counter_destroy(struct percpu_counter *fbc);
@@ -89,7 +90,8 @@ struct percpu_counter {
89 s64 count; 90 s64 count;
90}; 91};
91 92
92static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount) 93static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
94 gfp_t gfp)
93{ 95{
94 fbc->count = amount; 96 fbc->count = amount;
95 return 0; 97 return 0;
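percpu_counter_init() now takes the allocation mask explicitly (and the UP stub gains the same parameter). A minimal before/after sketch; the counter itself is illustrative:

#include <linux/percpu_counter.h>

static struct percpu_counter example_counter;

static int example_counter_setup(void)
{
        /* Previously: percpu_counter_init(&example_counter, 0); */
        return percpu_counter_init(&example_counter, 0, GFP_KERNEL);
}
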
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 707617a8c0f6..893a0d07986f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -52,6 +52,7 @@ struct perf_guest_info_callbacks {
52#include <linux/atomic.h> 52#include <linux/atomic.h>
53#include <linux/sysfs.h> 53#include <linux/sysfs.h>
54#include <linux/perf_regs.h> 54#include <linux/perf_regs.h>
55#include <linux/workqueue.h>
55#include <asm/local.h> 56#include <asm/local.h>
56 57
57struct perf_callchain_entry { 58struct perf_callchain_entry {
@@ -268,6 +269,7 @@ struct pmu {
268 * enum perf_event_active_state - the states of a event 269 * enum perf_event_active_state - the states of a event
269 */ 270 */
270enum perf_event_active_state { 271enum perf_event_active_state {
272 PERF_EVENT_STATE_EXIT = -3,
271 PERF_EVENT_STATE_ERROR = -2, 273 PERF_EVENT_STATE_ERROR = -2,
272 PERF_EVENT_STATE_OFF = -1, 274 PERF_EVENT_STATE_OFF = -1,
273 PERF_EVENT_STATE_INACTIVE = 0, 275 PERF_EVENT_STATE_INACTIVE = 0,
@@ -507,6 +509,9 @@ struct perf_event_context {
507 int nr_cgroups; /* cgroup evts */ 509 int nr_cgroups; /* cgroup evts */
508 int nr_branch_stack; /* branch_stack evt */ 510 int nr_branch_stack; /* branch_stack evt */
509 struct rcu_head rcu_head; 511 struct rcu_head rcu_head;
512
513 struct delayed_work orphans_remove;
514 bool orphans_remove_sched;
510}; 515};
511 516
512/* 517/*
@@ -604,6 +609,13 @@ struct perf_sample_data {
604 u64 txn; 609 u64 txn;
605}; 610};
606 611
612/* default value for data source */
613#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
614 PERF_MEM_S(LVL, NA) |\
615 PERF_MEM_S(SNOOP, NA) |\
616 PERF_MEM_S(LOCK, NA) |\
617 PERF_MEM_S(TLB, NA))
618
607static inline void perf_sample_data_init(struct perf_sample_data *data, 619static inline void perf_sample_data_init(struct perf_sample_data *data,
608 u64 addr, u64 period) 620 u64 addr, u64 period)
609{ 621{
@@ -616,7 +628,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
616 data->regs_user.regs = NULL; 628 data->regs_user.regs = NULL;
617 data->stack_user_size = 0; 629 data->stack_user_size = 0;
618 data->weight = 0; 630 data->weight = 0;
619 data->data_src.val = 0; 631 data->data_src.val = PERF_MEM_NA;
620 data->txn = 0; 632 data->txn = 0;
621} 633}
622 634
diff --git a/include/linux/phonedev.h b/include/linux/phonedev.h
deleted file mode 100644
index 4269de99e320..000000000000
--- a/include/linux/phonedev.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __LINUX_PHONEDEV_H
2#define __LINUX_PHONEDEV_H
3
4#include <linux/types.h>
5
6#ifdef __KERNEL__
7
8#include <linux/poll.h>
9
10struct phone_device {
11 struct phone_device *next;
12 const struct file_operations *f_op;
13 int (*open) (struct phone_device *, struct file *);
14 int board; /* Device private index */
15 int minor;
16};
17
18extern int phonedev_init(void);
19#define PHONE_MAJOR 100
20extern int phone_register_device(struct phone_device *, int unit);
21#define PHONE_UNIT_ANY -1
22extern void phone_unregister_device(struct phone_device *);
23
24#endif
25#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index ed39956b5613..d090cfcaa167 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -598,6 +598,19 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
598} 598}
599 599
600/** 600/**
601 * phy_read_mmd_indirect - reads data from the MMD registers
602 * @phydev: The PHY device bus
603 * @prtad: MMD Address
604 * @devad: MMD DEVAD
605 * @addr: PHY address on the MII bus
606 *
607 * Description: it reads data from the MMD registers (clause 22 to access to
608 * clause 45) of the specified phy address.
609 */
610int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
611 int devad, int addr);
612
613/**
601 * phy_read - Convenience function for reading a given PHY register 614 * phy_read - Convenience function for reading a given PHY register
602 * @phydev: the phy_device struct 615 * @phydev: the phy_device struct
603 * @regnum: register number to read 616 * @regnum: register number to read
@@ -668,6 +681,20 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad,
668 return mdiobus_write(phydev->bus, phydev->addr, regnum, val); 681 return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
669} 682}
670 683
684/**
685 * phy_write_mmd_indirect - writes data to the MMD registers
686 * @phydev: The PHY device
687 * @prtad: MMD Address
688 * @devad: MMD DEVAD
689 * @addr: PHY address on the MII bus
690 * @data: data to write in the MMD register
691 *
692 * Description: Write data from the MMD registers of the specified
693 * phy address.
694 */
695void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
696 int devad, int addr, u32 data);
697
671struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, 698struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
672 bool is_c45, 699 bool is_c45,
673 struct phy_c45_device_ids *c45_ids); 700 struct phy_c45_device_ids *c45_ids);
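phy.h above exports phy_read_mmd_indirect()/phy_write_mmd_indirect() for clause-45 MMD access through clause-22 registers. A sketch of a read-modify-write on a PCS MMD register; the register number 0x8000 and the bit being set are placeholders, not real DRV registers:

#include <linux/phy.h>
#include <linux/mdio.h>

/* Hypothetical: set bit 0 of vendor register 0x8000 in the PCS MMD. */
static void example_tweak_pcs_reg(struct phy_device *phydev)
{
        int val;

        val = phy_read_mmd_indirect(phydev, 0x8000, MDIO_MMD_PCS, phydev->addr);
        if (val < 0)
                return;

        phy_write_mmd_indirect(phydev, 0x8000, MDIO_MMD_PCS, phydev->addr,
                               val | 0x1);
}
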
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index ae612acebb53..f2ca1b459377 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -14,34 +14,35 @@ struct device_node;
14#ifdef CONFIG_FIXED_PHY 14#ifdef CONFIG_FIXED_PHY
15extern int fixed_phy_add(unsigned int irq, int phy_id, 15extern int fixed_phy_add(unsigned int irq, int phy_id,
16 struct fixed_phy_status *status); 16 struct fixed_phy_status *status);
17extern int fixed_phy_register(unsigned int irq, 17extern struct phy_device *fixed_phy_register(unsigned int irq,
18 struct fixed_phy_status *status, 18 struct fixed_phy_status *status,
19 struct device_node *np); 19 struct device_node *np);
20extern void fixed_phy_del(int phy_addr); 20extern void fixed_phy_del(int phy_addr);
21extern int fixed_phy_set_link_update(struct phy_device *phydev,
22 int (*link_update)(struct net_device *,
23 struct fixed_phy_status *));
21#else 24#else
22static inline int fixed_phy_add(unsigned int irq, int phy_id, 25static inline int fixed_phy_add(unsigned int irq, int phy_id,
23 struct fixed_phy_status *status) 26 struct fixed_phy_status *status)
24{ 27{
25 return -ENODEV; 28 return -ENODEV;
26} 29}
27static inline int fixed_phy_register(unsigned int irq, 30static inline struct phy_device *fixed_phy_register(unsigned int irq,
28 struct fixed_phy_status *status, 31 struct fixed_phy_status *status,
29 struct device_node *np) 32 struct device_node *np)
30{ 33{
31 return -ENODEV; 34 return ERR_PTR(-ENODEV);
32} 35}
33static inline int fixed_phy_del(int phy_addr) 36static inline int fixed_phy_del(int phy_addr)
34{ 37{
35 return -ENODEV; 38 return -ENODEV;
36} 39}
37#endif /* CONFIG_FIXED_PHY */ 40static inline int fixed_phy_set_link_update(struct phy_device *phydev,
38
39/*
40 * This function issued only by fixed_phy-aware drivers, no need
41 * protect it with #ifdef
42 */
43extern int fixed_phy_set_link_update(struct phy_device *phydev,
44 int (*link_update)(struct net_device *, 41 int (*link_update)(struct net_device *,
45 struct fixed_phy_status *)); 42 struct fixed_phy_status *))
43{
44 return -ENODEV;
45}
46#endif /* CONFIG_FIXED_PHY */
46 47
47#endif /* __PHY_FIXED_H */ 48#endif /* __PHY_FIXED_H */
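fixed_phy_register() now returns a struct phy_device * (an ERR_PTR on failure) and fixed_phy_set_link_update() gets a stub when CONFIG_FIXED_PHY is off. A registration sketch; the 1 Gbit/s full-duplex status values are illustrative:

#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/err.h>

static struct phy_device *example_register_fixed_link(struct device_node *np)
{
        struct fixed_phy_status status = {
                .link   = 1,
                .speed  = 1000,
                .duplex = 1,
        };

        /* Returns ERR_PTR(-ENODEV) when fixed PHY support is not built in. */
        return fixed_phy_register(PHY_POLL, &status, np);
}
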
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index a15f10727eb8..d578a60eff23 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -57,7 +57,7 @@
57 * which are then pulled up with an external resistor. Setting this 57 * which are then pulled up with an external resistor. Setting this
58 * config will enable open drain mode, the argument is ignored. 58 * config will enable open drain mode, the argument is ignored.
59 * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source 59 * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
60 * (open emitter). Setting this config will enable open drain mode, the 60 * (open emitter). Setting this config will enable open source mode, the
61 * argument is ignored. 61 * argument is ignored.
62 * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current 62 * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
63 * passed as argument. The argument is in mA. 63 * passed as argument. The argument is in mA.
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 3097aafbeb24..511bda9ed4bf 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -39,13 +39,12 @@ struct pinctrl_dev;
39 * name can be used with the generic @pinctrl_ops to retrieve the 39 * name can be used with the generic @pinctrl_ops to retrieve the
40 * actual pins affected. The applicable groups will be returned in 40 * actual pins affected. The applicable groups will be returned in
41 * @groups and the number of groups in @num_groups 41 * @groups and the number of groups in @num_groups
42 * @enable: enable a certain muxing function with a certain pin group. The 42 * @set_mux: enable a certain muxing function with a certain pin group. The
43 * driver does not need to figure out whether enabling this function 43 * driver does not need to figure out whether enabling this function
44 * conflicts some other use of the pins in that group, such collisions 44 * conflicts some other use of the pins in that group, such collisions
45 * are handled by the pinmux subsystem. The @func_selector selects a 45 * are handled by the pinmux subsystem. The @func_selector selects a
46 * certain function whereas @group_selector selects a certain set of pins 46 * certain function whereas @group_selector selects a certain set of pins
47 * to be used. On simple controllers the latter argument may be ignored 47 * to be used. On simple controllers the latter argument may be ignored
48 * @disable: disable a certain muxing selector with a certain pin group
49 * @gpio_request_enable: requests and enables GPIO on a certain pin. 48 * @gpio_request_enable: requests and enables GPIO on a certain pin.
50 * Implement this only if you can mux every pin individually as GPIO. The 49 * Implement this only if you can mux every pin individually as GPIO. The
51 * affected GPIO range is passed along with an offset(pin number) into that 50 * affected GPIO range is passed along with an offset(pin number) into that
@@ -68,8 +67,8 @@ struct pinmux_ops {
68 unsigned selector, 67 unsigned selector,
69 const char * const **groups, 68 const char * const **groups,
70 unsigned * const num_groups); 69 unsigned * const num_groups);
71 int (*enable) (struct pinctrl_dev *pctldev, unsigned func_selector, 70 int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
72 unsigned group_selector); 71 unsigned group_selector);
73 int (*gpio_request_enable) (struct pinctrl_dev *pctldev, 72 int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
74 struct pinctrl_gpio_range *range, 73 struct pinctrl_gpio_range *range,
75 unsigned offset); 74 unsigned offset);
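The pinmux_ops callback formerly named .enable is now .set_mux, and the .disable hook is gone. A sketch of a driver-side ops table under the new name; the driver callbacks are hypothetical:

#include <linux/pinctrl/pinmux.h>

/* Hypothetical driver callback: program the mux for (function, group). */
static int example_pmx_set_mux(struct pinctrl_dev *pctldev,
                               unsigned func_selector,
                               unsigned group_selector)
{
        /* write the relevant mux registers here */
        return 0;
}

static const struct pinmux_ops example_pmx_ops = {
        .set_mux        = example_pmx_set_mux,  /* was .enable */
        /* .get_functions_count, .get_function_name, .get_function_groups, ... */
};
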
diff --git a/include/linux/platform_data/drv260x-pdata.h b/include/linux/platform_data/drv260x-pdata.h
new file mode 100644
index 000000000000..0a03b0944411
--- /dev/null
+++ b/include/linux/platform_data/drv260x-pdata.h
@@ -0,0 +1,28 @@
1/*
2 * Platform data for DRV260X haptics driver family
3 *
4 * Author: Dan Murphy <dmurphy@ti.com>
5 *
6 * Copyright: (C) 2014 Texas Instruments, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef _LINUX_DRV260X_PDATA_H
19#define _LINUX_DRV260X_PDATA_H
20
21struct drv260x_platform_data {
22 u32 library_selection;
23 u32 mode;
24 u32 vib_rated_voltage;
25 u32 vib_overdrive_voltage;
26};
27
28#endif
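The new drv260x-pdata.h carries board data for the DRV260x haptics family. A minimal definition a board file might supply; all values, and the voltage unit, are assumptions rather than anything stated in the header:

#include <linux/types.h>
#include <linux/platform_data/drv260x-pdata.h>

/* Hypothetical board values; check the DRV260x driver and datasheet for real ones. */
static struct drv260x_platform_data example_drv260x_pdata = {
        .library_selection      = 1,
        .mode                   = 0,
        .vib_rated_voltage      = 3000, /* assumed to be millivolts */
        .vib_overdrive_voltage  = 3600, /* assumed to be millivolts */
};
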
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
new file mode 100644
index 000000000000..28702c849af1
--- /dev/null
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright(c) 2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#ifndef GPIO_DW_APB_H
15#define GPIO_DW_APB_H
16
17struct dwapb_port_property {
18 struct device_node *node;
19 const char *name;
20 unsigned int idx;
21 unsigned int ngpio;
22 unsigned int gpio_base;
23 unsigned int irq;
24 bool irq_shared;
25};
26
27struct dwapb_platform_data {
28 struct dwapb_port_property *properties;
29 unsigned int nports;
30};
31
32#endif
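gpio-dwapb.h lets platform code describe the ports of a DesignWare APB GPIO block. A single-bank sketch; the port name, GPIO count and IRQ wiring are illustrative and the gpio_base semantics are assumed:

#include <linux/kernel.h>
#include <linux/platform_data/gpio-dwapb.h>

static struct dwapb_port_property example_dwapb_ports[] = {
        {
                .name           = "portA",
                .idx            = 0,
                .ngpio          = 32,
                .gpio_base      = 0,    /* base GPIO number, semantics assumed */
                .irq            = 0,    /* no interrupt wired in this sketch */
                .irq_shared     = false,
        },
};

static struct dwapb_platform_data example_dwapb_pdata = {
        .properties     = example_dwapb_ports,
        .nports         = ARRAY_SIZE(example_dwapb_ports),
};
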
diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
new file mode 100644
index 000000000000..1419133fa69e
--- /dev/null
+++ b/include/linux/platform_data/isl9305.h
@@ -0,0 +1,30 @@
1/*
2 * isl9305 - Intersil ISL9305 DCDC regulator
3 *
4 * Copyright 2014 Linaro Ltd
5 *
6 * Author: Mark Brown <broonie@kernel.org>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef __ISL9305_H
15#define __ISL9305_H
16
17#define ISL9305_DCD1 0
18#define ISL9305_DCD2 1
19#define ISL9305_LDO1 2
20#define ISL9305_LDO2 3
21
22#define ISL9305_MAX_REGULATOR ISL9305_LDO2
23
24struct regulator_init_data;
25
26struct isl9305_pdata {
27 struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR];
28};
29
30#endif
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 660c029d694f..16ec262dfcc8 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -21,8 +21,17 @@ enum nand_io {
21}; 21};
22 22
23enum omap_ecc { 23enum omap_ecc {
24 /* 1-bit ECC calculation by GPMC, Error detection by Software */ 24 /*
25 OMAP_ECC_HAM1_CODE_HW = 0, 25 * 1-bit ECC: calculation and correction by SW
26 * ECC stored at end of spare area
27 */
28 OMAP_ECC_HAM1_CODE_SW = 0,
29
30 /*
31 * 1-bit ECC: calculation by GPMC, Error detection by Software
32 * ECC layout compatible with ROM code layout
33 */
34 OMAP_ECC_HAM1_CODE_HW,
26 /* 4-bit ECC calculation by GPMC, Error detection by Software */ 35 /* 4-bit ECC calculation by GPMC, Error detection by Software */
27 OMAP_ECC_BCH4_CODE_HW_DETECTION_SW, 36 OMAP_ECC_BCH4_CODE_HW_DETECTION_SW,
28 /* 4-bit ECC calculation by GPMC, Error detection by ELM */ 37 /* 4-bit ECC calculation by GPMC, Error detection by ELM */
diff --git a/include/linux/platform_data/samsung-usbphy.h b/include/linux/platform_data/samsung-usbphy.h
deleted file mode 100644
index 1bd24cba982b..000000000000
--- a/include/linux/platform_data/samsung-usbphy.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * http://www.samsung.com/
4 * Author: Praveen Paneri <p.paneri@samsung.com>
5 *
6 * Defines platform data for samsung usb phy driver.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef __SAMSUNG_USBPHY_PLATFORM_H
15#define __SAMSUNG_USBPHY_PLATFORM_H
16
17/**
18 * samsung_usbphy_data - Platform data for USB PHY driver.
19 * @pmu_isolation: Function to control usb phy isolation in PMU.
20 */
21struct samsung_usbphy_data {
22 void (*pmu_isolation)(int on);
23};
24
25extern void samsung_usbphy_set_pdata(struct samsung_usbphy_data *pd);
26
27#endif /* __SAMSUNG_USBPHY_PLATFORM_H */
diff --git a/include/linux/platform_data/tegra_emc.h b/include/linux/platform_data/tegra_emc.h
deleted file mode 100644
index df67505e98f8..000000000000
--- a/include/linux/platform_data/tegra_emc.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (C) 2011 Google, Inc.
3 *
4 * Author:
5 * Colin Cross <ccross@android.com>
6 * Olof Johansson <olof@lixom.net>
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#ifndef __TEGRA_EMC_H_
20#define __TEGRA_EMC_H_
21
22#define TEGRA_EMC_NUM_REGS 46
23
24struct tegra_emc_table {
25 unsigned long rate;
26 u32 regs[TEGRA_EMC_NUM_REGS];
27};
28
29struct tegra_emc_pdata {
30 int num_tables;
31 struct tegra_emc_table *tables;
32};
33
34#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 72c0fe098a27..383fd68aaee1 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -619,6 +619,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
619 */ 619 */
620struct dev_pm_domain { 620struct dev_pm_domain {
621 struct dev_pm_ops ops; 621 struct dev_pm_ops ops;
622 void (*detach)(struct device *dev, bool power_off);
622}; 623};
623 624
624/* 625/*
@@ -679,12 +680,16 @@ struct dev_pm_domain {
679extern void device_pm_lock(void); 680extern void device_pm_lock(void);
680extern void dpm_resume_start(pm_message_t state); 681extern void dpm_resume_start(pm_message_t state);
681extern void dpm_resume_end(pm_message_t state); 682extern void dpm_resume_end(pm_message_t state);
683extern void dpm_resume_noirq(pm_message_t state);
684extern void dpm_resume_early(pm_message_t state);
682extern void dpm_resume(pm_message_t state); 685extern void dpm_resume(pm_message_t state);
683extern void dpm_complete(pm_message_t state); 686extern void dpm_complete(pm_message_t state);
684 687
685extern void device_pm_unlock(void); 688extern void device_pm_unlock(void);
686extern int dpm_suspend_end(pm_message_t state); 689extern int dpm_suspend_end(pm_message_t state);
687extern int dpm_suspend_start(pm_message_t state); 690extern int dpm_suspend_start(pm_message_t state);
691extern int dpm_suspend_noirq(pm_message_t state);
692extern int dpm_suspend_late(pm_message_t state);
688extern int dpm_suspend(pm_message_t state); 693extern int dpm_suspend(pm_message_t state);
689extern int dpm_prepare(pm_message_t state); 694extern int dpm_prepare(pm_message_t state);
690 695
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 7c1d252b20c0..73e938b7e937 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -35,18 +35,10 @@ struct gpd_dev_ops {
35 int (*stop)(struct device *dev); 35 int (*stop)(struct device *dev);
36 int (*save_state)(struct device *dev); 36 int (*save_state)(struct device *dev);
37 int (*restore_state)(struct device *dev); 37 int (*restore_state)(struct device *dev);
38 int (*suspend)(struct device *dev);
39 int (*suspend_late)(struct device *dev);
40 int (*resume_early)(struct device *dev);
41 int (*resume)(struct device *dev);
42 int (*freeze)(struct device *dev);
43 int (*freeze_late)(struct device *dev);
44 int (*thaw_early)(struct device *dev);
45 int (*thaw)(struct device *dev);
46 bool (*active_wakeup)(struct device *dev); 38 bool (*active_wakeup)(struct device *dev);
47}; 39};
48 40
49struct gpd_cpu_data { 41struct gpd_cpuidle_data {
50 unsigned int saved_exit_latency; 42 unsigned int saved_exit_latency;
51 struct cpuidle_state *idle_state; 43 struct cpuidle_state *idle_state;
52}; 44};
@@ -60,7 +52,7 @@ struct generic_pm_domain {
60 struct mutex lock; 52 struct mutex lock;
61 struct dev_power_governor *gov; 53 struct dev_power_governor *gov;
62 struct work_struct power_off_work; 54 struct work_struct power_off_work;
63 char *name; 55 const char *name;
64 unsigned int in_progress; /* Number of devices being suspended now */ 56 unsigned int in_progress; /* Number of devices being suspended now */
65 atomic_t sd_count; /* Number of subdomains with power "on" */ 57 atomic_t sd_count; /* Number of subdomains with power "on" */
66 enum gpd_status status; /* Current state of the domain */ 58 enum gpd_status status; /* Current state of the domain */
@@ -71,7 +63,6 @@ struct generic_pm_domain {
71 unsigned int suspended_count; /* System suspend device counter */ 63 unsigned int suspended_count; /* System suspend device counter */
72 unsigned int prepared_count; /* Suspend counter of prepared devices */ 64 unsigned int prepared_count; /* Suspend counter of prepared devices */
73 bool suspend_power_off; /* Power status before system suspend */ 65 bool suspend_power_off; /* Power status before system suspend */
74 bool dev_irq_safe; /* Device callbacks are IRQ-safe */
75 int (*power_off)(struct generic_pm_domain *domain); 66 int (*power_off)(struct generic_pm_domain *domain);
76 s64 power_off_latency_ns; 67 s64 power_off_latency_ns;
77 int (*power_on)(struct generic_pm_domain *domain); 68 int (*power_on)(struct generic_pm_domain *domain);
@@ -80,8 +71,9 @@ struct generic_pm_domain {
80 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ 71 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
81 bool max_off_time_changed; 72 bool max_off_time_changed;
82 bool cached_power_down_ok; 73 bool cached_power_down_ok;
83 struct device_node *of_node; /* Node in device tree */ 74 struct gpd_cpuidle_data *cpuidle_data;
84 struct gpd_cpu_data *cpu_data; 75 void (*attach_dev)(struct device *dev);
76 void (*detach_dev)(struct device *dev);
85}; 77};
86 78
87static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) 79static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -108,7 +100,6 @@ struct gpd_timing_data {
108 100
109struct generic_pm_domain_data { 101struct generic_pm_domain_data {
110 struct pm_domain_data base; 102 struct pm_domain_data base;
111 struct gpd_dev_ops ops;
112 struct gpd_timing_data td; 103 struct gpd_timing_data td;
113 struct notifier_block nb; 104 struct notifier_block nb;
114 struct mutex lock; 105 struct mutex lock;
@@ -127,17 +118,11 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
127 return to_gpd_data(dev->power.subsys_data->domain_data); 118 return to_gpd_data(dev->power.subsys_data->domain_data);
128} 119}
129 120
130extern struct dev_power_governor simple_qos_governor;
131
132extern struct generic_pm_domain *dev_to_genpd(struct device *dev); 121extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
133extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, 122extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
134 struct device *dev, 123 struct device *dev,
135 struct gpd_timing_data *td); 124 struct gpd_timing_data *td);
136 125
137extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
138 struct device *dev,
139 struct gpd_timing_data *td);
140
141extern int __pm_genpd_name_add_device(const char *domain_name, 126extern int __pm_genpd_name_add_device(const char *domain_name,
142 struct device *dev, 127 struct device *dev,
143 struct gpd_timing_data *td); 128 struct gpd_timing_data *td);
@@ -151,10 +136,6 @@ extern int pm_genpd_add_subdomain_names(const char *master_name,
151 const char *subdomain_name); 136 const char *subdomain_name);
152extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 137extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
153 struct generic_pm_domain *target); 138 struct generic_pm_domain *target);
154extern int pm_genpd_add_callbacks(struct device *dev,
155 struct gpd_dev_ops *ops,
156 struct gpd_timing_data *td);
157extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
158extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); 139extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
159extern int pm_genpd_name_attach_cpuidle(const char *name, int state); 140extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
160extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); 141extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
@@ -165,8 +146,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd,
165extern int pm_genpd_poweron(struct generic_pm_domain *genpd); 146extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
166extern int pm_genpd_name_poweron(const char *domain_name); 147extern int pm_genpd_name_poweron(const char *domain_name);
167 148
168extern bool default_stop_ok(struct device *dev); 149extern struct dev_power_governor simple_qos_governor;
169
170extern struct dev_power_governor pm_domain_always_on_gov; 150extern struct dev_power_governor pm_domain_always_on_gov;
171#else 151#else
172 152
@@ -184,12 +164,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
184{ 164{
185 return -ENOSYS; 165 return -ENOSYS;
186} 166}
187static inline int __pm_genpd_of_add_device(struct device_node *genpd_node,
188 struct device *dev,
189 struct gpd_timing_data *td)
190{
191 return -ENOSYS;
192}
193static inline int __pm_genpd_name_add_device(const char *domain_name, 167static inline int __pm_genpd_name_add_device(const char *domain_name,
194 struct device *dev, 168 struct device *dev,
195 struct gpd_timing_data *td) 169 struct gpd_timing_data *td)
@@ -217,16 +191,6 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
217{ 191{
218 return -ENOSYS; 192 return -ENOSYS;
219} 193}
220static inline int pm_genpd_add_callbacks(struct device *dev,
221 struct gpd_dev_ops *ops,
222 struct gpd_timing_data *td)
223{
224 return -ENOSYS;
225}
226static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
227{
228 return -ENOSYS;
229}
230static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) 194static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
231{ 195{
232 return -ENOSYS; 196 return -ENOSYS;
@@ -255,10 +219,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
255{ 219{
256 return -ENOSYS; 220 return -ENOSYS;
257} 221}
258static inline bool default_stop_ok(struct device *dev)
259{
260 return false;
261}
262#define simple_qos_governor NULL 222#define simple_qos_governor NULL
263#define pm_domain_always_on_gov NULL 223#define pm_domain_always_on_gov NULL
264#endif 224#endif
@@ -269,45 +229,87 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
269 return __pm_genpd_add_device(genpd, dev, NULL); 229 return __pm_genpd_add_device(genpd, dev, NULL);
270} 230}
271 231
272static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
273 struct device *dev)
274{
275 return __pm_genpd_of_add_device(genpd_node, dev, NULL);
276}
277
278static inline int pm_genpd_name_add_device(const char *domain_name, 232static inline int pm_genpd_name_add_device(const char *domain_name,
279 struct device *dev) 233 struct device *dev)
280{ 234{
281 return __pm_genpd_name_add_device(domain_name, dev, NULL); 235 return __pm_genpd_name_add_device(domain_name, dev, NULL);
282} 236}
283 237
284static inline int pm_genpd_remove_callbacks(struct device *dev)
285{
286 return __pm_genpd_remove_callbacks(dev, true);
287}
288
289#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME 238#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
290extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
291extern void pm_genpd_poweroff_unused(void); 239extern void pm_genpd_poweroff_unused(void);
292#else 240#else
293static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
294static inline void pm_genpd_poweroff_unused(void) {} 241static inline void pm_genpd_poweroff_unused(void) {}
295#endif 242#endif
296 243
297#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP 244#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
298extern void pm_genpd_syscore_switch(struct device *dev, bool suspend); 245extern void pm_genpd_syscore_poweroff(struct device *dev);
246extern void pm_genpd_syscore_poweron(struct device *dev);
299#else 247#else
300static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {} 248static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
249static inline void pm_genpd_syscore_poweron(struct device *dev) {}
301#endif 250#endif
302 251
303static inline void pm_genpd_syscore_poweroff(struct device *dev) 252/* OF PM domain providers */
253struct of_device_id;
254
255struct genpd_onecell_data {
256 struct generic_pm_domain **domains;
257 unsigned int num_domains;
258};
259
260typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
261 void *data);
262
263#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
264int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
265 void *data);
266void of_genpd_del_provider(struct device_node *np);
267
268struct generic_pm_domain *__of_genpd_xlate_simple(
269 struct of_phandle_args *genpdspec,
270 void *data);
271struct generic_pm_domain *__of_genpd_xlate_onecell(
272 struct of_phandle_args *genpdspec,
273 void *data);
274
275int genpd_dev_pm_attach(struct device *dev);
276#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
277static inline int __of_genpd_add_provider(struct device_node *np,
278 genpd_xlate_t xlate, void *data)
279{
280 return 0;
281}
282static inline void of_genpd_del_provider(struct device_node *np) {}
283
284#define __of_genpd_xlate_simple NULL
285#define __of_genpd_xlate_onecell NULL
286
287static inline int genpd_dev_pm_attach(struct device *dev)
288{
289 return -ENODEV;
290}
291#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
292
293static inline int of_genpd_add_provider_simple(struct device_node *np,
294 struct generic_pm_domain *genpd)
295{
296 return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
297}
298static inline int of_genpd_add_provider_onecell(struct device_node *np,
299 struct genpd_onecell_data *data)
304{ 300{
305 pm_genpd_syscore_switch(dev, true); 301 return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
306} 302}
307 303
308static inline void pm_genpd_syscore_poweron(struct device *dev) 304#ifdef CONFIG_PM
305extern int dev_pm_domain_attach(struct device *dev, bool power_on);
306extern void dev_pm_domain_detach(struct device *dev, bool power_off);
307#else
308static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
309{ 309{
310 pm_genpd_syscore_switch(dev, false); 310 return -ENODEV;
311} 311}
312static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
313#endif
312 314
313#endif /* _LINUX_PM_DOMAIN_H */ 315#endif /* _LINUX_PM_DOMAIN_H */
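pm_domain.h above gains OF genpd providers (genpd_onecell_data, of_genpd_add_provider_simple()/of_genpd_add_provider_onecell(), genpd_dev_pm_attach()) plus generic dev_pm_domain_attach()/dev_pm_domain_detach(). A sketch of a provider registering a single domain and of a bus-level consumer path attaching a device; the names, the pm_genpd_init() arguments and the error handling are illustrative:

#include <linux/pm_domain.h>
#include <linux/of.h>
#include <linux/errno.h>

static struct generic_pm_domain example_pd = {
        .name = "example-pd",
        /* .power_on / .power_off callbacks would be filled in here */
};

/* Provider side: make the domain available to DT consumers. */
static int example_pd_register(struct device_node *np)
{
        pm_genpd_init(&example_pd, NULL, false);
        return of_genpd_add_provider_simple(np, &example_pd);
}

/* Consumer side: attach a device to its PM domain before probing it. */
static int example_bus_probe(struct device *dev)
{
        int ret = dev_pm_domain_attach(dev, true);

        if (ret == -EPROBE_DEFER)
                return ret;
        /* -ENODEV just means "no PM domain"; probing can continue. */
        return 0;
}
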
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 9d117f61d976..b97bf2ef996e 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -74,6 +74,8 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
74 74
75#endif /* CONFIG_PROC_FS */ 75#endif /* CONFIG_PROC_FS */
76 76
77struct net;
78
77static inline struct proc_dir_entry *proc_net_mkdir( 79static inline struct proc_dir_entry *proc_net_mkdir(
78 struct net *net, const char *name, struct proc_dir_entry *parent) 80 struct net *net, const char *name, struct proc_dir_entry *parent)
79{ 81{
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 26a8a4ed9b07..00e8e8fa7358 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -12,6 +12,7 @@
12#include <linux/percpu_counter.h> 12#include <linux/percpu_counter.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/gfp.h>
15 16
16struct prop_global { 17struct prop_global {
17 /* 18 /*
@@ -40,7 +41,7 @@ struct prop_descriptor {
40 struct mutex mutex; /* serialize the prop_global switch */ 41 struct mutex mutex; /* serialize the prop_global switch */
41}; 42};
42 43
43int prop_descriptor_init(struct prop_descriptor *pd, int shift); 44int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
44void prop_change_shift(struct prop_descriptor *pd, int new_shift); 45void prop_change_shift(struct prop_descriptor *pd, int new_shift);
45 46
46/* 47/*
@@ -61,7 +62,7 @@ struct prop_local_percpu {
61 raw_spinlock_t lock; /* protect the snapshot state */ 62 raw_spinlock_t lock; /* protect the snapshot state */
62}; 63};
63 64
64int prop_local_init_percpu(struct prop_local_percpu *pl); 65int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
65void prop_local_destroy_percpu(struct prop_local_percpu *pl); 66void prop_local_destroy_percpu(struct prop_local_percpu *pl);
66void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); 67void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
67void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl, 68void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
diff --git a/include/linux/random.h b/include/linux/random.h
index 57fbbffd77a0..b05856e16b75 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -26,7 +26,7 @@ unsigned int get_random_int(void);
26unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); 26unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
27 27
28u32 prandom_u32(void); 28u32 prandom_u32(void);
29void prandom_bytes(void *buf, int nbytes); 29void prandom_bytes(void *buf, size_t nbytes);
30void prandom_seed(u32 seed); 30void prandom_seed(u32 seed);
31void prandom_reseed_late(void); 31void prandom_reseed_late(void);
32 32
@@ -35,7 +35,7 @@ struct rnd_state {
35}; 35};
36 36
37u32 prandom_u32_state(struct rnd_state *state); 37u32 prandom_u32_state(struct rnd_state *state);
38void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes); 38void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
39 39
40/** 40/**
41 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) 41 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d231aa17b1d7..a4a819ffb2d1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -47,14 +47,12 @@
47#include <asm/barrier.h> 47#include <asm/barrier.h>
48 48
49extern int rcu_expedited; /* for sysctl */ 49extern int rcu_expedited; /* for sysctl */
50#ifdef CONFIG_RCU_TORTURE_TEST
51extern int rcutorture_runnable; /* for sysctl */
52#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
53 50
54enum rcutorture_type { 51enum rcutorture_type {
55 RCU_FLAVOR, 52 RCU_FLAVOR,
56 RCU_BH_FLAVOR, 53 RCU_BH_FLAVOR,
57 RCU_SCHED_FLAVOR, 54 RCU_SCHED_FLAVOR,
55 RCU_TASKS_FLAVOR,
58 SRCU_FLAVOR, 56 SRCU_FLAVOR,
59 INVALID_RCU_FLAVOR 57 INVALID_RCU_FLAVOR
60}; 58};
@@ -197,6 +195,28 @@ void call_rcu_sched(struct rcu_head *head,
197 195
198void synchronize_sched(void); 196void synchronize_sched(void);
199 197
198/**
 199 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
200 * @head: structure to be used for queueing the RCU updates.
201 * @func: actual callback function to be invoked after the grace period
202 *
203 * The callback function will be invoked some time after a full grace
204 * period elapses, in other words after all currently executing RCU
205 * read-side critical sections have completed. call_rcu_tasks() assumes
206 * that the read-side critical sections end at a voluntary context
207 * switch (not a preemption!), entry into idle, or transition to usermode
208 * execution. As such, there are no read-side primitives analogous to
209 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
210 * to determine that all tasks have passed through a safe state, not so
 211 * much for data-structure synchronization.
212 *
213 * See the description of call_rcu() for more detailed information on
214 * memory ordering guarantees.
215 */
216void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
217void synchronize_rcu_tasks(void);
218void rcu_barrier_tasks(void);
219
200#ifdef CONFIG_PREEMPT_RCU 220#ifdef CONFIG_PREEMPT_RCU
201 221
202void __rcu_read_lock(void); 222void __rcu_read_lock(void);
@@ -238,8 +258,8 @@ static inline int rcu_preempt_depth(void)
238 258
239/* Internal to kernel */ 259/* Internal to kernel */
240void rcu_init(void); 260void rcu_init(void);
241void rcu_sched_qs(int cpu); 261void rcu_sched_qs(void);
242void rcu_bh_qs(int cpu); 262void rcu_bh_qs(void);
243void rcu_check_callbacks(int cpu, int user); 263void rcu_check_callbacks(int cpu, int user);
244struct notifier_block; 264struct notifier_block;
245void rcu_idle_enter(void); 265void rcu_idle_enter(void);
@@ -269,6 +289,14 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
269 struct task_struct *next) { } 289 struct task_struct *next) { }
270#endif /* CONFIG_RCU_USER_QS */ 290#endif /* CONFIG_RCU_USER_QS */
271 291
292#ifdef CONFIG_RCU_NOCB_CPU
293void rcu_init_nohz(void);
294#else /* #ifdef CONFIG_RCU_NOCB_CPU */
295static inline void rcu_init_nohz(void)
296{
297}
298#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
299
272/** 300/**
273 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers 301 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
274 * @a: Code that RCU needs to pay attention to. 302 * @a: Code that RCU needs to pay attention to.
@@ -294,6 +322,36 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
294 rcu_irq_exit(); \ 322 rcu_irq_exit(); \
295 } while (0) 323 } while (0)
296 324
325/*
326 * Note a voluntary context switch for RCU-tasks benefit. This is a
327 * macro rather than an inline function to avoid #include hell.
328 */
329#ifdef CONFIG_TASKS_RCU
330#define TASKS_RCU(x) x
331extern struct srcu_struct tasks_rcu_exit_srcu;
332#define rcu_note_voluntary_context_switch(t) \
333 do { \
334 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
335 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
336 } while (0)
337#else /* #ifdef CONFIG_TASKS_RCU */
338#define TASKS_RCU(x) do { } while (0)
339#define rcu_note_voluntary_context_switch(t) do { } while (0)
340#endif /* #else #ifdef CONFIG_TASKS_RCU */
341
342/**
343 * cond_resched_rcu_qs - Report potential quiescent states to RCU
344 *
345 * This macro resembles cond_resched(), except that it is defined to
346 * report potential quiescent states to RCU-tasks even if the cond_resched()
347 * machinery were to be shut off, as some advocate for PREEMPT kernels.
348 */
349#define cond_resched_rcu_qs() \
350do { \
351 rcu_note_voluntary_context_switch(current); \
352 cond_resched(); \
353} while (0)
354
297#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) 355#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
298bool __rcu_is_watching(void); 356bool __rcu_is_watching(void);
299#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 357#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
@@ -349,7 +407,7 @@ bool rcu_lockdep_current_cpu_online(void);
349#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ 407#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
350static inline bool rcu_lockdep_current_cpu_online(void) 408static inline bool rcu_lockdep_current_cpu_online(void)
351{ 409{
352 return 1; 410 return true;
353} 411}
354#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ 412#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
355 413
@@ -371,41 +429,7 @@ extern struct lockdep_map rcu_sched_lock_map;
371extern struct lockdep_map rcu_callback_map; 429extern struct lockdep_map rcu_callback_map;
372int debug_lockdep_rcu_enabled(void); 430int debug_lockdep_rcu_enabled(void);
373 431
374/** 432int rcu_read_lock_held(void);
375 * rcu_read_lock_held() - might we be in RCU read-side critical section?
376 *
377 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
378 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
379 * this assumes we are in an RCU read-side critical section unless it can
380 * prove otherwise. This is useful for debug checks in functions that
381 * require that they be called within an RCU read-side critical section.
382 *
383 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
384 * and while lockdep is disabled.
385 *
386 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
387 * occur in the same context, for example, it is illegal to invoke
388 * rcu_read_unlock() in process context if the matching rcu_read_lock()
389 * was invoked from within an irq handler.
390 *
391 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
392 * offline from an RCU perspective, so check for those as well.
393 */
394static inline int rcu_read_lock_held(void)
395{
396 if (!debug_lockdep_rcu_enabled())
397 return 1;
398 if (!rcu_is_watching())
399 return 0;
400 if (!rcu_lockdep_current_cpu_online())
401 return 0;
402 return lock_is_held(&rcu_lock_map);
403}
404
405/*
406 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
407 * hell.
408 */
409int rcu_read_lock_bh_held(void); 433int rcu_read_lock_bh_held(void);
410 434
411/** 435/**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d40a6a451330..38cc5b1e252d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -80,7 +80,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
80 80
81static inline void rcu_note_context_switch(int cpu) 81static inline void rcu_note_context_switch(int cpu)
82{ 82{
83 rcu_sched_qs(cpu); 83 rcu_sched_qs();
84} 84}
85 85
86/* 86/*
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 48bf152761c7..67fc8fcdc4b0 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -38,6 +38,9 @@ extern int reboot_force;
38extern int register_reboot_notifier(struct notifier_block *); 38extern int register_reboot_notifier(struct notifier_block *);
39extern int unregister_reboot_notifier(struct notifier_block *); 39extern int unregister_reboot_notifier(struct notifier_block *);
40 40
41extern int register_restart_handler(struct notifier_block *);
42extern int unregister_restart_handler(struct notifier_block *);
43extern void do_kernel_restart(char *cmd);
41 44
42/* 45/*
43 * Architecture-specific implementations of sys_reboot commands. 46 * Architecture-specific implementations of sys_reboot commands.
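A minimal sketch (not part of the patch) of a driver hooking the new restart-handler chain; the priority value and what the handler actually pokes are assumptions:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_restart_handler(struct notifier_block *nb,
			      unsigned long mode, void *cmd)
{
	/* Trigger this device's (hypothetical) reset mechanism here. */
	return NOTIFY_DONE;
}

static struct notifier_block my_restart_nb = {
	.notifier_call	= my_restart_handler,
	.priority	= 128,	/* assumed mid/default priority for restart handlers */
};

static int __init my_restart_init(void)
{
	return register_restart_handler(&my_restart_nb);
}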
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f8a8733068a7..d347c805f923 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -93,7 +93,12 @@ struct regmap;
93 * OVER_TEMP Regulator over temp. 93 * OVER_TEMP Regulator over temp.
94 * FORCE_DISABLE Regulator forcibly shut down by software. 94 * FORCE_DISABLE Regulator forcibly shut down by software.
95 * VOLTAGE_CHANGE Regulator voltage changed. 95 * VOLTAGE_CHANGE Regulator voltage changed.
96 * Data passed is old voltage cast to (void *).
96 * DISABLE Regulator was disabled. 97 * DISABLE Regulator was disabled.
98 * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
99 * Data passed is "struct pre_voltage_change_data"
100 * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
101 * Data passed is old voltage cast to (void *).
97 * 102 *
98 * NOTE: These events can be OR'ed together when passed into handler. 103 * NOTE: These events can be OR'ed together when passed into handler.
99 */ 104 */
@@ -106,6 +111,21 @@ struct regmap;
106#define REGULATOR_EVENT_FORCE_DISABLE 0x20 111#define REGULATOR_EVENT_FORCE_DISABLE 0x20
107#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 112#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
108#define REGULATOR_EVENT_DISABLE 0x80 113#define REGULATOR_EVENT_DISABLE 0x80
114#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
115#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
116
117/**
118 * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
119 *
120 * @old_uV: Current voltage before change.
121 * @min_uV: Min voltage we'll change to.
122 * @max_uV: Max voltage we'll change to.
123 */
124struct pre_voltage_change_data {
125 unsigned long old_uV;
126 unsigned long min_uV;
127 unsigned long max_uV;
128};
109 129
110struct regulator; 130struct regulator;
111 131
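A minimal sketch (not in the patch) of a consumer notifier handling the new PRE/ABORT_VOLTAGE_CHANGE events; what the consumer does with the data is illustrative:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/regulator/consumer.h>

static int my_supply_event(struct notifier_block *nb,
			   unsigned long event, void *data)
{
	if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) {
		struct pre_voltage_change_data *d = data;

		pr_debug("supply moving from %lu uV to [%lu, %lu] uV\n",
			 d->old_uV, d->min_uV, d->max_uV);
	} else if (event & REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE) {
		/* Change failed; data is the old voltage cast to (void *). */
		pr_debug("voltage change aborted, still at %lu uV\n",
			 (unsigned long)data);
	}
	return NOTIFY_OK;
}

static struct notifier_block my_supply_nb = {
	.notifier_call = my_supply_event,
};
/* Registered on a consumer handle via regulator_register_notifier(reg, &my_supply_nb). */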
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 0981ce0e72cc..5479394fefce 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * da9211.h - Regulator device driver for DA9211 2 * da9211.h - Regulator device driver for DA9211/DA9213
3 * Copyright (C) 2014 Dialog Semiconductor Ltd. 3 * Copyright (C) 2014 Dialog Semiconductor Ltd.
4 * 4 *
5 * This library is free software; you can redistribute it and/or 5 * This library is free software; you can redistribute it and/or
@@ -20,6 +20,11 @@
20 20
21#define DA9211_MAX_REGULATORS 2 21#define DA9211_MAX_REGULATORS 2
22 22
23enum da9211_chip_id {
24 DA9211,
25 DA9213,
26};
27
23struct da9211_pdata { 28struct da9211_pdata {
24 /* 29 /*
25 * Number of buck 30 * Number of buck
@@ -27,6 +32,6 @@ struct da9211_pdata {
27 * 2 : 2 phase 2 buck 32 * 2 : 2 phase 2 buck
28 */ 33 */
29 int num_buck; 34 int num_buck;
30 struct regulator_init_data *init_data; 35 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
31}; 36};
32#endif 37#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index bbe03a1924c0..fc0ee0ce8325 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -203,6 +203,8 @@ enum regulator_type {
203 * 203 *
204 * @name: Identifying name for the regulator. 204 * @name: Identifying name for the regulator.
205 * @supply_name: Identifying the regulator supply 205 * @supply_name: Identifying the regulator supply
206 * @of_match: Name used to identify regulator in DT.
207 * @regulators_node: Name of node containing regulator definitions in DT.
206 * @id: Numerical identifier for the regulator. 208 * @id: Numerical identifier for the regulator.
207 * @ops: Regulator operations table. 209 * @ops: Regulator operations table.
208 * @irq: Interrupt number for the regulator. 210 * @irq: Interrupt number for the regulator.
@@ -218,6 +220,8 @@ enum regulator_type {
218 * @linear_min_sel: Minimal selector for starting linear mapping 220 * @linear_min_sel: Minimal selector for starting linear mapping
219 * @fixed_uV: Fixed voltage of rails. 221 * @fixed_uV: Fixed voltage of rails.
220 * @ramp_delay: Time to settle down after voltage change (unit: uV/us) 222 * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
223 * @linear_ranges: A constant table of possible voltage ranges.
224 * @n_linear_ranges: Number of entries in the @linear_ranges table.
221 * @volt_table: Voltage mapping table (if table based mapping) 225 * @volt_table: Voltage mapping table (if table based mapping)
222 * 226 *
223 * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ 227 * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
@@ -238,14 +242,17 @@ enum regulator_type {
238 * @bypass_val_off: Disabling value for control when using regmap set_bypass 242 * @bypass_val_off: Disabling value for control when using regmap set_bypass
239 * 243 *
240 * @enable_time: Time taken for initial enable of regulator (in uS). 244 * @enable_time: Time taken for initial enable of regulator (in uS).
245 * @off_on_delay: guard time (in uS) before re-enabling a regulator 246 */

241 */ 246 */
242struct regulator_desc { 247struct regulator_desc {
243 const char *name; 248 const char *name;
244 const char *supply_name; 249 const char *supply_name;
250 const char *of_match;
251 const char *regulators_node;
245 int id; 252 int id;
246 bool continuous_voltage_range; 253 bool continuous_voltage_range;
247 unsigned n_voltages; 254 unsigned n_voltages;
248 struct regulator_ops *ops; 255 const struct regulator_ops *ops;
249 int irq; 256 int irq;
250 enum regulator_type type; 257 enum regulator_type type;
251 struct module *owner; 258 struct module *owner;
@@ -276,6 +283,8 @@ struct regulator_desc {
276 unsigned int bypass_val_off; 283 unsigned int bypass_val_off;
277 284
278 unsigned int enable_time; 285 unsigned int enable_time;
286
287 unsigned int off_on_delay;
279}; 288};
280 289
281/** 290/**
@@ -348,6 +357,9 @@ struct regulator_dev {
348 357
349 struct regulator_enable_gpio *ena_pin; 358 struct regulator_enable_gpio *ena_pin;
350 unsigned int ena_gpio_state:1; 359 unsigned int ena_gpio_state:1;
360
361 /* time when this regulator was disabled last time */
362 unsigned long last_off_jiffy;
351}; 363};
352 364
353struct regulator_dev * 365struct regulator_dev *
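A minimal sketch (not part of the patch) of a regulator_desc using the new of_match/regulators_node and off_on_delay fields; the names, ops table and delay are placeholders:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>

static const struct regulator_ops my_buck_ops = {
	/* .enable, .disable, .list_voltage, .set_voltage_sel, ... */
};

static const struct regulator_desc my_buck_desc = {
	.name		 = "BUCK1",
	.of_match	 = of_match_ptr("buck1"),	/* child node name in DT */
	.regulators_node = of_match_ptr("regulators"),	/* container node in DT */
	.ops		 = &my_buck_ops,		/* ops pointer is now const-qualified */
	.type		 = REGULATOR_VOLTAGE,
	.owner		 = THIS_MODULE,
	.off_on_delay	 = 500,		/* assumed guard time in uS before re-enable */
};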
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 730e638c5589..0b08d05d470b 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -85,6 +85,7 @@ struct regulator_state {
85 * bootloader then it will be enabled when the constraints are 85 * bootloader then it will be enabled when the constraints are
86 * applied. 86 * applied.
87 * @apply_uV: Apply the voltage constraint when initialising. 87 * @apply_uV: Apply the voltage constraint when initialising.
88 * @ramp_disable: Disable ramp delay when initialising or when setting voltage.
88 * 89 *
89 * @input_uV: Input voltage for regulator when supplied by another regulator. 90 * @input_uV: Input voltage for regulator when supplied by another regulator.
90 * 91 *
diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h
index de9a7fae20be..cedd0febe882 100644
--- a/include/linux/regulator/max1586.h
+++ b/include/linux/regulator/max1586.h
@@ -40,7 +40,7 @@
40 */ 40 */
41struct max1586_subdev_data { 41struct max1586_subdev_data {
42 int id; 42 int id;
43 char *name; 43 const char *name;
44 struct regulator_init_data *platform_data; 44 struct regulator_init_data *platform_data;
45}; 45};
46 46
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 36826c0166c5..fb298e9d6d3a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -44,6 +44,7 @@ struct rhashtable;
44 * @head_offset: Offset of rhash_head in struct to be hashed 44 * @head_offset: Offset of rhash_head in struct to be hashed
45 * @hash_rnd: Seed to use while hashing 45 * @hash_rnd: Seed to use while hashing
46 * @max_shift: Maximum number of shifts while expanding 46 * @max_shift: Maximum number of shifts while expanding
47 * @min_shift: Minimum number of shifts while shrinking
47 * @hashfn: Function to hash key 48 * @hashfn: Function to hash key
48 * @obj_hashfn: Function to hash object 49 * @obj_hashfn: Function to hash object
49 * @grow_decision: If defined, may return true if table should expand 50 * @grow_decision: If defined, may return true if table should expand
@@ -57,6 +58,7 @@ struct rhashtable_params {
57 size_t head_offset; 58 size_t head_offset;
58 u32 hash_rnd; 59 u32 hash_rnd;
59 size_t max_shift; 60 size_t max_shift;
61 size_t min_shift;
60 rht_hashfn_t hashfn; 62 rht_hashfn_t hashfn;
61 rht_obj_hashfn_t obj_hashfn; 63 rht_obj_hashfn_t obj_hashfn;
62 bool (*grow_decision)(const struct rhashtable *ht, 64 bool (*grow_decision)(const struct rhashtable *ht,
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index be574506e6a9..c0c2bce6b0b7 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -150,7 +150,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
150static inline void anon_vma_merge(struct vm_area_struct *vma, 150static inline void anon_vma_merge(struct vm_area_struct *vma,
151 struct vm_area_struct *next) 151 struct vm_area_struct *next)
152{ 152{
153 VM_BUG_ON(vma->anon_vma != next->anon_vma); 153 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
154 unlink_anon_vmas(next); 154 unlink_anon_vmas(next);
155} 155}
156 156
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 167bae7bdfa4..6cacbce1a06c 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -47,6 +47,16 @@ static inline int lockdep_rtnl_is_held(void)
47 rcu_dereference_check(p, lockdep_rtnl_is_held()) 47 rcu_dereference_check(p, lockdep_rtnl_is_held())
48 48
49/** 49/**
50 * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
51 * @p: The pointer to read, prior to dereference
52 *
 53 * Do an rcu_dereference_bh(p), but check that the caller holds either 53 * Do an rcu_dereference_bh(p), but check that the caller holds either
 54 * rcu_read_lock_bh() or RTNL. Note: please prefer rtnl_dereference() or rcu_dereference_bh()
55 */
56#define rcu_dereference_bh_rtnl(p) \
57 rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
58
59/**
50 * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL 60 * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
51 * @p: The pointer to read, prior to dereferencing 61 * @p: The pointer to read, prior to dereferencing
52 * 62 *
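A minimal sketch (not in the patch) of code that may run either under rcu_read_lock_bh() or with RTNL held, using the new checked accessor; the protected pointer is illustrative:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct my_cfg {
	int mtu;
};

static struct my_cfg __rcu *my_cfg_ptr;

static int my_get_mtu(void)
{
	/* Callers hold either rcu_read_lock_bh() or RTNL; lockdep checks both. */
	struct my_cfg *cfg = rcu_dereference_bh_rtnl(my_cfg_ptr);

	return cfg ? cfg->mtu : 0;
}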
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 035d3c57fc8a..8f498cdde280 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -149,7 +149,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
149 * static then another method for expressing nested locking is 149 * static then another method for expressing nested locking is
150 * the explicit definition of lock class keys and the use of 150 * the explicit definition of lock class keys and the use of
151 * lockdep_set_class() at lock initialization time. 151 * lockdep_set_class() at lock initialization time.
152 * See Documentation/lockdep-design.txt for more details.) 152 * See Documentation/locking/lockdep-design.txt for more details.)
153 */ 153 */
154extern void down_read_nested(struct rw_semaphore *sem, int subclass); 154extern void down_read_nested(struct rw_semaphore *sem, int subclass);
155extern void down_write_nested(struct rw_semaphore *sem, int subclass); 155extern void down_write_nested(struct rw_semaphore *sem, int subclass);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5c2c885ee52b..5e344bbe63ec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -57,6 +57,7 @@ struct sched_param {
57#include <linux/llist.h> 57#include <linux/llist.h>
58#include <linux/uidgid.h> 58#include <linux/uidgid.h>
59#include <linux/gfp.h> 59#include <linux/gfp.h>
60#include <linux/magic.h>
60 61
61#include <asm/processor.h> 62#include <asm/processor.h>
62 63
@@ -167,6 +168,7 @@ extern int nr_threads;
167DECLARE_PER_CPU(unsigned long, process_counts); 168DECLARE_PER_CPU(unsigned long, process_counts);
168extern int nr_processes(void); 169extern int nr_processes(void);
169extern unsigned long nr_running(void); 170extern unsigned long nr_running(void);
171extern bool single_task_running(void);
170extern unsigned long nr_iowait(void); 172extern unsigned long nr_iowait(void);
171extern unsigned long nr_iowait_cpu(int cpu); 173extern unsigned long nr_iowait_cpu(int cpu);
172extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); 174extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
@@ -645,6 +647,7 @@ struct signal_struct {
645 * Live threads maintain their own counters and add to these 647 * Live threads maintain their own counters and add to these
646 * in __exit_signal, except for the group leader. 648 * in __exit_signal, except for the group leader.
647 */ 649 */
650 seqlock_t stats_lock;
648 cputime_t utime, stime, cutime, cstime; 651 cputime_t utime, stime, cutime, cstime;
649 cputime_t gtime; 652 cputime_t gtime;
650 cputime_t cgtime; 653 cputime_t cgtime;
@@ -1023,6 +1026,7 @@ struct sched_domain_topology_level {
1023extern struct sched_domain_topology_level *sched_domain_topology; 1026extern struct sched_domain_topology_level *sched_domain_topology;
1024 1027
1025extern void set_sched_topology(struct sched_domain_topology_level *tl); 1028extern void set_sched_topology(struct sched_domain_topology_level *tl);
1029extern void wake_up_if_idle(int cpu);
1026 1030
1027#ifdef CONFIG_SCHED_DEBUG 1031#ifdef CONFIG_SCHED_DEBUG
1028# define SD_INIT_NAME(type) .name = #type 1032# define SD_INIT_NAME(type) .name = #type
@@ -1212,6 +1216,13 @@ struct sched_dl_entity {
1212 struct hrtimer dl_timer; 1216 struct hrtimer dl_timer;
1213}; 1217};
1214 1218
1219union rcu_special {
1220 struct {
1221 bool blocked;
1222 bool need_qs;
1223 } b;
1224 short s;
1225};
1215struct rcu_node; 1226struct rcu_node;
1216 1227
1217enum perf_event_task_context { 1228enum perf_event_task_context {
@@ -1264,12 +1275,18 @@ struct task_struct {
1264 1275
1265#ifdef CONFIG_PREEMPT_RCU 1276#ifdef CONFIG_PREEMPT_RCU
1266 int rcu_read_lock_nesting; 1277 int rcu_read_lock_nesting;
1267 char rcu_read_unlock_special; 1278 union rcu_special rcu_read_unlock_special;
1268 struct list_head rcu_node_entry; 1279 struct list_head rcu_node_entry;
1269#endif /* #ifdef CONFIG_PREEMPT_RCU */ 1280#endif /* #ifdef CONFIG_PREEMPT_RCU */
1270#ifdef CONFIG_TREE_PREEMPT_RCU 1281#ifdef CONFIG_TREE_PREEMPT_RCU
1271 struct rcu_node *rcu_blocked_node; 1282 struct rcu_node *rcu_blocked_node;
1272#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1283#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1284#ifdef CONFIG_TASKS_RCU
1285 unsigned long rcu_tasks_nvcsw;
1286 bool rcu_tasks_holdout;
1287 struct list_head rcu_tasks_holdout_list;
1288 int rcu_tasks_idle_cpu;
1289#endif /* #ifdef CONFIG_TASKS_RCU */
1273 1290
1274#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1291#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1275 struct sched_info sched_info; 1292 struct sched_info sched_info;
@@ -1903,8 +1920,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
1903#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1920#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1904#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 1921#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
1905#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1922#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1906#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
1907#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
1908#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ 1923#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
1909#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1924#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1910#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1925#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
@@ -1936,11 +1951,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
1936#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1951#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1937#define used_math() tsk_used_math(current) 1952#define used_math() tsk_used_math(current)
1938 1953
1939/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */ 1954/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
1955 * __GFP_FS is also cleared as it implies __GFP_IO.
1956 */
1940static inline gfp_t memalloc_noio_flags(gfp_t flags) 1957static inline gfp_t memalloc_noio_flags(gfp_t flags)
1941{ 1958{
1942 if (unlikely(current->flags & PF_MEMALLOC_NOIO)) 1959 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1943 flags &= ~__GFP_IO; 1960 flags &= ~(__GFP_IO | __GFP_FS);
1944 return flags; 1961 return flags;
1945} 1962}
1946 1963
@@ -1957,17 +1974,31 @@ static inline void memalloc_noio_restore(unsigned int flags)
1957} 1974}
1958 1975
1959/* Per-process atomic flags. */ 1976/* Per-process atomic flags. */
1960#define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */ 1977#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
1978#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
1979#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
1961 1980
1962static inline bool task_no_new_privs(struct task_struct *p)
1963{
1964 return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
1965}
1966 1981
1967static inline void task_set_no_new_privs(struct task_struct *p) 1982#define TASK_PFA_TEST(name, func) \
1968{ 1983 static inline bool task_##func(struct task_struct *p) \
1969 set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); 1984 { return test_bit(PFA_##name, &p->atomic_flags); }
1970} 1985#define TASK_PFA_SET(name, func) \
1986 static inline void task_set_##func(struct task_struct *p) \
1987 { set_bit(PFA_##name, &p->atomic_flags); }
1988#define TASK_PFA_CLEAR(name, func) \
1989 static inline void task_clear_##func(struct task_struct *p) \
1990 { clear_bit(PFA_##name, &p->atomic_flags); }
1991
1992TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1993TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1994
1995TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1996TASK_PFA_SET(SPREAD_PAGE, spread_page)
1997TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1998
1999TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2000TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2001TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1971 2002
1972/* 2003/*
1973 * task->jobctl flags 2004 * task->jobctl flags
@@ -1999,29 +2030,21 @@ extern void task_clear_jobctl_trapping(struct task_struct *task);
1999extern void task_clear_jobctl_pending(struct task_struct *task, 2030extern void task_clear_jobctl_pending(struct task_struct *task,
2000 unsigned int mask); 2031 unsigned int mask);
2001 2032
2002#ifdef CONFIG_PREEMPT_RCU
2003
2004#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
2005#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
2006
2007static inline void rcu_copy_process(struct task_struct *p) 2033static inline void rcu_copy_process(struct task_struct *p)
2008{ 2034{
2035#ifdef CONFIG_PREEMPT_RCU
2009 p->rcu_read_lock_nesting = 0; 2036 p->rcu_read_lock_nesting = 0;
2010 p->rcu_read_unlock_special = 0; 2037 p->rcu_read_unlock_special.s = 0;
2011#ifdef CONFIG_TREE_PREEMPT_RCU
2012 p->rcu_blocked_node = NULL; 2038 p->rcu_blocked_node = NULL;
2013#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2014 INIT_LIST_HEAD(&p->rcu_node_entry); 2039 INIT_LIST_HEAD(&p->rcu_node_entry);
2040#endif /* #ifdef CONFIG_PREEMPT_RCU */
2041#ifdef CONFIG_TASKS_RCU
2042 p->rcu_tasks_holdout = false;
2043 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2044 p->rcu_tasks_idle_cpu = -1;
2045#endif /* #ifdef CONFIG_TASKS_RCU */
2015} 2046}
2016 2047
2017#else
2018
2019static inline void rcu_copy_process(struct task_struct *p)
2020{
2021}
2022
2023#endif
2024
2025static inline void tsk_restore_flags(struct task_struct *task, 2048static inline void tsk_restore_flags(struct task_struct *task,
2026 unsigned long orig_flags, unsigned long flags) 2049 unsigned long orig_flags, unsigned long flags)
2027{ 2050{
@@ -2608,12 +2631,27 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
2608 task_thread_info(p)->task = p; 2631 task_thread_info(p)->task = p;
2609} 2632}
2610 2633
2634/*
2635 * Return the address of the last usable long on the stack.
2636 *
2637 * When the stack grows down, this is just above the thread
2638 * info struct. Going any lower will corrupt the threadinfo.
2639 *
2640 * When the stack grows up, this is the highest address.
2641 * Beyond that position, we corrupt data on the next page.
2642 */
2611static inline unsigned long *end_of_stack(struct task_struct *p) 2643static inline unsigned long *end_of_stack(struct task_struct *p)
2612{ 2644{
2645#ifdef CONFIG_STACK_GROWSUP
2646 return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
2647#else
2613 return (unsigned long *)(task_thread_info(p) + 1); 2648 return (unsigned long *)(task_thread_info(p) + 1);
2649#endif
2614} 2650}
2615 2651
2616#endif 2652#endif
2653#define task_stack_end_corrupted(task) \
2654 (*(end_of_stack(task)) != STACK_END_MAGIC)
2617 2655
2618static inline int object_is_on_stack(void *obj) 2656static inline int object_is_on_stack(void *obj)
2619{ 2657{
@@ -2636,6 +2674,7 @@ static inline unsigned long stack_not_used(struct task_struct *p)
2636 return (unsigned long)n - (unsigned long)end_of_stack(p); 2674 return (unsigned long)n - (unsigned long)end_of_stack(p);
2637} 2675}
2638#endif 2676#endif
2677extern void set_task_stack_end_magic(struct task_struct *tsk);
2639 2678
2640/* set thread flags in other task's structures 2679/* set thread flags in other task's structures
2641 * - see asm/thread_info.h for TIF_xxxx flags available 2680 * - see asm/thread_info.h for TIF_xxxx flags available
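A minimal sketch (not part of the patch): what one generated PFA accessor expands to, plus an illustrative caller of the new stack-end check:

#include <linux/printk.h>
#include <linux/sched.h>

/* TASK_PFA_TEST(SPREAD_PAGE, spread_page) generates the equivalent of: */
static inline bool my_task_spread_page(struct task_struct *p)
{
	return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags);
}

static void my_check_stack(struct task_struct *tsk)
{
	/* The canary is written by set_task_stack_end_magic() at fork time. */
	if (task_stack_end_corrupted(tsk))
		pr_err("%s/%d: stack end overwritten\n", tsk->comm, tsk->pid);
}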
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 005bf3e38db5..f0f8bad54be9 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -5,12 +5,4 @@
5 5
6extern struct screen_info screen_info; 6extern struct screen_info screen_info;
7 7
8#define ORIG_X (screen_info.orig_x)
9#define ORIG_Y (screen_info.orig_y)
10#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
11#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
12#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
13#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
14#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
15#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
16#endif /* _SCREEN_INFO_H */ 8#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index 623f90e5f38d..ba96471c11ba 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1559,7 +1559,7 @@ struct security_operations {
1559 int (*file_lock) (struct file *file, unsigned int cmd); 1559 int (*file_lock) (struct file *file, unsigned int cmd);
1560 int (*file_fcntl) (struct file *file, unsigned int cmd, 1560 int (*file_fcntl) (struct file *file, unsigned int cmd,
1561 unsigned long arg); 1561 unsigned long arg);
1562 int (*file_set_fowner) (struct file *file); 1562 void (*file_set_fowner) (struct file *file);
1563 int (*file_send_sigiotask) (struct task_struct *tsk, 1563 int (*file_send_sigiotask) (struct task_struct *tsk,
1564 struct fown_struct *fown, int sig); 1564 struct fown_struct *fown, int sig);
1565 int (*file_receive) (struct file *file); 1565 int (*file_receive) (struct file *file);
@@ -1834,7 +1834,7 @@ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
1834 unsigned long prot); 1834 unsigned long prot);
1835int security_file_lock(struct file *file, unsigned int cmd); 1835int security_file_lock(struct file *file, unsigned int cmd);
1836int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg); 1836int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
1837int security_file_set_fowner(struct file *file); 1837void security_file_set_fowner(struct file *file);
1838int security_file_send_sigiotask(struct task_struct *tsk, 1838int security_file_send_sigiotask(struct task_struct *tsk,
1839 struct fown_struct *fown, int sig); 1839 struct fown_struct *fown, int sig);
1840int security_file_receive(struct file *file); 1840int security_file_receive(struct file *file);
@@ -2108,7 +2108,7 @@ static inline int security_dentry_init_security(struct dentry *dentry,
2108static inline int security_inode_init_security(struct inode *inode, 2108static inline int security_inode_init_security(struct inode *inode,
2109 struct inode *dir, 2109 struct inode *dir,
2110 const struct qstr *qstr, 2110 const struct qstr *qstr,
2111 const initxattrs initxattrs, 2111 const initxattrs xattrs,
2112 void *fs_data) 2112 void *fs_data)
2113{ 2113{
2114 return 0; 2114 return 0;
@@ -2312,9 +2312,9 @@ static inline int security_file_fcntl(struct file *file, unsigned int cmd,
2312 return 0; 2312 return 0;
2313} 2313}
2314 2314
2315static inline int security_file_set_fowner(struct file *file) 2315static inline void security_file_set_fowner(struct file *file)
2316{ 2316{
2317 return 0; 2317 return;
2318} 2318}
2319 2319
2320static inline int security_file_send_sigiotask(struct task_struct *tsk, 2320static inline int security_file_send_sigiotask(struct task_struct *tsk,
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cc359636cfa3..f5df8f687b4d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -456,4 +456,23 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
456 spin_unlock_irqrestore(&sl->lock, flags); 456 spin_unlock_irqrestore(&sl->lock, flags);
457} 457}
458 458
459static inline unsigned long
460read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
461{
462 unsigned long flags = 0;
463
464 if (!(*seq & 1)) /* Even */
465 *seq = read_seqbegin(lock);
466 else /* Odd */
467 read_seqlock_excl_irqsave(lock, flags);
468
469 return flags;
470}
471
472static inline void
473done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
474{
475 if (seq & 1)
476 read_sequnlock_excl_irqrestore(lock, flags);
477}
459#endif /* __LINUX_SEQLOCK_H */ 478#endif /* __LINUX_SEQLOCK_H */
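A minimal sketch (not in the patch) of the usual lockless-then-locked retry loop with the new irqsave variants; the lock and the copied fields are illustrative:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(my_lock);
static u64 my_a, my_b;

static void my_read_pair(u64 *a, u64 *b)
{
	unsigned long flags;
	int seq, nextseq = 0;	/* even: try a lockless pass first */

	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&my_lock, &seq);
		*a = my_a;
		*b = my_b;
		nextseq = 1;	/* odd: if we raced, take the lock next time round */
	} while (need_seqretry(&my_lock, seq));
	done_seqretry_irqrestore(&my_lock, seq, flags);
}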
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
index 3d6003de4b0d..a1ba6a5ccdd6 100644
--- a/include/linux/seqno-fence.h
+++ b/include/linux/seqno-fence.h
@@ -62,6 +62,7 @@ to_seqno_fence(struct fence *fence)
62 * @context: the execution context this fence is a part of 62 * @context: the execution context this fence is a part of
63 * @seqno_ofs: the offset within @sync_buf 63 * @seqno_ofs: the offset within @sync_buf
64 * @seqno: the sequence # to signal on 64 * @seqno: the sequence # to signal on
65 * @cond: fence wait condition
65 * @ops: the fence_ops for operations on this seqno fence 66 * @ops: the fence_ops for operations on this seqno fence
66 * 67 *
67 * This function initializes a struct seqno_fence with passed parameters, 68 * This function initializes a struct seqno_fence with passed parameters,
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index f93649e22c43..3df10d5f154b 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -84,6 +84,7 @@ struct uart_8250_port {
84 unsigned char mcr_mask; /* mask of user bits */ 84 unsigned char mcr_mask; /* mask of user bits */
85 unsigned char mcr_force; /* mask of forced bits */ 85 unsigned char mcr_force; /* mask of forced bits */
86 unsigned char cur_iotype; /* Running I/O type */ 86 unsigned char cur_iotype; /* Running I/O type */
87 unsigned int rpm_tx_active;
87 88
88 /* 89 /*
89 * Some bits in registers are cleared on a read, so they must 90 * Some bits in registers are cleared on a read, so they must
@@ -96,10 +97,13 @@ struct uart_8250_port {
96 unsigned char msr_saved_flags; 97 unsigned char msr_saved_flags;
97 98
98 struct uart_8250_dma *dma; 99 struct uart_8250_dma *dma;
100 struct serial_rs485 rs485;
99 101
100 /* 8250 specific callbacks */ 102 /* 8250 specific callbacks */
101 int (*dl_read)(struct uart_8250_port *); 103 int (*dl_read)(struct uart_8250_port *);
102 void (*dl_write)(struct uart_8250_port *, int); 104 void (*dl_write)(struct uart_8250_port *, int);
105 int (*rs485_config)(struct uart_8250_port *,
106 struct serial_rs485 *rs485);
103}; 107};
104 108
105static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) 109static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
@@ -121,6 +125,8 @@ extern void serial8250_early_out(struct uart_port *port, int offset, int value);
121extern int setup_early_serial8250_console(char *cmdline); 125extern int setup_early_serial8250_console(char *cmdline);
122extern void serial8250_do_set_termios(struct uart_port *port, 126extern void serial8250_do_set_termios(struct uart_port *port,
123 struct ktermios *termios, struct ktermios *old); 127 struct ktermios *termios, struct ktermios *old);
128extern int serial8250_do_startup(struct uart_port *port);
129extern void serial8250_do_shutdown(struct uart_port *port);
124extern void serial8250_do_pm(struct uart_port *port, unsigned int state, 130extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
125 unsigned int oldstate); 131 unsigned int oldstate);
126extern int fsl8250_handle_irq(struct uart_port *port); 132extern int fsl8250_handle_irq(struct uart_port *port);
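A minimal sketch (not part of the patch) of an 8250 driver implementing the new rs485_config hook; how the transceiver is switched is hardware specific and left as a comment:

#include <linux/serial_8250.h>

static int my_rs485_config(struct uart_8250_port *up,
			   struct serial_rs485 *rs485)
{
	/* Program the (hardware-specific) RS-485 direction/turnaround logic
	 * according to rs485->flags and the delay_rts_* fields here. */

	/* Drivers are expected to keep a copy for later TIOCGRS485 queries. */
	up->rs485 = *rs485;
	return 0;
}

/* Installed before registration: up->rs485_config = my_rs485_config; */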
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index cf3a1e789bf5..21c2e05c1bc3 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -112,6 +112,7 @@ struct uart_icount {
112}; 112};
113 113
114typedef unsigned int __bitwise__ upf_t; 114typedef unsigned int __bitwise__ upf_t;
115typedef unsigned int __bitwise__ upstat_t;
115 116
116struct uart_port { 117struct uart_port {
117 spinlock_t lock; /* port lock */ 118 spinlock_t lock; /* port lock */
@@ -122,6 +123,10 @@ struct uart_port {
122 void (*set_termios)(struct uart_port *, 123 void (*set_termios)(struct uart_port *,
123 struct ktermios *new, 124 struct ktermios *new,
124 struct ktermios *old); 125 struct ktermios *old);
126 int (*startup)(struct uart_port *port);
127 void (*shutdown)(struct uart_port *port);
128 void (*throttle)(struct uart_port *port);
129 void (*unthrottle)(struct uart_port *port);
125 int (*handle_irq)(struct uart_port *); 130 int (*handle_irq)(struct uart_port *);
126 void (*pm)(struct uart_port *, unsigned int state, 131 void (*pm)(struct uart_port *, unsigned int state,
127 unsigned int old); 132 unsigned int old);
@@ -152,6 +157,7 @@ struct uart_port {
152 unsigned long sysrq; /* sysrq timeout */ 157 unsigned long sysrq; /* sysrq timeout */
153#endif 158#endif
154 159
160 /* flags must be updated while holding port mutex */
155 upf_t flags; 161 upf_t flags;
156 162
157#define UPF_FOURPORT ((__force upf_t) (1 << 1)) 163#define UPF_FOURPORT ((__force upf_t) (1 << 1))
@@ -187,6 +193,13 @@ struct uart_port {
187#define UPF_CHANGE_MASK ((__force upf_t) (0x17fff)) 193#define UPF_CHANGE_MASK ((__force upf_t) (0x17fff))
188#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) 194#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY))
189 195
196 /* status must be updated while holding port lock */
197 upstat_t status;
198
199#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0))
200#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1))
201
202 int hw_stopped; /* sw-assisted CTS flow state */
190 unsigned int mctrl; /* current modem ctrl settings */ 203 unsigned int mctrl; /* current modem ctrl settings */
191 unsigned int timeout; /* character-based timeout */ 204 unsigned int timeout; /* character-based timeout */
192 unsigned int type; /* port type */ 205 unsigned int type; /* port type */
@@ -347,11 +360,16 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
347static inline int uart_tx_stopped(struct uart_port *port) 360static inline int uart_tx_stopped(struct uart_port *port)
348{ 361{
349 struct tty_struct *tty = port->state->port.tty; 362 struct tty_struct *tty = port->state->port.tty;
350 if(tty->stopped || tty->hw_stopped) 363 if (tty->stopped || port->hw_stopped)
351 return 1; 364 return 1;
352 return 0; 365 return 0;
353} 366}
354 367
368static inline bool uart_cts_enabled(struct uart_port *uport)
369{
370 return uport->status & UPSTAT_CTS_ENABLE;
371}
372
355/* 373/*
356 * The following are helper functions for the low level drivers. 374 * The following are helper functions for the low level drivers.
357 */ 375 */
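A minimal sketch (not in the patch) of a driver recording CTS flow control in the new per-port status word and querying it later; per the comment above, status is only updated under the port lock:

#include <linux/serial_core.h>
#include <linux/tty.h>

static void my_set_termios(struct uart_port *port, struct ktermios *termios,
			   struct ktermios *old)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (termios->c_cflag & CRTSCTS)
		port->status |= UPSTAT_CTS_ENABLE;
	else
		port->status &= ~UPSTAT_CTS_ENABLE;
	spin_unlock_irqrestore(&port->lock, flags);
}

static bool my_tx_flow_stopped(struct uart_port *port)
{
	return uart_cts_enabled(port) && port->hw_stopped;
}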
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index abde271c18ae..3ab0749d6875 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -28,7 +28,6 @@
28#include <linux/textsearch.h> 28#include <linux/textsearch.h>
29#include <net/checksum.h> 29#include <net/checksum.h>
30#include <linux/rcupdate.h> 30#include <linux/rcupdate.h>
31#include <linux/dmaengine.h>
32#include <linux/hrtimer.h> 31#include <linux/hrtimer.h>
33#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
34#include <linux/netdev_features.h> 33#include <linux/netdev_features.h>
@@ -47,11 +46,29 @@
47 * 46 *
48 * The hardware you're dealing with doesn't calculate the full checksum 47 * The hardware you're dealing with doesn't calculate the full checksum
49 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums 48 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
50 * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will 49 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
51 * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still 50 * if their checksums are okay. skb->csum is still undefined in this case
52 * undefined in this case though. It is a bad option, but, unfortunately, 51 * though. It is a bad option, but, unfortunately, nowadays most vendors do
53 * nowadays most vendors do this. Apparently with the secret goal to sell 52 * this. Apparently with the secret goal to sell you new devices, when you
54 * you new devices, when you will add new protocol to your host, f.e. IPv6 8) 53 * will add new protocol to your host, f.e. IPv6 8)
54 *
55 * CHECKSUM_UNNECESSARY is applicable to following protocols:
56 * TCP: IPv6 and IPv4.
57 * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
58 * zero UDP checksum for either IPv4 or IPv6, the networking stack
59 * may perform further validation in this case.
60 * GRE: only if the checksum is present in the header.
61 * SCTP: indicates the CRC in SCTP header has been validated.
62 *
63 * skb->csum_level indicates the number of consecutive checksums found in
64 * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
65 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
66 * and a device is able to verify the checksums for UDP (possibly zero),
67 * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
68 * two. If the device were only able to verify the UDP checksum and not
 69 * GRE, either because it doesn't support GRE checksum or because GRE
70 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
71 * not considered in this case).
55 * 72 *
56 * CHECKSUM_COMPLETE: 73 * CHECKSUM_COMPLETE:
57 * 74 *
@@ -112,6 +129,9 @@
112#define CHECKSUM_COMPLETE 2 129#define CHECKSUM_COMPLETE 2
113#define CHECKSUM_PARTIAL 3 130#define CHECKSUM_PARTIAL 3
114 131
132/* Maximum value in skb->csum_level */
133#define SKB_MAX_CSUM_LEVEL 3
134
115#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) 135#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
116#define SKB_WITH_OVERHEAD(X) \ 136#define SKB_WITH_OVERHEAD(X) \
117 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 137 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -135,7 +155,7 @@ struct nf_conntrack {
135}; 155};
136#endif 156#endif
137 157
138#ifdef CONFIG_BRIDGE_NETFILTER 158#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
139struct nf_bridge_info { 159struct nf_bridge_info {
140 atomic_t use; 160 atomic_t use;
141 unsigned int mask; 161 unsigned int mask;
@@ -318,9 +338,10 @@ struct skb_shared_info {
318 338
319 339
320enum { 340enum {
321 SKB_FCLONE_UNAVAILABLE, 341 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
322 SKB_FCLONE_ORIG, 342 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
323 SKB_FCLONE_CLONE, 343 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
344 SKB_FCLONE_FREE, /* this companion fclone skb is available */
324}; 345};
325 346
326enum { 347enum {
@@ -452,6 +473,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
452 * @tc_verd: traffic control verdict 473 * @tc_verd: traffic control verdict
453 * @hash: the packet hash 474 * @hash: the packet hash
454 * @queue_mapping: Queue mapping for multiqueue devices 475 * @queue_mapping: Queue mapping for multiqueue devices
476 * @xmit_more: More SKBs are pending for this queue
455 * @ndisc_nodetype: router type (from link layer) 477 * @ndisc_nodetype: router type (from link layer)
456 * @ooo_okay: allow the mapping of a socket to a queue to be changed 478 * @ooo_okay: allow the mapping of a socket to a queue to be changed
457 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport 479 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -460,8 +482,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
460 * @wifi_acked_valid: wifi_acked was set 482 * @wifi_acked_valid: wifi_acked was set
461 * @wifi_acked: whether frame was acked on wifi or not 483 * @wifi_acked: whether frame was acked on wifi or not
462 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS 484 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
463 * @dma_cookie: a cookie to one of several possible DMA operations
464 * done by skb DMA functions
465 * @napi_id: id of the NAPI struct this skb came from 485 * @napi_id: id of the NAPI struct this skb came from
466 * @secmark: security marking 486 * @secmark: security marking
467 * @mark: Generic packet mark 487 * @mark: Generic packet mark
@@ -505,87 +525,99 @@ struct sk_buff {
505 char cb[48] __aligned(8); 525 char cb[48] __aligned(8);
506 526
507 unsigned long _skb_refdst; 527 unsigned long _skb_refdst;
528 void (*destructor)(struct sk_buff *skb);
508#ifdef CONFIG_XFRM 529#ifdef CONFIG_XFRM
509 struct sec_path *sp; 530 struct sec_path *sp;
510#endif 531#endif
532#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
533 struct nf_conntrack *nfct;
534#endif
535#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
536 struct nf_bridge_info *nf_bridge;
537#endif
511 unsigned int len, 538 unsigned int len,
512 data_len; 539 data_len;
513 __u16 mac_len, 540 __u16 mac_len,
514 hdr_len; 541 hdr_len;
515 union { 542
516 __wsum csum; 543 /* Following fields are _not_ copied in __copy_skb_header()
517 struct { 544 * Note that queue_mapping is here mostly to fill a hole.
518 __u16 csum_start; 545 */
519 __u16 csum_offset;
520 };
521 };
522 __u32 priority;
523 kmemcheck_bitfield_begin(flags1); 546 kmemcheck_bitfield_begin(flags1);
524 __u8 ignore_df:1, 547 __u16 queue_mapping;
525 cloned:1, 548 __u8 cloned:1,
526 ip_summed:2,
527 nohdr:1, 549 nohdr:1,
528 nfctinfo:3;
529 __u8 pkt_type:3,
530 fclone:2, 550 fclone:2,
531 ipvs_property:1,
532 peeked:1, 551 peeked:1,
533 nf_trace:1; 552 head_frag:1,
553 xmit_more:1;
554 /* one bit hole */
534 kmemcheck_bitfield_end(flags1); 555 kmemcheck_bitfield_end(flags1);
535 __be16 protocol;
536
537 void (*destructor)(struct sk_buff *skb);
538#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
539 struct nf_conntrack *nfct;
540#endif
541#ifdef CONFIG_BRIDGE_NETFILTER
542 struct nf_bridge_info *nf_bridge;
543#endif
544
545 int skb_iif;
546
547 __u32 hash;
548 556
549 __be16 vlan_proto; 557 /* fields enclosed in headers_start/headers_end are copied
550 __u16 vlan_tci; 558 * using a single memcpy() in __copy_skb_header()
559 */
560 __u32 headers_start[0];
551 561
552#ifdef CONFIG_NET_SCHED 562/* if you move pkt_type around you also must adapt those constants */
553 __u16 tc_index; /* traffic control index */ 563#ifdef __BIG_ENDIAN_BITFIELD
554#ifdef CONFIG_NET_CLS_ACT 564#define PKT_TYPE_MAX (7 << 5)
555 __u16 tc_verd; /* traffic control verdict */ 565#else
556#endif 566#define PKT_TYPE_MAX 7
557#endif 567#endif
568#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
558 569
559 __u16 queue_mapping; 570 __u8 __pkt_type_offset[0];
560 kmemcheck_bitfield_begin(flags2); 571 __u8 pkt_type:3;
561#ifdef CONFIG_IPV6_NDISC_NODETYPE
562 __u8 ndisc_nodetype:2;
563#endif
564 __u8 pfmemalloc:1; 572 __u8 pfmemalloc:1;
573 __u8 ignore_df:1;
574 __u8 nfctinfo:3;
575
576 __u8 nf_trace:1;
577 __u8 ip_summed:2;
565 __u8 ooo_okay:1; 578 __u8 ooo_okay:1;
566 __u8 l4_hash:1; 579 __u8 l4_hash:1;
567 __u8 sw_hash:1; 580 __u8 sw_hash:1;
568 __u8 wifi_acked_valid:1; 581 __u8 wifi_acked_valid:1;
569 __u8 wifi_acked:1; 582 __u8 wifi_acked:1;
583
570 __u8 no_fcs:1; 584 __u8 no_fcs:1;
571 __u8 head_frag:1; 585 /* Indicates the inner headers are valid in the skbuff. */
572 /* Encapsulation protocol and NIC drivers should use
573 * this flag to indicate to each other if the skb contains
574 * encapsulated packet or not and maybe use the inner packet
575 * headers if needed
576 */
577 __u8 encapsulation:1; 586 __u8 encapsulation:1;
578 __u8 encap_hdr_csum:1; 587 __u8 encap_hdr_csum:1;
579 __u8 csum_valid:1; 588 __u8 csum_valid:1;
580 __u8 csum_complete_sw:1; 589 __u8 csum_complete_sw:1;
581 /* 2/4 bit hole (depending on ndisc_nodetype presence) */ 590 __u8 csum_level:2;
582 kmemcheck_bitfield_end(flags2); 591 __u8 csum_bad:1;
592
593#ifdef CONFIG_IPV6_NDISC_NODETYPE
594 __u8 ndisc_nodetype:2;
595#endif
596 __u8 ipvs_property:1;
597 __u8 inner_protocol_type:1;
598 /* 4 or 6 bit hole */
599
600#ifdef CONFIG_NET_SCHED
601 __u16 tc_index; /* traffic control index */
602#ifdef CONFIG_NET_CLS_ACT
603 __u16 tc_verd; /* traffic control verdict */
604#endif
605#endif
583 606
584#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
585 union { 607 union {
586 unsigned int napi_id; 608 __wsum csum;
587 dma_cookie_t dma_cookie; 609 struct {
610 __u16 csum_start;
611 __u16 csum_offset;
612 };
588 }; 613 };
614 __u32 priority;
615 int skb_iif;
616 __u32 hash;
617 __be16 vlan_proto;
618 __u16 vlan_tci;
619#ifdef CONFIG_NET_RX_BUSY_POLL
620 unsigned int napi_id;
589#endif 621#endif
590#ifdef CONFIG_NETWORK_SECMARK 622#ifdef CONFIG_NETWORK_SECMARK
591 __u32 secmark; 623 __u32 secmark;
@@ -596,13 +628,22 @@ struct sk_buff {
596 __u32 reserved_tailroom; 628 __u32 reserved_tailroom;
597 }; 629 };
598 630
599 __be16 inner_protocol; 631 union {
632 __be16 inner_protocol;
633 __u8 inner_ipproto;
634 };
635
600 __u16 inner_transport_header; 636 __u16 inner_transport_header;
601 __u16 inner_network_header; 637 __u16 inner_network_header;
602 __u16 inner_mac_header; 638 __u16 inner_mac_header;
639
640 __be16 protocol;
603 __u16 transport_header; 641 __u16 transport_header;
604 __u16 network_header; 642 __u16 network_header;
605 __u16 mac_header; 643 __u16 mac_header;
644
645 __u32 headers_end[0];
646
606 /* These elements must be at the end, see alloc_skb() for details. */ 647 /* These elements must be at the end, see alloc_skb() for details. */
607 sk_buff_data_t tail; 648 sk_buff_data_t tail;
608 sk_buff_data_t end; 649 sk_buff_data_t end;
@@ -734,6 +775,37 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
734 return __alloc_skb(size, priority, 0, NUMA_NO_NODE); 775 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
735} 776}
736 777
778struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
779 unsigned long data_len,
780 int max_page_order,
781 int *errcode,
782 gfp_t gfp_mask);
783
784/* Layout of fast clones : [skb1][skb2][fclone_ref] */
785struct sk_buff_fclones {
786 struct sk_buff skb1;
787
788 struct sk_buff skb2;
789
790 atomic_t fclone_ref;
791};
792
793/**
794 * skb_fclone_busy - check if fclone is busy
795 * @skb: buffer
796 *
 797 * Returns true if skb is a fast clone, and its clone is not freed.
798 */
799static inline bool skb_fclone_busy(const struct sk_buff *skb)
800{
801 const struct sk_buff_fclones *fclones;
802
803 fclones = container_of(skb, struct sk_buff_fclones, skb1);
804
805 return skb->fclone == SKB_FCLONE_ORIG &&
806 fclones->skb2.fclone == SKB_FCLONE_CLONE;
807}
808
737static inline struct sk_buff *alloc_skb_fclone(unsigned int size, 809static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
738 gfp_t priority) 810 gfp_t priority)
739{ 811{
@@ -1042,6 +1114,7 @@ static inline int skb_header_cloned(const struct sk_buff *skb)
1042 * Drop a reference to the header part of the buffer. This is done 1114 * Drop a reference to the header part of the buffer. This is done
1043 * by acquiring a payload reference. You must not read from the header 1115 * by acquiring a payload reference. You must not read from the header
1044 * part of skb->data after this. 1116 * part of skb->data after this.
 1117 * Note: check if you can use __skb_header_release() instead.
1045 */ 1118 */
1046static inline void skb_header_release(struct sk_buff *skb) 1119static inline void skb_header_release(struct sk_buff *skb)
1047{ 1120{
@@ -1051,6 +1124,20 @@ static inline void skb_header_release(struct sk_buff *skb)
1051} 1124}
1052 1125
1053/** 1126/**
1127 * __skb_header_release - release reference to header
1128 * @skb: buffer to operate on
1129 *
1130 * Variant of skb_header_release() assuming skb is private to caller.
1131 * We can avoid one atomic operation.
1132 */
1133static inline void __skb_header_release(struct sk_buff *skb)
1134{
1135 skb->nohdr = 1;
1136 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1137}
1138
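Illustration only: a sketch of where the cheaper variant is safe; my_queue_private_skb() and its list are hypothetical, and the list is assumed to be serialized by the caller.

/* Hypothetical enqueue of a freshly built skb no other context can see yet. */
static void my_queue_private_skb(struct sk_buff_head *list,
                                 struct sk_buff *skb)
{
        __skb_header_release(skb);      /* non-atomic: skb is still private */
        __skb_queue_tail(list, skb);    /* caller serializes access to list */
}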
1139
1140/**
1054 * skb_shared - is the buffer shared 1141 * skb_shared - is the buffer shared
1055 * @skb: buffer to check 1142 * @skb: buffer to check
1056 * 1143 *
@@ -1675,6 +1762,23 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
1675 skb->tail += len; 1762 skb->tail += len;
1676} 1763}
1677 1764
1765#define ENCAP_TYPE_ETHER 0
1766#define ENCAP_TYPE_IPPROTO 1
1767
1768static inline void skb_set_inner_protocol(struct sk_buff *skb,
1769 __be16 protocol)
1770{
1771 skb->inner_protocol = protocol;
1772 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
1773}
1774
1775static inline void skb_set_inner_ipproto(struct sk_buff *skb,
1776 __u8 ipproto)
1777{
1778 skb->inner_ipproto = ipproto;
1779 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
1780}
1781
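Illustration only: how an encapsulating transmit path might record what sits behind the outer header; my_mark_inner() is hypothetical and ETH_P_TEB/IPPROTO_IPIP are merely example values.

/* Hypothetical tunnel helper; the protocol values are examples only. */
static void my_mark_inner(struct sk_buff *skb, bool inner_is_ethernet)
{
        if (inner_is_ethernet)
                skb_set_inner_protocol(skb, htons(ETH_P_TEB));
        else
                skb_set_inner_ipproto(skb, IPPROTO_IPIP);
}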
1678static inline void skb_reset_inner_headers(struct sk_buff *skb) 1782static inline void skb_reset_inner_headers(struct sk_buff *skb)
1679{ 1783{
1680 skb->inner_mac_header = skb->mac_header; 1784 skb->inner_mac_header = skb->mac_header;
@@ -1860,18 +1964,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1860 return pskb_may_pull(skb, skb_network_offset(skb) + len); 1964 return pskb_may_pull(skb, skb_network_offset(skb) + len);
1861} 1965}
1862 1966
1863static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
1864{
1865 /* Only continue with checksum unnecessary if device indicated
1866 * it is valid across encapsulation (skb->encapsulation was set).
1867 */
1868 if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
1869 skb->ip_summed = CHECKSUM_NONE;
1870
1871 skb->encapsulation = 0;
1872 skb->csum_valid = 0;
1873}
1874
1875/* 1967/*
1876 * CPUs often take a performance hit when accessing unaligned memory 1968 * CPUs often take a performance hit when accessing unaligned memory
1877 * locations. The actual performance hit varies, it can be small if the 1969 * locations. The actual performance hit varies, it can be small if the
@@ -2567,20 +2659,26 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2567__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, 2659__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
2568 __wsum csum); 2660 __wsum csum);
2569 2661
2570static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2662static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
2571 int len, void *buffer) 2663 int len, void *data, int hlen, void *buffer)
2572{ 2664{
2573 int hlen = skb_headlen(skb);
2574
2575 if (hlen - offset >= len) 2665 if (hlen - offset >= len)
2576 return skb->data + offset; 2666 return data + offset;
2577 2667
2578 if (skb_copy_bits(skb, offset, buffer, len) < 0) 2668 if (!skb ||
2669 skb_copy_bits(skb, offset, buffer, len) < 0)
2579 return NULL; 2670 return NULL;
2580 2671
2581 return buffer; 2672 return buffer;
2582} 2673}
2583 2674
2675static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2676 int len, void *buffer)
2677{
2678 return __skb_header_pointer(skb, offset, len, skb->data,
2679 skb_headlen(skb), buffer);
2680}
2681
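Illustration only: reading a header that may be split across fragments; my_peek_udp() is hypothetical and 'offset' is assumed to be the transport header offset.

/* Hypothetical parse helper; 'buf' is caller-provided scratch space. */
static const struct udphdr *my_peek_udp(const struct sk_buff *skb,
                                        int offset, struct udphdr *buf)
{
        /* Returns a pointer into the linear data when possible, else buf. */
        return skb_header_pointer(skb, offset, sizeof(*buf), buf);
}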
2584/** 2682/**
2585 * skb_needs_linearize - check if we need to linearize a given skb 2683 * skb_needs_linearize - check if we need to linearize a given skb
2586 * depending on the given device features. 2684 * depending on the given device features.
@@ -2671,6 +2769,8 @@ static inline ktime_t net_invalid_timestamp(void)
2671 return ktime_set(0, 0); 2769 return ktime_set(0, 0);
2672} 2770}
2673 2771
2772struct sk_buff *skb_clone_sk(struct sk_buff *skb);
2773
2674#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING 2774#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2675 2775
2676void skb_clone_tx_timestamp(struct sk_buff *skb); 2776void skb_clone_tx_timestamp(struct sk_buff *skb);
@@ -2786,6 +2886,42 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2786 0 : __skb_checksum_complete(skb); 2886 0 : __skb_checksum_complete(skb);
2787} 2887}
2788 2888
2889static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
2890{
2891 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2892 if (skb->csum_level == 0)
2893 skb->ip_summed = CHECKSUM_NONE;
2894 else
2895 skb->csum_level--;
2896 }
2897}
2898
2899static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
2900{
2901 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2902 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
2903 skb->csum_level++;
2904 } else if (skb->ip_summed == CHECKSUM_NONE) {
2905 skb->ip_summed = CHECKSUM_UNNECESSARY;
2906 skb->csum_level = 0;
2907 }
2908}
2909
2910static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
2911{
2912 /* Mark current checksum as bad (typically called from GRO
2913 * path). In the case that ip_summed is CHECKSUM_NONE
2914 * this must be the first checksum encountered in the packet.
2915 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
2916 * checksum after the last one validated. For UDP, a zero
2917 * checksum can not be marked as bad.
2918 */
2919
2920 if (skb->ip_summed == CHECKSUM_NONE ||
2921 skb->ip_summed == CHECKSUM_UNNECESSARY)
2922 skb->csum_bad = 1;
2923}
2924
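Illustration only: how a decapsulating receive path might consume one CHECKSUM_UNNECESSARY level while a validating path stacks one or flags a failure; both helpers are hypothetical.

/* Hypothetical decap step: one validated checksum level has been used up. */
static void my_decap_consume_csum(struct sk_buff *skb)
{
        __skb_decr_checksum_unnecessary(skb);
}

/* Hypothetical GRO-style step after checking an inner checksum. */
static void my_inner_csum_checked(struct sk_buff *skb, bool ok)
{
        if (ok)
                __skb_incr_checksum_unnecessary(skb);
        else
                __skb_mark_checksum_bad(skb);
}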
2789/* Check if we need to perform checksum complete validation. 2925/* Check if we need to perform checksum complete validation.
2790 * 2926 *
2791 * Returns true if checksum complete is needed, false otherwise 2927 * Returns true if checksum complete is needed, false otherwise
@@ -2797,6 +2933,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
2797{ 2933{
2798 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { 2934 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
2799 skb->csum_valid = 1; 2935 skb->csum_valid = 1;
2936 __skb_decr_checksum_unnecessary(skb);
2800 return false; 2937 return false;
2801 } 2938 }
2802 2939
@@ -2826,6 +2963,9 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
2826 skb->csum_valid = 1; 2963 skb->csum_valid = 1;
2827 return 0; 2964 return 0;
2828 } 2965 }
2966 } else if (skb->csum_bad) {
2967 /* ip_summed == CHECKSUM_NONE in this case */
2968 return 1;
2829 } 2969 }
2830 2970
2831 skb->csum = psum; 2971 skb->csum = psum;
@@ -2883,6 +3023,26 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
2883#define skb_checksum_simple_validate(skb) \ 3023#define skb_checksum_simple_validate(skb) \
2884 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) 3024 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
2885 3025
3026static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3027{
3028 return (skb->ip_summed == CHECKSUM_NONE &&
3029 skb->csum_valid && !skb->csum_bad);
3030}
3031
3032static inline void __skb_checksum_convert(struct sk_buff *skb,
3033 __sum16 check, __wsum pseudo)
3034{
3035 skb->csum = ~pseudo;
3036 skb->ip_summed = CHECKSUM_COMPLETE;
3037}
3038
3039#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
3040do { \
3041 if (__skb_checksum_convert_check(skb)) \
3042 __skb_checksum_convert(skb, check, \
3043 compute_pseudo(skb, proto)); \
3044} while (0)
3045
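Illustration only: a sketch of the intended use in a UDP receive path; my_udp_try_convert() is hypothetical and inet_compute_pseudo() is assumed to be the IPv4 pseudo-header helper visible in that context.

/* Hypothetical UDP receive step (assumes inet_compute_pseudo() is available). */
static void my_udp_try_convert(struct sk_buff *skb, const struct udphdr *uh)
{
        skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
                                 inet_compute_pseudo);
}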
2886#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3046#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2887void nf_conntrack_destroy(struct nf_conntrack *nfct); 3047void nf_conntrack_destroy(struct nf_conntrack *nfct);
2888static inline void nf_conntrack_put(struct nf_conntrack *nfct) 3048static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -2896,7 +3056,7 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2896 atomic_inc(&nfct->use); 3056 atomic_inc(&nfct->use);
2897} 3057}
2898#endif 3058#endif
2899#ifdef CONFIG_BRIDGE_NETFILTER 3059#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
2900static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) 3060static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2901{ 3061{
2902 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) 3062 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
@@ -2914,7 +3074,7 @@ static inline void nf_reset(struct sk_buff *skb)
2914 nf_conntrack_put(skb->nfct); 3074 nf_conntrack_put(skb->nfct);
2915 skb->nfct = NULL; 3075 skb->nfct = NULL;
2916#endif 3076#endif
2917#ifdef CONFIG_BRIDGE_NETFILTER 3077#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
2918 nf_bridge_put(skb->nf_bridge); 3078 nf_bridge_put(skb->nf_bridge);
2919 skb->nf_bridge = NULL; 3079 skb->nf_bridge = NULL;
2920#endif 3080#endif
@@ -2928,19 +3088,22 @@ static inline void nf_reset_trace(struct sk_buff *skb)
2928} 3088}
2929 3089
2930/* Note: This doesn't put any conntrack and bridge info in dst. */ 3090/* Note: This doesn't put any conntrack and bridge info in dst. */
2931static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) 3091static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3092 bool copy)
2932{ 3093{
2933#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3094#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2934 dst->nfct = src->nfct; 3095 dst->nfct = src->nfct;
2935 nf_conntrack_get(src->nfct); 3096 nf_conntrack_get(src->nfct);
2936 dst->nfctinfo = src->nfctinfo; 3097 if (copy)
3098 dst->nfctinfo = src->nfctinfo;
2937#endif 3099#endif
2938#ifdef CONFIG_BRIDGE_NETFILTER 3100#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
2939 dst->nf_bridge = src->nf_bridge; 3101 dst->nf_bridge = src->nf_bridge;
2940 nf_bridge_get(src->nf_bridge); 3102 nf_bridge_get(src->nf_bridge);
2941#endif 3103#endif
2942#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) 3104#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
2943 dst->nf_trace = src->nf_trace; 3105 if (copy)
3106 dst->nf_trace = src->nf_trace;
2944#endif 3107#endif
2945} 3108}
2946 3109
@@ -2949,10 +3112,10 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2949#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3112#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2950 nf_conntrack_put(dst->nfct); 3113 nf_conntrack_put(dst->nfct);
2951#endif 3114#endif
2952#ifdef CONFIG_BRIDGE_NETFILTER 3115#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
2953 nf_bridge_put(dst->nf_bridge); 3116 nf_bridge_put(dst->nf_bridge);
2954#endif 3117#endif
2955 __nf_copy(dst, src); 3118 __nf_copy(dst, src, true);
2956} 3119}
2957 3120
2958#ifdef CONFIG_NETWORK_SECMARK 3121#ifdef CONFIG_NETWORK_SECMARK
@@ -3137,7 +3300,9 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
3137 3300
3138int skb_checksum_setup(struct sk_buff *skb, bool recalculate); 3301int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
3139 3302
3140u32 __skb_get_poff(const struct sk_buff *skb); 3303u32 skb_get_poff(const struct sk_buff *skb);
3304u32 __skb_get_poff(const struct sk_buff *skb, void *data,
3305 const struct flow_keys *keys, int hlen);
3141 3306
3142/** 3307/**
3143 * skb_head_is_locked - Determine if the skb->head is locked down 3308 * skb_head_is_locked - Determine if the skb->head is locked down
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1d9abb7d22a0..c265bec6a57d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,31 +158,6 @@ size_t ksize(const void *);
158#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 158#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
159#endif 159#endif
160 160
161#ifdef CONFIG_SLOB
162/*
163 * Common fields provided in kmem_cache by all slab allocators
164 * This struct is either used directly by the allocator (SLOB)
165 * or the allocator must include definitions for all fields
166 * provided in kmem_cache_common in their definition of kmem_cache.
167 *
168 * Once we can do anonymous structs (C11 standard) we could put a
169 * anonymous struct definition in these allocators so that the
170 * separate allocations in the kmem_cache structure of SLAB and
171 * SLUB is no longer needed.
172 */
173struct kmem_cache {
174 unsigned int object_size;/* The original size of the object */
175 unsigned int size; /* The aligned/padded/added on size */
176 unsigned int align; /* Alignment as calculated */
177 unsigned long flags; /* Active flags on the slab */
178 const char *name; /* Slab name for sysfs */
179 int refcount; /* Use counter */
180 void (*ctor)(void *); /* Called on object slot creation */
181 struct list_head list; /* List of all slab caches on the system */
182};
183
184#endif /* CONFIG_SLOB */
185
186/* 161/*
187 * Kmalloc array related definitions 162 * Kmalloc array related definitions
188 */ 163 */
@@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
363} 338}
364#endif /* CONFIG_TRACING */ 339#endif /* CONFIG_TRACING */
365 340
366#ifdef CONFIG_SLAB
367#include <linux/slab_def.h>
368#endif
369
370#ifdef CONFIG_SLUB
371#include <linux/slub_def.h>
372#endif
373
374extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); 341extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
375 342
376#ifdef CONFIG_TRACING 343#ifdef CONFIG_TRACING
@@ -582,37 +549,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
582 * allocator where we care about the real place the memory allocation 549 * allocator where we care about the real place the memory allocation
583 * request comes from. 550 * request comes from.
584 */ 551 */
585#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
586 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
587 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
588extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); 552extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
589#define kmalloc_track_caller(size, flags) \ 553#define kmalloc_track_caller(size, flags) \
590 __kmalloc_track_caller(size, flags, _RET_IP_) 554 __kmalloc_track_caller(size, flags, _RET_IP_)
591#else
592#define kmalloc_track_caller(size, flags) \
593 __kmalloc(size, flags)
594#endif /* DEBUG_SLAB */
595 555
596#ifdef CONFIG_NUMA 556#ifdef CONFIG_NUMA
597/*
598 * kmalloc_node_track_caller is a special version of kmalloc_node that
599 * records the calling function of the routine calling it for slab leak
600 * tracking instead of just the calling function (confusing, eh?).
601 * It's useful when the call to kmalloc_node comes from a widely-used
602 * standard allocator where we care about the real place the memory
603 * allocation request comes from.
604 */
605#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
606 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
607 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
608extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); 557extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
609#define kmalloc_node_track_caller(size, flags, node) \ 558#define kmalloc_node_track_caller(size, flags, node) \
610 __kmalloc_node_track_caller(size, flags, node, \ 559 __kmalloc_node_track_caller(size, flags, node, \
611 _RET_IP_) 560 _RET_IP_)
612#else
613#define kmalloc_node_track_caller(size, flags, node) \
614 __kmalloc_node(size, flags, node)
615#endif
616 561
617#else /* CONFIG_NUMA */ 562#else /* CONFIG_NUMA */
618 563
@@ -650,14 +595,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
650 return kmalloc_node(size, flags | __GFP_ZERO, node); 595 return kmalloc_node(size, flags | __GFP_ZERO, node);
651} 596}
652 597
653/* 598unsigned int kmem_cache_size(struct kmem_cache *s);
654 * Determine the size of a slab object
655 */
656static inline unsigned int kmem_cache_size(struct kmem_cache *s)
657{
658 return s->object_size;
659}
660
661void __init kmem_cache_init_late(void); 599void __init kmem_cache_init_late(void);
662 600
663#endif /* _LINUX_SLAB_H */ 601#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8235dfbb3b05..b869d1662ba3 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -8,6 +8,8 @@
8 */ 8 */
9 9
10struct kmem_cache { 10struct kmem_cache {
11 struct array_cache __percpu *cpu_cache;
12
11/* 1) Cache tunables. Protected by slab_mutex */ 13/* 1) Cache tunables. Protected by slab_mutex */
12 unsigned int batchcount; 14 unsigned int batchcount;
13 unsigned int limit; 15 unsigned int limit;
@@ -71,23 +73,7 @@ struct kmem_cache {
71 struct memcg_cache_params *memcg_params; 73 struct memcg_cache_params *memcg_params;
72#endif 74#endif
73 75
74/* 6) per-cpu/per-node data, touched during every alloc/free */ 76 struct kmem_cache_node *node[MAX_NUMNODES];
75 /*
76 * We put array[] at the end of kmem_cache, because we want to size
77 * this array to nr_cpu_ids slots instead of NR_CPUS
78 * (see kmem_cache_init())
79 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
80 * is statically defined, so we reserve the max number of cpus.
81 *
82 * We also need to guarantee that the list is able to accomodate a
83 * pointer for each node since "nodelists" uses the remainder of
84 * available pointers.
85 */
86 struct kmem_cache_node **node;
87 struct array_cache *array[NR_CPUS + MAX_NUMNODES];
88 /*
89 * Do not add fields after array[]
90 */
91}; 77};
92 78
93#endif /* _LINUX_SLAB_DEF_H */ 79#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 34347f26be9b..93dff5fff524 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -100,6 +100,7 @@ int smp_call_function_any(const struct cpumask *mask,
100 smp_call_func_t func, void *info, int wait); 100 smp_call_func_t func, void *info, int wait);
101 101
102void kick_all_cpus_sync(void); 102void kick_all_cpus_sync(void);
103void wake_up_all_idle_cpus(void);
103 104
104/* 105/*
105 * Generic and arch helpers 106 * Generic and arch helpers
@@ -148,6 +149,7 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
148} 149}
149 150
150static inline void kick_all_cpus_sync(void) { } 151static inline void kick_all_cpus_sync(void) { }
152static inline void wake_up_all_idle_cpus(void) { }
151 153
152#endif /* !SMP */ 154#endif /* !SMP */
153 155
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
new file mode 100644
index 000000000000..dad035c16d94
--- /dev/null
+++ b/include/linux/soc/ti/knav_dma.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright (C) 2014 Texas Instruments Incorporated
 3 * Authors: Sandeep Nair <sandeep_n@ti.com>
 4 * Cyril Chemparathy <cyril@ti.com>
 5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
18#define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
19
20/*
21 * PKTDMA descriptor manipulation macros for host packet descriptor
22 */
23#define MASK(x) (BIT(x) - 1)
24#define KNAV_DMA_DESC_PKT_LEN_MASK MASK(22)
25#define KNAV_DMA_DESC_PKT_LEN_SHIFT 0
26#define KNAV_DMA_DESC_PS_INFO_IN_SOP BIT(22)
27#define KNAV_DMA_DESC_PS_INFO_IN_DESC 0
28#define KNAV_DMA_DESC_TAG_MASK MASK(8)
29#define KNAV_DMA_DESC_SAG_HI_SHIFT 24
30#define KNAV_DMA_DESC_STAG_LO_SHIFT 16
31#define KNAV_DMA_DESC_DTAG_HI_SHIFT 8
32#define KNAV_DMA_DESC_DTAG_LO_SHIFT 0
33#define KNAV_DMA_DESC_HAS_EPIB BIT(31)
34#define KNAV_DMA_DESC_NO_EPIB 0
35#define KNAV_DMA_DESC_PSLEN_SHIFT 24
36#define KNAV_DMA_DESC_PSLEN_MASK MASK(6)
37#define KNAV_DMA_DESC_ERR_FLAG_SHIFT 20
38#define KNAV_DMA_DESC_ERR_FLAG_MASK MASK(4)
39#define KNAV_DMA_DESC_PSFLAG_SHIFT 16
40#define KNAV_DMA_DESC_PSFLAG_MASK MASK(4)
41#define KNAV_DMA_DESC_RETQ_SHIFT 0
42#define KNAV_DMA_DESC_RETQ_MASK MASK(14)
43#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22)
44
45#define KNAV_DMA_NUM_EPIB_WORDS 4
46#define KNAV_DMA_NUM_PS_WORDS 16
47#define KNAV_DMA_FDQ_PER_CHAN 4
48
49/* Tx channel scheduling priority */
50enum knav_dma_tx_priority {
51 DMA_PRIO_HIGH = 0,
52 DMA_PRIO_MED_H,
53 DMA_PRIO_MED_L,
54 DMA_PRIO_LOW
55};
56
57/* Rx channel error handling mode during buffer starvation */
58enum knav_dma_rx_err_mode {
59 DMA_DROP = 0,
60 DMA_RETRY
61};
62
63/* Rx flow size threshold configuration */
64enum knav_dma_rx_thresholds {
65 DMA_THRESH_NONE = 0,
66 DMA_THRESH_0 = 1,
67 DMA_THRESH_0_1 = 3,
68 DMA_THRESH_0_1_2 = 7
69};
70
71/* Descriptor type */
72enum knav_dma_desc_type {
73 DMA_DESC_HOST = 0,
74 DMA_DESC_MONOLITHIC = 2
75};
76
77/**
78 * struct knav_dma_tx_cfg: Tx channel configuration
79 * @filt_einfo: Filter extended packet info
80 * @filt_pswords: Filter PS words present
 81 * @priority: Tx channel scheduling priority
82 */
83struct knav_dma_tx_cfg {
84 bool filt_einfo;
85 bool filt_pswords;
86 enum knav_dma_tx_priority priority;
87};
88
89/**
90 * struct knav_dma_rx_cfg: Rx flow configuration
91 * @einfo_present: Extended packet info present
92 * @psinfo_present: PS words present
 93 * @err_mode: Error handling mode during buffer starvation
 94 * @desc_type: Host or Monolithic descriptor type
95 * @psinfo_at_sop: PS word located at start of packet
96 * @sop_offset: Start of packet offset
97 * @dst_q: Destination queue for a given flow
98 * @thresh: Rx flow size threshold
99 * @fdq[]: Free desc Queue array
100 * @sz_thresh0: RX packet size threshold 0
101 * @sz_thresh1: RX packet size threshold 1
102 * @sz_thresh2: RX packet size threshold 2
103 */
104struct knav_dma_rx_cfg {
105 bool einfo_present;
106 bool psinfo_present;
107 enum knav_dma_rx_err_mode err_mode;
108 enum knav_dma_desc_type desc_type;
109 bool psinfo_at_sop;
110 unsigned int sop_offset;
111 unsigned int dst_q;
112 enum knav_dma_rx_thresholds thresh;
113 unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN];
114 unsigned int sz_thresh0;
115 unsigned int sz_thresh1;
116 unsigned int sz_thresh2;
117};
118
119/**
120 * struct knav_dma_cfg: Pktdma channel configuration
 121 * @direction: DMA transfer direction (Tx channel or Rx flow)
122 * @tx: Tx channel configuration
123 * @rx: Rx flow configuration
124 */
125struct knav_dma_cfg {
126 enum dma_transfer_direction direction;
127 union {
128 struct knav_dma_tx_cfg tx;
129 struct knav_dma_rx_cfg rx;
130 } u;
131};
132
133/**
134 * struct knav_dma_desc: Host packet descriptor layout
135 * @desc_info: Descriptor information like id, type, length
136 * @tag_info: Flow tag info written in during RX
137 * @packet_info: Queue Manager, policy, flags etc
138 * @buff_len: Buffer length in bytes
139 * @buff: Buffer pointer
140 * @next_desc: For chaining the descriptors
 141 * @orig_len: original length, since 'buff_len' can be overwritten
 142 * @orig_buff: original buffer pointer, since 'buff' can be overwritten
 143 * @epib: Extended packet info block
 144 * @psdata: Protocol specific data
145 */
146struct knav_dma_desc {
147 u32 desc_info;
148 u32 tag_info;
149 u32 packet_info;
150 u32 buff_len;
151 u32 buff;
152 u32 next_desc;
153 u32 orig_len;
154 u32 orig_buff;
155 u32 epib[KNAV_DMA_NUM_EPIB_WORDS];
156 u32 psdata[KNAV_DMA_NUM_PS_WORDS];
157 u32 pad[4];
158} ____cacheline_aligned;
159
160#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
161void *knav_dma_open_channel(struct device *dev, const char *name,
162 struct knav_dma_cfg *config);
163void knav_dma_close_channel(void *channel);
164#else
165static inline void *knav_dma_open_channel(struct device *dev, const char *name,
166 struct knav_dma_cfg *config)
167{
168 return (void *) NULL;
169}
170static inline void knav_dma_close_channel(void *channel)
171{}
172
173#endif
174
175#endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */
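Illustration only: how a hypothetical Keystone client driver might open a transmit channel; the channel name "nettx" and all settings are examples.

/* Hypothetical client code; names and values are examples only. */
static void *my_open_tx_channel(struct device *dev)
{
        struct knav_dma_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.direction         = DMA_MEM_TO_DEV;
        cfg.u.tx.filt_einfo   = false;
        cfg.u.tx.filt_pswords = false;
        cfg.u.tx.priority     = DMA_PRIO_LOW;

        return knav_dma_open_channel(dev, "nettx", &cfg);
}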
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
new file mode 100644
index 000000000000..9f0ebb3bad27
--- /dev/null
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -0,0 +1,90 @@
1/*
2 * Keystone Navigator Queue Management Sub-System header
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
5 * Author: Sandeep Nair <sandeep_n@ti.com>
6 * Cyril Chemparathy <cyril@ti.com>
7 * Santosh Shilimkar <santosh.shilimkar@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#ifndef __SOC_TI_KNAV_QMSS_H__
20#define __SOC_TI_KNAV_QMSS_H__
21
22#include <linux/err.h>
23#include <linux/time.h>
24#include <linux/atomic.h>
25#include <linux/device.h>
26#include <linux/fcntl.h>
27#include <linux/dma-mapping.h>
28
29/* queue types */
30#define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */
31#define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */
32#define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */
33
34/* queue flags */
35#define KNAV_QUEUE_SHARED 0x0001 /* Queue can be shared */
36
37/**
38 * enum knav_queue_ctrl_cmd - queue operations.
39 * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue
40 * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible
41 * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle.
42 * @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle.
43 * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle.
44 * @KNAV_QUEUE_GET_COUNT: Get number of queues.
45 */
46enum knav_queue_ctrl_cmd {
47 KNAV_QUEUE_GET_ID,
48 KNAV_QUEUE_FLUSH,
49 KNAV_QUEUE_SET_NOTIFIER,
50 KNAV_QUEUE_ENABLE_NOTIFY,
51 KNAV_QUEUE_DISABLE_NOTIFY,
52 KNAV_QUEUE_GET_COUNT
53};
54
55/* Queue notifier callback prototype */
56typedef void (*knav_queue_notify_fn)(void *arg);
57
58/**
59 * struct knav_queue_notify_config: Notifier configuration
60 * @fn: Notifier function
61 * @fn_arg: Notifier function arguments
62 */
63struct knav_queue_notify_config {
64 knav_queue_notify_fn fn;
65 void *fn_arg;
66};
67
68void *knav_queue_open(const char *name, unsigned id,
69 unsigned flags);
70void knav_queue_close(void *qhandle);
71int knav_queue_device_control(void *qhandle,
72 enum knav_queue_ctrl_cmd cmd,
73 unsigned long arg);
74dma_addr_t knav_queue_pop(void *qhandle, unsigned *size);
75int knav_queue_push(void *qhandle, dma_addr_t dma,
76 unsigned size, unsigned flags);
77
78void *knav_pool_create(const char *name,
79 int num_desc, int region_id);
80void knav_pool_destroy(void *ph);
81int knav_pool_count(void *ph);
82void *knav_pool_desc_get(void *ph);
83void knav_pool_desc_put(void *ph, void *desc);
84int knav_pool_desc_map(void *ph, void *desc, unsigned size,
85 dma_addr_t *dma, unsigned *dma_sz);
86void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz);
87dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt);
88void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma);
89
90#endif /* __SOC_TI_KNAV_QMSS_H__ */
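Illustration only: mapping a pool descriptor and pushing it onto an already opened queue; my_push_one() is hypothetical and 'pool' is assumed to come from knav_pool_create().

/* Hypothetical producer step; qh is a handle from knav_queue_open(). */
static int my_push_one(void *pool, void *qh, void *desc, unsigned desc_sz)
{
        dma_addr_t dma;
        unsigned dma_sz;
        int ret;

        ret = knav_pool_desc_map(pool, desc, desc_sz, &dma, &dma_sz);
        if (ret)
                return ret;

        return knav_queue_push(qh, dma, dma_sz, 0);
}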
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
index 2d676d5aaa89..aa07d7b32568 100644
--- a/include/linux/spi/mcp23s08.h
+++ b/include/linux/spi/mcp23s08.h
@@ -22,4 +22,22 @@ struct mcp23s08_platform_data {
22 * base to base+15 (or base+31 for s17 variant). 22 * base to base+15 (or base+31 for s17 variant).
23 */ 23 */
24 unsigned base; 24 unsigned base;
 25 /* Marks the device as an interrupt controller.
 26 * NOTE: The interrupt functionality is only supported for the i2c
 27 * versions of the chips. The spi chips can also do interrupts,
 28 * but this is not yet supported by the Linux driver.
 29 */
30 bool irq_controller;
31
32 /* Sets the mirror flag in the IOCON register. Devices
33 * with two interrupt outputs (these are the devices ending with 17 and
34 * those that have 16 IOs) have two IO banks: IO 0-7 form bank 1 and
35 * IO 8-15 are bank 2. These chips have two different interrupt outputs:
36 * One for bank 1 and another for bank 2. If irq-mirror is set, both
37 * interrupts are generated regardless of the bank that an input change
 38 * occurred on. If it is not set, interrupts are only generated for
 39 * the bank they belong to.
 40 * On devices with only one interrupt output this property has no effect.
41 */
42 bool mirror;
25}; 43};
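Illustration only: board data for a hypothetical MCP23017 wired up as an interrupt controller with both INT pins mirrored; the GPIO base is an arbitrary example.

/* Hypothetical platform data; values are examples only. */
static struct mcp23s08_platform_data my_mcp23017_pdata = {
        .base           = 200,
        .irq_controller = true,
        .mirror         = true,
};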
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e713543336f1..46d188a9947c 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -253,6 +253,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
253 * the device whose settings are being modified. 253 * the device whose settings are being modified.
254 * @transfer: adds a message to the controller's transfer queue. 254 * @transfer: adds a message to the controller's transfer queue.
255 * @cleanup: frees controller-specific state 255 * @cleanup: frees controller-specific state
256 * @can_dma: determine whether this master supports DMA
256 * @queued: whether this master is providing an internal message queue 257 * @queued: whether this master is providing an internal message queue
257 * @kworker: thread struct for message pump 258 * @kworker: thread struct for message pump
258 * @kworker_task: pointer to task for message pump kworker thread 259 * @kworker_task: pointer to task for message pump kworker thread
@@ -262,6 +263,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
262 * @cur_msg: the currently in-flight message 263 * @cur_msg: the currently in-flight message
263 * @cur_msg_prepared: spi_prepare_message was called for the currently 264 * @cur_msg_prepared: spi_prepare_message was called for the currently
264 * in-flight message 265 * in-flight message
266 * @cur_msg_mapped: message has been mapped for DMA
265 * @xfer_completion: used by core transfer_one_message() 267 * @xfer_completion: used by core transfer_one_message()
266 * @busy: message pump is busy 268 * @busy: message pump is busy
267 * @running: message pump is running 269 * @running: message pump is running
@@ -299,6 +301,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
299 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 301 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
300 * number. Any individual value may be -ENOENT for CS lines that 302 * number. Any individual value may be -ENOENT for CS lines that
301 * are not GPIOs (driven by the SPI controller itself). 303 * are not GPIOs (driven by the SPI controller itself).
304 * @dma_tx: DMA transmit channel
305 * @dma_rx: DMA receive channel
306 * @dummy_rx: dummy receive buffer for full-duplex devices
307 * @dummy_tx: dummy transmit buffer for full-duplex devices
302 * 308 *
303 * Each SPI master controller can communicate with one or more @spi_device 309 * Each SPI master controller can communicate with one or more @spi_device
304 * children. These make a small bus, sharing MOSI, MISO and SCK signals 310 * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -632,6 +638,7 @@ struct spi_transfer {
632 * addresses for each transfer buffer 638 * addresses for each transfer buffer
633 * @complete: called to report transaction completions 639 * @complete: called to report transaction completions
634 * @context: the argument to complete() when it's called 640 * @context: the argument to complete() when it's called
641 * @frame_length: the total number of bytes in the message
635 * @actual_length: the total number of bytes that were transferred in all 642 * @actual_length: the total number of bytes that were transferred in all
636 * successful segments 643 * successful segments
637 * @status: zero for success, else negative errno 644 * @status: zero for success, else negative errno
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3f2867ff0ced..262ba4ef9a8e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -197,7 +197,13 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
197 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ 197 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
198 } while (0) 198 } while (0)
199#else 199#else
200# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) 200/*
 201 * Always evaluate the 'subclass' argument so that the compiler does
 202 * not warn about set-but-not-used variables when building with
203 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
204 */
205# define raw_spin_lock_nested(lock, subclass) \
206 _raw_spin_lock(((void)(subclass), (lock)))
201# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) 207# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
202#endif 208#endif
203 209
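Illustration only: the kind of caller the comment above is about; my_lock_nested() and 'depth' are hypothetical.

/*
 * Hypothetical caller: without the (void)(subclass) evaluation above, a
 * CONFIG_DEBUG_LOCK_ALLOC=n build would warn that 'nest' is set but unused.
 */
static void my_lock_nested(raw_spinlock_t *lock, int depth)
{
        int nest = depth + 1;   /* consumed only by the lockdep variant */

        raw_spin_lock_nested(lock, nest);
        /* ... critical section ... */
        raw_spin_unlock(lock);
}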
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cf61ecd148e0..21678464883a 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -280,7 +280,6 @@ struct svc_rqst {
280 bool rq_splice_ok; /* turned off in gss privacy 280 bool rq_splice_ok; /* turned off in gss privacy
281 * to prevent encrypting page 281 * to prevent encrypting page
282 * cache pages */ 282 * cache pages */
283 wait_queue_head_t rq_wait; /* synchronization */
284 struct task_struct *rq_task; /* service thread */ 283 struct task_struct *rq_task; /* service thread */
285}; 284};
286 285
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index fcbfe8783243..cf391eef2e6d 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -357,6 +357,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
357#define XPRT_CONNECTION_ABORT (7) 357#define XPRT_CONNECTION_ABORT (7)
358#define XPRT_CONNECTION_CLOSE (8) 358#define XPRT_CONNECTION_CLOSE (8)
359#define XPRT_CONGESTED (9) 359#define XPRT_CONGESTED (9)
360#define XPRT_CONNECTION_REUSE (10)
360 361
361static inline void xprt_set_connected(struct rpc_xprt *xprt) 362static inline void xprt_set_connected(struct rpc_xprt *xprt)
362{ 363{
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 519064e0c943..3388c1b6f7d8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,6 +189,8 @@ struct platform_suspend_ops {
189 189
190struct platform_freeze_ops { 190struct platform_freeze_ops {
191 int (*begin)(void); 191 int (*begin)(void);
192 int (*prepare)(void);
193 void (*restore)(void);
192 void (*end)(void); 194 void (*end)(void);
193}; 195};
194 196
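Illustration only: what a platform might plug into the extended platform_freeze_ops; the callbacks are hypothetical and would presumably be registered through the platform's usual freeze_set_ops() hook.

/* Hypothetical platform glue for the new prepare()/restore() callbacks. */
static int my_freeze_prepare(void)
{
        /* e.g. arm a platform-specific wakeup source */
        return 0;
}

static void my_freeze_restore(void)
{
        /* undo whatever prepare() set up */
}

static const struct platform_freeze_ops my_freeze_ops = {
        .prepare = my_freeze_prepare,
        .restore = my_freeze_restore,
};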
@@ -371,6 +373,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
371extern bool events_check_enabled; 373extern bool events_check_enabled;
372 374
373extern bool pm_wakeup_pending(void); 375extern bool pm_wakeup_pending(void);
376extern void pm_system_wakeup(void);
377extern void pm_wakeup_clear(void);
374extern bool pm_get_wakeup_count(unsigned int *count, bool block); 378extern bool pm_get_wakeup_count(unsigned int *count, bool block);
375extern bool pm_save_wakeup_count(unsigned int count); 379extern bool pm_save_wakeup_count(unsigned int count);
376extern void pm_wakep_autosleep_enabled(bool set); 380extern void pm_wakep_autosleep_enabled(bool set);
@@ -418,6 +422,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
418#define pm_notifier(fn, pri) do { (void)(fn); } while (0) 422#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
419 423
420static inline bool pm_wakeup_pending(void) { return false; } 424static inline bool pm_wakeup_pending(void) { return false; }
425static inline void pm_system_wakeup(void) {}
426static inline void pm_wakeup_clear(void) {}
421 427
422static inline void lock_system_sleep(void) {} 428static inline void lock_system_sleep(void) {}
423static inline void unlock_system_sleep(void) {} 429static inline void unlock_system_sleep(void) {}
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1b72060f093a..37a585beef5c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -327,8 +327,10 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
327extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 327extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
328 gfp_t gfp_mask, nodemask_t *mask); 328 gfp_t gfp_mask, nodemask_t *mask);
329extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); 329extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
330extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 330extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
331 gfp_t gfp_mask, bool noswap); 331 unsigned long nr_pages,
332 gfp_t gfp_mask,
333 bool may_swap);
332extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 334extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
333 gfp_t gfp_mask, bool noswap, 335 gfp_t gfp_mask, bool noswap,
334 struct zone *zone, 336 struct zone *zone,
@@ -354,22 +356,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
354extern int page_evictable(struct page *page); 356extern int page_evictable(struct page *page);
355extern void check_move_unevictable_pages(struct page **, int nr_pages); 357extern void check_move_unevictable_pages(struct page **, int nr_pages);
356 358
357extern unsigned long scan_unevictable_pages;
358extern int scan_unevictable_handler(struct ctl_table *, int,
359 void __user *, size_t *, loff_t *);
360#ifdef CONFIG_NUMA
361extern int scan_unevictable_register_node(struct node *node);
362extern void scan_unevictable_unregister_node(struct node *node);
363#else
364static inline int scan_unevictable_register_node(struct node *node)
365{
366 return 0;
367}
368static inline void scan_unevictable_unregister_node(struct node *node)
369{
370}
371#endif
372
373extern int kswapd_run(int nid); 359extern int kswapd_run(int nid);
374extern void kswapd_stop(int nid); 360extern void kswapd_stop(int nid);
375#ifdef CONFIG_MEMCG 361#ifdef CONFIG_MEMCG
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0f86d85a9ce4..bda9b81357cc 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -65,6 +65,7 @@ struct old_linux_dirent;
65struct perf_event_attr; 65struct perf_event_attr;
66struct file_handle; 66struct file_handle;
67struct sigaltstack; 67struct sigaltstack;
68union bpf_attr;
68 69
69#include <linux/types.h> 70#include <linux/types.h>
70#include <linux/aio_abi.h> 71#include <linux/aio_abi.h>
@@ -875,5 +876,5 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
875 const char __user *uargs); 876 const char __user *uargs);
876asmlinkage long sys_getrandom(char __user *buf, size_t count, 877asmlinkage long sys_getrandom(char __user *buf, size_t count,
877 unsigned int flags); 878 unsigned int flags);
878 879asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
879#endif 880#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fa5258f322e7..c2dee7deefa8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,7 +19,6 @@
19 19
20 20
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/dmaengine.h>
23#include <net/sock.h> 22#include <net/sock.h>
24#include <net/inet_connection_sock.h> 23#include <net/inet_connection_sock.h>
25#include <net/inet_timewait_sock.h> 24#include <net/inet_timewait_sock.h>
@@ -166,13 +165,6 @@ struct tcp_sock {
166 struct iovec *iov; 165 struct iovec *iov;
167 int memory; 166 int memory;
168 int len; 167 int len;
169#ifdef CONFIG_NET_DMA
170 /* members for async copy */
171 struct dma_chan *dma_chan;
172 int wakeup;
173 struct dma_pinned_list *pinned_list;
174 dma_cookie_t dma_cookie;
175#endif
176 } ucopy; 168 } ucopy;
177 169
178 u32 snd_wl1; /* Sequence for window update */ 170 u32 snd_wl1; /* Sequence for window update */
@@ -276,7 +268,7 @@ struct tcp_sock {
276 u32 retrans_stamp; /* Timestamp of the last retransmit, 268 u32 retrans_stamp; /* Timestamp of the last retransmit,
277 * also used in SYN-SENT to remember stamp of 269 * also used in SYN-SENT to remember stamp of
278 * the first SYN. */ 270 * the first SYN. */
279 u32 undo_marker; /* tracking retrans started here. */ 271 u32 undo_marker; /* snd_una upon a new recovery episode. */
280 int undo_retrans; /* number of undoable retransmissions. */ 272 int undo_retrans; /* number of undoable retransmissions. */
281 u32 total_retrans; /* Total retransmits for entire connection */ 273 u32 total_retrans; /* Total retransmits for entire connection */
282 274
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 932b76392248..884d6263e962 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -268,7 +268,7 @@ struct kim_data_s {
268 struct st_data_s *core_data; 268 struct st_data_s *core_data;
269 struct chip_version version; 269 struct chip_version version;
270 unsigned char ldisc_install; 270 unsigned char ldisc_install;
271 unsigned char dev_name[UART_DEV_NAME_LEN]; 271 unsigned char dev_name[UART_DEV_NAME_LEN + 1];
272 unsigned char flow_cntrl; 272 unsigned char flow_cntrl;
273 unsigned long baud_rate; 273 unsigned long baud_rate;
274}; 274};
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 059052306831..595ee86f5e0d 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -181,19 +181,12 @@ static inline bool tick_nohz_full_cpu(int cpu)
181 return cpumask_test_cpu(cpu, tick_nohz_full_mask); 181 return cpumask_test_cpu(cpu, tick_nohz_full_mask);
182} 182}
183 183
184extern void tick_nohz_init(void);
185extern void __tick_nohz_full_check(void); 184extern void __tick_nohz_full_check(void);
185extern void tick_nohz_full_kick(void);
186extern void tick_nohz_full_kick_cpu(int cpu); 186extern void tick_nohz_full_kick_cpu(int cpu);
187
188static inline void tick_nohz_full_kick(void)
189{
190 tick_nohz_full_kick_cpu(smp_processor_id());
191}
192
193extern void tick_nohz_full_kick_all(void); 187extern void tick_nohz_full_kick_all(void);
194extern void __tick_nohz_task_switch(struct task_struct *tsk); 188extern void __tick_nohz_task_switch(struct task_struct *tsk);
195#else 189#else
196static inline void tick_nohz_init(void) { }
197static inline bool tick_nohz_full_enabled(void) { return false; } 190static inline bool tick_nohz_full_enabled(void) { return false; }
198static inline bool tick_nohz_full_cpu(int cpu) { return false; } 191static inline bool tick_nohz_full_cpu(int cpu) { return false; }
199static inline void __tick_nohz_full_check(void) { } 192static inline void __tick_nohz_full_check(void) { }
diff --git a/include/linux/topology.h b/include/linux/topology.h
index dda6ee521e74..909b6e43b694 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -119,11 +119,20 @@ static inline int numa_node_id(void)
119 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). 119 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
120 */ 120 */
121DECLARE_PER_CPU(int, _numa_mem_); 121DECLARE_PER_CPU(int, _numa_mem_);
122extern int _node_numa_mem_[MAX_NUMNODES];
122 123
123#ifndef set_numa_mem 124#ifndef set_numa_mem
124static inline void set_numa_mem(int node) 125static inline void set_numa_mem(int node)
125{ 126{
126 this_cpu_write(_numa_mem_, node); 127 this_cpu_write(_numa_mem_, node);
128 _node_numa_mem_[numa_node_id()] = node;
129}
130#endif
131
132#ifndef node_to_mem_node
133static inline int node_to_mem_node(int node)
134{
135 return _node_numa_mem_[node];
127} 136}
128#endif 137#endif
129 138
@@ -146,6 +155,7 @@ static inline int cpu_to_mem(int cpu)
146static inline void set_cpu_numa_mem(int cpu, int node) 155static inline void set_cpu_numa_mem(int cpu, int node)
147{ 156{
148 per_cpu(_numa_mem_, cpu) = node; 157 per_cpu(_numa_mem_, cpu) = node;
158 _node_numa_mem_[cpu_to_node(cpu)] = node;
149} 159}
150#endif 160#endif
151 161
@@ -159,6 +169,13 @@ static inline int numa_mem_id(void)
159} 169}
160#endif 170#endif
161 171
172#ifndef node_to_mem_node
173static inline int node_to_mem_node(int node)
174{
175 return node;
176}
177#endif
178
162#ifndef cpu_to_mem 179#ifndef cpu_to_mem
163static inline int cpu_to_mem(int cpu) 180static inline int cpu_to_mem(int cpu)
164{ 181{
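Illustration only: the kind of fallback node_to_mem_node() enables on memoryless nodes; my_alloc_near() is hypothetical.

/* Hypothetical allocation that falls back to the nearest node with memory. */
static struct page *my_alloc_near(int node, gfp_t gfp)
{
        return alloc_pages_node(node_to_mem_node(node), gfp, 0);
}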
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 5ca58fcbaf1b..7759fc3c622d 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -51,7 +51,7 @@
51 51
52/* Definitions for online/offline exerciser. */ 52/* Definitions for online/offline exerciser. */
53int torture_onoff_init(long ooholdoff, long oointerval); 53int torture_onoff_init(long ooholdoff, long oointerval);
54char *torture_onoff_stats(char *page); 54void torture_onoff_stats(void);
55bool torture_onoff_failures(void); 55bool torture_onoff_failures(void);
56 56
57/* Low-rider random number generator. */ 57/* Low-rider random number generator. */
@@ -77,7 +77,8 @@ int torture_stutter_init(int s);
77/* Initialization and cleanup. */ 77/* Initialization and cleanup. */
78bool torture_init_begin(char *ttype, bool v, int *runnable); 78bool torture_init_begin(char *ttype, bool v, int *runnable);
79void torture_init_end(void); 79void torture_init_end(void);
80bool torture_cleanup(void); 80bool torture_cleanup_begin(void);
81void torture_cleanup_end(void);
81bool torture_must_stop(void); 82bool torture_must_stop(void);
82bool torture_must_stop_irq(void); 83bool torture_must_stop_irq(void);
83void torture_kthread_stopping(char *title); 84void torture_kthread_stopping(char *title);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index b1293f15f592..e08e21e5f601 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -157,6 +157,12 @@ extern void syscall_unregfunc(void);
157 * Make sure the alignment of the structure in the __tracepoints section will 157 * Make sure the alignment of the structure in the __tracepoints section will
158 * not add unwanted padding between the beginning of the section and the 158 * not add unwanted padding between the beginning of the section and the
159 * structure. Force alignment to the same alignment as the section start. 159 * structure. Force alignment to the same alignment as the section start.
160 *
161 * When lockdep is enabled, we make sure to always do the RCU portions of
162 * the tracepoint code, regardless of whether tracing is on or we match the
163 * condition. This lets us find RCU issues triggered with tracepoints even
164 * when this tracepoint is off. This code has no purpose other than poking
165 * RCU a bit.
160 */ 166 */
161#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ 167#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
162 extern struct tracepoint __tracepoint_##name; \ 168 extern struct tracepoint __tracepoint_##name; \
@@ -167,6 +173,11 @@ extern void syscall_unregfunc(void);
167 TP_PROTO(data_proto), \ 173 TP_PROTO(data_proto), \
168 TP_ARGS(data_args), \ 174 TP_ARGS(data_args), \
169 TP_CONDITION(cond),,); \ 175 TP_CONDITION(cond),,); \
176 if (IS_ENABLED(CONFIG_LOCKDEP)) { \
177 rcu_read_lock_sched_notrace(); \
178 rcu_dereference_sched(__tracepoint_##name.funcs);\
179 rcu_read_unlock_sched_notrace(); \
180 } \
170 } \ 181 } \
171 __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ 182 __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
172 PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ 183 PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 84132942902a..5171ef8f7b85 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -252,6 +252,7 @@ struct tty_struct {
252 struct rw_semaphore termios_rwsem; 252 struct rw_semaphore termios_rwsem;
253 struct mutex winsize_mutex; 253 struct mutex winsize_mutex;
254 spinlock_t ctrl_lock; 254 spinlock_t ctrl_lock;
255 spinlock_t flow_lock;
255 /* Termios values are protected by the termios rwsem */ 256 /* Termios values are protected by the termios rwsem */
256 struct ktermios termios, termios_locked; 257 struct ktermios termios, termios_locked;
257 struct termiox *termiox; /* May be NULL for unsupported */ 258 struct termiox *termiox; /* May be NULL for unsupported */
@@ -261,8 +262,13 @@ struct tty_struct {
261 unsigned long flags; 262 unsigned long flags;
262 int count; 263 int count;
263 struct winsize winsize; /* winsize_mutex */ 264 struct winsize winsize; /* winsize_mutex */
264 unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1; 265 unsigned long stopped:1, /* flow_lock */
265 unsigned char ctrl_status; /* ctrl_lock */ 266 flow_stopped:1,
267 unused:BITS_PER_LONG - 2;
268 int hw_stopped;
269 unsigned long ctrl_status:8, /* ctrl_lock */
270 packet:1,
271 unused_ctrl:BITS_PER_LONG - 9;
266 unsigned int receive_room; /* Bytes free for queue */ 272 unsigned int receive_room; /* Bytes free for queue */
267 int flow_change; 273 int flow_change;
268 274
@@ -397,7 +403,9 @@ extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
397extern char *tty_name(struct tty_struct *tty, char *buf); 403extern char *tty_name(struct tty_struct *tty, char *buf);
398extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); 404extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
399extern int tty_check_change(struct tty_struct *tty); 405extern int tty_check_change(struct tty_struct *tty);
406extern void __stop_tty(struct tty_struct *tty);
400extern void stop_tty(struct tty_struct *tty); 407extern void stop_tty(struct tty_struct *tty);
408extern void __start_tty(struct tty_struct *tty);
401extern void start_tty(struct tty_struct *tty); 409extern void start_tty(struct tty_struct *tty);
402extern int tty_register_driver(struct tty_driver *driver); 410extern int tty_register_driver(struct tty_driver *driver);
403extern int tty_unregister_driver(struct tty_driver *driver); 411extern int tty_unregister_driver(struct tty_driver *driver);
@@ -411,6 +419,7 @@ extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
411extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp, 419extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
412 int buflen); 420 int buflen);
413extern void tty_write_message(struct tty_struct *tty, char *msg); 421extern void tty_write_message(struct tty_struct *tty, char *msg);
422extern int tty_send_xchar(struct tty_struct *tty, char ch);
414extern int tty_put_char(struct tty_struct *tty, unsigned char c); 423extern int tty_put_char(struct tty_struct *tty, unsigned char c);
415extern int tty_chars_in_buffer(struct tty_struct *tty); 424extern int tty_chars_in_buffer(struct tty_struct *tty);
416extern int tty_write_room(struct tty_struct *tty); 425extern int tty_write_room(struct tty_struct *tty);
@@ -495,8 +504,6 @@ extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
495extern struct mutex tty_mutex; 504extern struct mutex tty_mutex;
496extern spinlock_t tty_files_lock; 505extern spinlock_t tty_files_lock;
497 506
498extern void tty_write_unlock(struct tty_struct *tty);
499extern int tty_write_lock(struct tty_struct *tty, int ndelay);
500#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) 507#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
501 508
502extern void tty_port_init(struct tty_port *port); 509extern void tty_port_init(struct tty_port *port);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index e48c608a8fa8..92e337c18839 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -152,6 +152,8 @@
152 * This routine notifies the tty driver that it should stop 152 * This routine notifies the tty driver that it should stop
153 * outputting characters to the tty device. 153 * outputting characters to the tty device.
154 * 154 *
155 * Called with ->flow_lock held. Serialized with start() method.
156 *
155 * Optional: 157 * Optional:
156 * 158 *
157 * Note: Call stop_tty not this method. 159 * Note: Call stop_tty not this method.
@@ -161,6 +163,8 @@
 161 * This routine notifies the tty driver that it may resume sending 163
162 * characters to the tty device. 164 * characters to the tty device.
163 * 165 *
166 * Called with ->flow_lock held. Serialized with stop() method.
167 *
164 * Optional: 168 * Optional:
165 * 169 *
166 * Note: Call start_tty not this method. 170 * Note: Call start_tty not this method.
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 247cfdcc4b08..ee3277593222 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,7 +49,11 @@ struct udp_sock {
49 unsigned int corkflag; /* Cork is required */ 49 unsigned int corkflag; /* Cork is required */
50 __u8 encap_type; /* Is this an Encapsulation socket? */ 50 __u8 encap_type; /* Is this an Encapsulation socket? */
51 unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ 51 unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
52 no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ 52 no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
53 convert_csum:1;/* On receive, convert checksum
54 * unnecessary to checksum complete
55 * if possible.
56 */
53 /* 57 /*
54 * Following member retains the information to create a UDP header 58 * Following member retains the information to create a UDP header
55 * when the socket is uncorked. 59 * when the socket is uncorked.
@@ -98,6 +102,16 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
98 return udp_sk(sk)->no_check6_rx; 102 return udp_sk(sk)->no_check6_rx;
99} 103}
100 104
105static inline void udp_set_convert_csum(struct sock *sk, bool val)
106{
107 udp_sk(sk)->convert_csum = val;
108}
109
110static inline bool udp_get_convert_csum(struct sock *sk)
111{
112 return udp_sk(sk)->convert_csum;
113}
114
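Illustration only: how an encapsulation setup path might opt a socket into checksum conversion; my_setup_encap_sock() is hypothetical.

/* Hypothetical tunnel socket setup. */
static void my_setup_encap_sock(struct sock *sk)
{
        /* Let validated checksums be upgraded for reuse by inner packets. */
        udp_set_convert_csum(sk, true);
}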
101#define udp_portaddr_for_each_entry(__sk, node, list) \ 115#define udp_portaddr_for_each_entry(__sk, node, list) \
102 hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) 116 hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
103 117
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 48d64e6ab292..9b1581414cd4 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -80,11 +80,14 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
80 struct iov_iter *i); 80 struct iov_iter *i);
81size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, 81size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
82 struct iov_iter *i); 82 struct iov_iter *i);
83size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i);
84size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
85size_t iov_iter_zero(size_t bytes, struct iov_iter *);
83unsigned long iov_iter_alignment(const struct iov_iter *i); 86unsigned long iov_iter_alignment(const struct iov_iter *i);
84void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, 87void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
85 unsigned long nr_segs, size_t count); 88 unsigned long nr_segs, size_t count);
86ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, 89ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
87 unsigned maxpages, size_t *start); 90 size_t maxsize, unsigned maxpages, size_t *start);
88ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, 91ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
89 size_t maxsize, size_t *start); 92 size_t maxsize, size_t *start);
90int iov_iter_npages(const struct iov_iter *i, int maxpages); 93int iov_iter_npages(const struct iov_iter *i, int maxpages);
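Illustration only: the new copy_to_iter() in a driver read path; my_copy_out() is hypothetical.

/* Hypothetical read helper copying a kernel buffer into the iterator. */
static ssize_t my_copy_out(void *kbuf, size_t len, struct iov_iter *to)
{
        size_t copied = copy_to_iter(kbuf, len, to);

        return copied ? copied : -EFAULT;
}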
diff --git a/include/linux/usb.h b/include/linux/usb.h
index d2465bc0e73c..447a7e2fc19b 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1862,6 +1862,18 @@ extern void usb_unregister_notify(struct notifier_block *nb);
1862/* debugfs stuff */ 1862/* debugfs stuff */
1863extern struct dentry *usb_debug_root; 1863extern struct dentry *usb_debug_root;
1864 1864
1865/* LED triggers */
1866enum usb_led_event {
1867 USB_LED_EVENT_HOST = 0,
1868 USB_LED_EVENT_GADGET = 1,
1869};
1870
1871#ifdef CONFIG_USB_LED_TRIG
1872extern void usb_led_activity(enum usb_led_event ev);
1873#else
1874static inline void usb_led_activity(enum usb_led_event ev) {}
1875#endif
1876
1865#endif /* __KERNEL__ */ 1877#endif /* __KERNEL__ */
1866 1878
1867#endif 1879#endif
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index bbe779f640be..e14c09a45c5a 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -31,6 +31,7 @@ struct ci_hdrc_platform_data {
31#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 31#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
32 void (*notify_event) (struct ci_hdrc *ci, unsigned event); 32 void (*notify_event) (struct ci_hdrc *ci, unsigned event);
33 struct regulator *reg_vbus; 33 struct regulator *reg_vbus;
34 bool tpl_support;
34}; 35};
35 36
36/* Default offset of capability registers */ 37/* Default offset of capability registers */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index c3a61853cd13..522cafe26790 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -345,12 +345,13 @@ static inline int usb_ep_queue(struct usb_ep *ep,
345 * @ep:the endpoint associated with the request 345 * @ep:the endpoint associated with the request
346 * @req:the request being canceled 346 * @req:the request being canceled
347 * 347 *
348 * if the request is still active on the endpoint, it is dequeued and its 348 * If the request is still active on the endpoint, it is dequeued and its
349 * completion routine is called (with status -ECONNRESET); else a negative 349 * completion routine is called (with status -ECONNRESET); else a negative
350 * error code is returned. 350 * error code is returned. This is guaranteed to happen before the call to
351 * usb_ep_dequeue() returns.
351 * 352 *
352 * note that some hardware can't clear out write fifos (to unlink the request 353 * Note that some hardware can't clear out write fifos (to unlink the request
353 * at the head of the queue) except as part of disconnecting from usb. such 354 * at the head of the queue) except as part of disconnecting from usb. Such
354 * restrictions prevent drivers from supporting configuration changes, 355 * restrictions prevent drivers from supporting configuration changes,
355 * even to configuration zero (a "chapter 9" requirement). 356 * even to configuration zero (a "chapter 9" requirement).
356 */ 357 */
@@ -816,6 +817,8 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
816 * Called in a context that permits sleeping. 817 * Called in a context that permits sleeping.
817 * @suspend: Invoked on USB suspend. May be called in_interrupt. 818 * @suspend: Invoked on USB suspend. May be called in_interrupt.
818 * @resume: Invoked on USB resume. May be called in_interrupt. 819 * @resume: Invoked on USB resume. May be called in_interrupt.
820 * @reset: Invoked on USB bus reset. It is mandatory for all gadget drivers
821 * and should be called in_interrupt.
819 * @driver: Driver model state for this driver. 822 * @driver: Driver model state for this driver.
820 * 823 *
821 * Devices are disabled till a gadget driver successfully bind()s, which 824 * Devices are disabled till a gadget driver successfully bind()s, which
@@ -873,6 +876,7 @@ struct usb_gadget_driver {
873 void (*disconnect)(struct usb_gadget *); 876 void (*disconnect)(struct usb_gadget *);
874 void (*suspend)(struct usb_gadget *); 877 void (*suspend)(struct usb_gadget *);
875 void (*resume)(struct usb_gadget *); 878 void (*resume)(struct usb_gadget *);
879 void (*reset)(struct usb_gadget *);
876 880
877 /* FIXME support safe rmmod */ 881 /* FIXME support safe rmmod */
878 struct device_driver driver; 882 struct device_driver driver;
@@ -1013,6 +1017,20 @@ extern void usb_gadget_set_state(struct usb_gadget *gadget,
1013 1017
1014/*-------------------------------------------------------------------------*/ 1018/*-------------------------------------------------------------------------*/
1015 1019
1020/* utility to tell udc core that the bus reset occurs */
1021extern void usb_gadget_udc_reset(struct usb_gadget *gadget,
1022 struct usb_gadget_driver *driver);
1023
1024/*-------------------------------------------------------------------------*/
1025
1026/* utility to give requests back to the gadget layer */
1027
1028extern void usb_gadget_giveback_request(struct usb_ep *ep,
1029 struct usb_request *req);
1030
1031
1032/*-------------------------------------------------------------------------*/
1033
1016/* utility wrapping a simple endpoint selection policy */ 1034/* utility wrapping a simple endpoint selection policy */
1017 1035
1018extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *, 1036extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *,
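A sketch, under the assumption of a generic UDC driver, of how the two new helpers might be used: request completion funnels through usb_gadget_giveback_request() instead of calling req->complete() directly, and a bus-reset interrupt is reported to the core so the gadget driver's new ->reset() callback can run. The UDC function names are hypothetical.

#include <linux/usb/gadget.h>

/* Hypothetical UDC fragments. */
static void my_udc_complete(struct usb_ep *ep, struct usb_request *req,
			    int status)
{
	req->status = status;
	/* replaces a direct req->complete(ep, req) call */
	usb_gadget_giveback_request(ep, req);
}

static void my_udc_bus_reset_irq(struct usb_gadget *gadget,
				 struct usb_gadget_driver *driver)
{
	/* lets the UDC core invoke driver->reset() on our behalf */
	usb_gadget_udc_reset(gadget, driver);
}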
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 485cd5e2100c..cd96a2bc3388 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -106,7 +106,8 @@ struct usb_hcd {
106 * OTG and some Host controllers need software interaction with phys; 106 * OTG and some Host controllers need software interaction with phys;
107 * other external phys should be software-transparent 107 * other external phys should be software-transparent
108 */ 108 */
109 struct usb_phy *phy; 109 struct usb_phy *usb_phy;
110 struct phy *phy;
110 111
111 /* Flags that need to be manipulated atomically because they can 112 /* Flags that need to be manipulated atomically because they can
112 * change while the host controller is running. Always use 113 * change while the host controller is running. Always use
@@ -144,6 +145,7 @@ struct usb_hcd {
144 unsigned has_tt:1; /* Integrated TT in root hub */ 145 unsigned has_tt:1; /* Integrated TT in root hub */
145 unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ 146 unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
146 unsigned can_do_streams:1; /* HC supports streams */ 147 unsigned can_do_streams:1; /* HC supports streams */
148 unsigned tpl_support:1; /* OTG & EH TPL support */
147 149
148 unsigned int irq; /* irq allocated */ 150 unsigned int irq; /* irq allocated */
149 void __iomem *regs; /* device memory/io */ 151 void __iomem *regs; /* device memory/io */
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 8c38aa26b3bb..cfe0528cdbb1 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -14,6 +14,7 @@
14#if IS_ENABLED(CONFIG_OF) 14#if IS_ENABLED(CONFIG_OF)
15enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np); 15enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
16enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np); 16enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
17bool of_usb_host_tpl_support(struct device_node *np);
17#else 18#else
18static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np) 19static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
19{ 20{
@@ -25,6 +26,10 @@ of_usb_get_maximum_speed(struct device_node *np)
25{ 26{
26 return USB_SPEED_UNKNOWN; 27 return USB_SPEED_UNKNOWN;
27} 28}
29static inline bool of_usb_host_tpl_support(struct device_node *np)
30{
31 return false;
32}
28#endif 33#endif
29 34
30#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT) 35#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
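Host glue drivers can propagate the device-tree TPL property into the hcd->tpl_support bit added in the hcd.h hunk above; a minimal sketch with a hypothetical probe helper.

#include <linux/of.h>
#include <linux/usb/of.h>
#include <linux/usb/hcd.h>

/* Hypothetical host glue fragment: record OTG/EH Targeted Peripheral
 * List support from the controller's DT node so the hub code can
 * enforce it.
 */
static void my_hcd_parse_dt(struct usb_hcd *hcd, struct device_node *np)
{
	hcd->tpl_support = of_usb_host_tpl_support(np);
}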
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 55a17b188daa..9948c874e3f1 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -8,27 +8,27 @@
8#define __LINUX_USB_QUIRKS_H 8#define __LINUX_USB_QUIRKS_H
9 9
10/* string descriptors must not be fetched using a 255-byte read */ 10/* string descriptors must not be fetched using a 255-byte read */
11#define USB_QUIRK_STRING_FETCH_255 0x00000001 11#define USB_QUIRK_STRING_FETCH_255 BIT(0)
12 12
13/* device can't resume correctly so reset it instead */ 13/* device can't resume correctly so reset it instead */
14#define USB_QUIRK_RESET_RESUME 0x00000002 14#define USB_QUIRK_RESET_RESUME BIT(1)
15 15
16/* device can't handle Set-Interface requests */ 16/* device can't handle Set-Interface requests */
17#define USB_QUIRK_NO_SET_INTF 0x00000004 17#define USB_QUIRK_NO_SET_INTF BIT(2)
18 18
19/* device can't handle its Configuration or Interface strings */ 19/* device can't handle its Configuration or Interface strings */
20#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008 20#define USB_QUIRK_CONFIG_INTF_STRINGS BIT(3)
21 21
22/* device can't be reset(e.g morph devices), don't use reset */ 22/* device can't be reset(e.g morph devices), don't use reset */
23#define USB_QUIRK_RESET 0x00000010 23#define USB_QUIRK_RESET BIT(4)
24 24
25/* device has more interface descriptions than the bNumInterfaces count, 25/* device has more interface descriptions than the bNumInterfaces count,
26 and can't handle talking to these interfaces */ 26 and can't handle talking to these interfaces */
27#define USB_QUIRK_HONOR_BNUMINTERFACES 0x00000020 27#define USB_QUIRK_HONOR_BNUMINTERFACES BIT(5)
28 28
29/* device needs a pause during initialization, after we read the device 29/* device needs a pause during initialization, after we read the device
30 descriptor */ 30 descriptor */
31#define USB_QUIRK_DELAY_INIT 0x00000040 31#define USB_QUIRK_DELAY_INIT BIT(6)
32 32
33/* 33/*
34 * For high speed and super speed interupt endpoints, the USB 2.0 and 34 * For high speed and super speed interupt endpoints, the USB 2.0 and
@@ -39,6 +39,12 @@
39 * Devices with this quirk report their bInterval as the result of this 39 * Devices with this quirk report their bInterval as the result of this
40 * calculation instead of the exponent variable used in the calculation. 40 * calculation instead of the exponent variable used in the calculation.
41 */ 41 */
42#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL 0x00000080 42#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL BIT(7)
43
44/* device can't handle device_qualifier descriptor requests */
45#define USB_QUIRK_DEVICE_QUALIFIER BIT(8)
46
47/* device generates spurious wakeup, ignore remote wakeup capability */
48#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
43 49
44#endif /* __LINUX_USB_QUIRKS_H */ 50#endif /* __LINUX_USB_QUIRKS_H */
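For illustration only, a quirk bit such as the new USB_QUIRK_DEVICE_QUALIFIER is normally attached to a device through a usb_device_id table entry; the VID:PID below is made up and the real table lives in drivers/usb/core/quirks.c.

#include <linux/usb.h>
#include <linux/usb/quirks.h>

static const struct usb_device_id my_quirk_list[] = {
	/* made-up device that cannot handle device_qualifier requests */
	{ USB_DEVICE(0x1234, 0x5678),
	  .driver_info = USB_QUIRK_DEVICE_QUALIFIER },
	{ }	/* terminating entry */
};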
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 9b7de1b46437..a7f2604c5f25 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -73,6 +73,10 @@
73 /* Device advertises UAS but it is broken */ \ 73 /* Device advertises UAS but it is broken */ \
74 US_FLAG(BROKEN_FUA, 0x01000000) \ 74 US_FLAG(BROKEN_FUA, 0x01000000) \
75 /* Cannot handle FUA in WRITE or READ CDBs */ \ 75 /* Cannot handle FUA in WRITE or READ CDBs */ \
76 US_FLAG(NO_ATA_1X, 0x02000000) \
77 /* Cannot handle ATA_12 or ATA_16 CDBs */ \
78 US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
79 /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
76 80
77#define US_FLAG(name, value) US_FL_##name = value , 81#define US_FLAG(name, value) US_FL_##name = value ,
78enum { US_DO_ALL_FLAGS }; 82enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 502073a53dd3..b483abd34493 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
64void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); 64void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
65 65
66int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); 66int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
67void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
67int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain); 68int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
68#else 69#else
69 70
@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
82static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} 83static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
83 84
84static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } 85static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
85static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } 87static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86 88
87#endif 89#endif
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 2c02f3a8d2ba..c37bd4d06739 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -182,7 +182,6 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
182 * vga_get()... 182 * vga_get()...
183 */ 183 */
184 184
185#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
186#ifdef CONFIG_VGA_ARB 185#ifdef CONFIG_VGA_ARB
187extern struct pci_dev *vga_default_device(void); 186extern struct pci_dev *vga_default_device(void);
188extern void vga_set_default_device(struct pci_dev *pdev); 187extern void vga_set_default_device(struct pci_dev *pdev);
@@ -190,7 +189,6 @@ extern void vga_set_default_device(struct pci_dev *pdev);
190static inline struct pci_dev *vga_default_device(void) { return NULL; }; 189static inline struct pci_dev *vga_default_device(void) { return NULL; };
191static inline void vga_set_default_device(struct pci_dev *pdev) { }; 190static inline void vga_set_default_device(struct pci_dev *pdev) { };
192#endif 191#endif
193#endif
194 192
195/** 193/**
196 * vga_conflicts 194 * vga_conflicts
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index ced92345c963..730334cdf037 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -72,6 +72,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
72 THP_ZERO_PAGE_ALLOC, 72 THP_ZERO_PAGE_ALLOC,
73 THP_ZERO_PAGE_ALLOC_FAILED, 73 THP_ZERO_PAGE_ALLOC_FAILED,
74#endif 74#endif
75#ifdef CONFIG_MEMORY_BALLOON
76 BALLOON_INFLATE,
77 BALLOON_DEFLATE,
78#ifdef CONFIG_BALLOON_COMPACTION
79 BALLOON_MIGRATE,
80#endif
81#endif
75#ifdef CONFIG_DEBUG_TLBFLUSH 82#ifdef CONFIG_DEBUG_TLBFLUSH
76#ifdef CONFIG_SMP 83#ifdef CONFIG_SMP
77 NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ 84 NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
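Balloon drivers would account the new counters through the usual vmstat helper; a hedged one-line sketch (the function name is hypothetical, and the counters only exist under CONFIG_MEMORY_BALLOON).

#include <linux/vmstat.h>

/* Hypothetical balloon driver fragment: record one inflated page. */
static void my_balloon_page_enqueued(void)
{
	count_vm_event(BALLOON_INFLATE);
}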
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6fb1ba5f9b2f..e4a8eb9312ea 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -25,7 +25,7 @@ struct wait_bit_key {
25 void *flags; 25 void *flags;
26 int bit_nr; 26 int bit_nr;
27#define WAIT_ATOMIC_T_BIT_NR -1 27#define WAIT_ATOMIC_T_BIT_NR -1
28 unsigned long private; 28 unsigned long timeout;
29}; 29};
30 30
31struct wait_bit_queue { 31struct wait_bit_queue {
@@ -154,6 +154,7 @@ int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_ac
154void wake_up_bit(void *, int); 154void wake_up_bit(void *, int);
155void wake_up_atomic_t(atomic_t *); 155void wake_up_atomic_t(atomic_t *);
156int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned); 156int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
157int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
157int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned); 158int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
158int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); 159int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
159wait_queue_head_t *bit_waitqueue(void *, int); 160wait_queue_head_t *bit_waitqueue(void *, int);
@@ -280,9 +281,11 @@ do { \
280 * wake_up() has to be called after changing any variable that could 281 * wake_up() has to be called after changing any variable that could
281 * change the result of the wait condition. 282 * change the result of the wait condition.
282 * 283 *
283 * The function returns 0 if the @timeout elapsed, or the remaining 284 * Returns:
284 * jiffies (at least 1) if the @condition evaluated to %true before 285 * 0 if the @condition evaluated to %false after the @timeout elapsed,
285 * the @timeout elapsed. 286 * 1 if the @condition evaluated to %true after the @timeout elapsed,
287 * or the remaining jiffies (at least 1) if the @condition evaluated
288 * to %true before the @timeout elapsed.
286 */ 289 */
287#define wait_event_timeout(wq, condition, timeout) \ 290#define wait_event_timeout(wq, condition, timeout) \
288({ \ 291({ \
@@ -363,9 +366,11 @@ do { \
363 * change the result of the wait condition. 366 * change the result of the wait condition.
364 * 367 *
365 * Returns: 368 * Returns:
366 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by 369 * 0 if the @condition evaluated to %false after the @timeout elapsed,
367 * a signal, or the remaining jiffies (at least 1) if the @condition 370 * 1 if the @condition evaluated to %true after the @timeout elapsed,
368 * evaluated to %true before the @timeout elapsed. 371 * the remaining jiffies (at least 1) if the @condition evaluated
372 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
373 * interrupted by a signal.
369 */ 374 */
370#define wait_event_interruptible_timeout(wq, condition, timeout) \ 375#define wait_event_interruptible_timeout(wq, condition, timeout) \
371({ \ 376({ \
@@ -859,6 +864,8 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
859 864
860extern int bit_wait(struct wait_bit_key *); 865extern int bit_wait(struct wait_bit_key *);
861extern int bit_wait_io(struct wait_bit_key *); 866extern int bit_wait_io(struct wait_bit_key *);
867extern int bit_wait_timeout(struct wait_bit_key *);
868extern int bit_wait_io_timeout(struct wait_bit_key *);
862 869
863/** 870/**
864 * wait_on_bit - wait for a bit to be cleared 871 * wait_on_bit - wait for a bit to be cleared
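The clarified wait_event_timeout() return convention can be checked as in this sketch: 0 means the condition was still false when the timeout expired, any positive value means it became true (1 if only at expiry). The wrapper function and its arguments are illustrative.

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int my_wait_for_done(wait_queue_head_t *wq, bool *done)
{
	long ret = wait_event_timeout(*wq, *done, msecs_to_jiffies(100));

	if (!ret)
		return -ETIMEDOUT;	/* condition never became true */

	return 0;			/* true; ret is 1 or the jiffies left */
}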
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a0cc2e95ed1b..b996e6cde6bb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -419,7 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
419 alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ 419 alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
420 1, (name)) 420 1, (name))
421#define create_singlethread_workqueue(name) \ 421#define create_singlethread_workqueue(name) \
422 alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name)) 422 alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
423 423
424extern void destroy_workqueue(struct workqueue_struct *wq); 424extern void destroy_workqueue(struct workqueue_struct *wq);
425 425
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index e44d634e7fb7..05c214760977 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -46,6 +46,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
46 enum zs_mapmode mm); 46 enum zs_mapmode mm);
47void zs_unmap_object(struct zs_pool *pool, unsigned long handle); 47void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
48 48
49u64 zs_get_total_size_bytes(struct zs_pool *pool); 49unsigned long zs_get_total_pages(struct zs_pool *pool);
50 50
51#endif 51#endif
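Callers that previously used the byte-granular API can derive the same figure from the new page count; a minimal sketch.

#include <linux/mm.h>
#include <linux/zsmalloc.h>

/* Former zs_get_total_size_bytes() users: bytes = pages << PAGE_SHIFT. */
static u64 my_pool_size_bytes(struct zs_pool *pool)
{
	return (u64)zs_get_total_pages(pool) << PAGE_SHIFT;
}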
diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h
index 852e96c4bb46..984fb79031de 100644
--- a/include/media/davinci/dm644x_ccdc.h
+++ b/include/media/davinci/dm644x_ccdc.h
@@ -114,7 +114,7 @@ struct ccdc_fault_pixel {
114 /* Number of fault pixel */ 114 /* Number of fault pixel */
115 unsigned short fp_num; 115 unsigned short fp_num;
116 /* Address of fault pixel table */ 116 /* Address of fault pixel table */
117 unsigned int fpc_table_addr; 117 unsigned long fpc_table_addr;
118}; 118};
119 119
120/* Structure for CCDC configuration parameters for raw capture mode passed 120/* Structure for CCDC configuration parameters for raw capture mode passed
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
index c9d06d9f7e6e..398279dd1922 100644
--- a/include/media/omap3isp.h
+++ b/include/media/omap3isp.h
@@ -57,6 +57,8 @@ enum {
57 * 0 - Active high, 1 - Active low 57 * 0 - Active high, 1 - Active low
58 * @vs_pol: Vertical synchronization polarity 58 * @vs_pol: Vertical synchronization polarity
59 * 0 - Active high, 1 - Active low 59 * 0 - Active high, 1 - Active low
60 * @fld_pol: Field signal polarity
61 * 0 - Positive, 1 - Negative
60 * @data_pol: Data polarity 62 * @data_pol: Data polarity
61 * 0 - Normal, 1 - One's complement 63 * 0 - Normal, 1 - One's complement
62 */ 64 */
@@ -65,6 +67,7 @@ struct isp_parallel_platform_data {
65 unsigned int clk_pol:1; 67 unsigned int clk_pol:1;
66 unsigned int hs_pol:1; 68 unsigned int hs_pol:1;
67 unsigned int vs_pol:1; 69 unsigned int vs_pol:1;
70 unsigned int fld_pol:1;
68 unsigned int data_pol:1; 71 unsigned int data_pol:1;
69}; 72};
70 73
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 80f951890b4c..e7a1514075ec 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -135,6 +135,7 @@ void rc_map_init(void);
135#define RC_MAP_DM1105_NEC "rc-dm1105-nec" 135#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
136#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro" 136#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
137#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t" 137#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
138#define RC_MAP_DVBSKY "rc-dvbsky"
138#define RC_MAP_EMPTY "rc-empty" 139#define RC_MAP_EMPTY "rc-empty"
139#define RC_MAP_EM_TERRATEC "rc-em-terratec" 140#define RC_MAP_EM_TERRATEC "rc-em-terratec"
140#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2" 141#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2"
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index fc910a622451..6ef2d01197da 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -295,7 +295,7 @@ struct vb2_buffer {
295 * can return an error if hardware fails, in that case all 295 * can return an error if hardware fails, in that case all
296 * buffers that have been already given by the @buf_queue 296 * buffers that have been already given by the @buf_queue
297 * callback are to be returned by the driver by calling 297 * callback are to be returned by the driver by calling
298 * @vb2_buffer_done(VB2_BUF_STATE_DEQUEUED). 298 * @vb2_buffer_done(VB2_BUF_STATE_QUEUED).
299 * If you need a minimum number of buffers before you can 299 * If you need a minimum number of buffers before you can
300 * start streaming, then set @min_buffers_needed in the 300 * start streaming, then set @min_buffers_needed in the
301 * vb2_queue structure. If that is non-zero then 301 * vb2_queue structure. If that is non-zero then
@@ -356,8 +356,8 @@ struct v4l2_fh;
356 * @buf_struct_size: size of the driver-specific buffer structure; 356 * @buf_struct_size: size of the driver-specific buffer structure;
357 * "0" indicates the driver doesn't want to use a custom buffer 357 * "0" indicates the driver doesn't want to use a custom buffer
358 * structure type, so sizeof(struct vb2_buffer) will is used 358 * structure type, so sizeof(struct vb2_buffer) will is used
359 * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAGS_TIMESTAMP_* and 359 * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and
360 * V4L2_BUF_FLAGS_TSTAMP_SRC_* 360 * V4L2_BUF_FLAG_TSTAMP_SRC_*
361 * @gfp_flags: additional gfp flags used when allocating the buffers. 361 * @gfp_flags: additional gfp flags used when allocating the buffers.
362 * Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32 362 * Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32
363 * to force the buffer allocation to a specific memory zone. 363 * to force the buffer allocation to a specific memory zone.
@@ -366,6 +366,7 @@ struct v4l2_fh;
366 * cannot be started unless at least this number of buffers 366 * cannot be started unless at least this number of buffers
367 * have been queued into the driver. 367 * have been queued into the driver.
368 * 368 *
369 * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
369 * @memory: current memory type used 370 * @memory: current memory type used
370 * @bufs: videobuf buffer structures 371 * @bufs: videobuf buffer structures
371 * @num_buffers: number of allocated/used buffers 372 * @num_buffers: number of allocated/used buffers
@@ -380,6 +381,9 @@ struct v4l2_fh;
380 * @start_streaming_called: start_streaming() was called successfully and we 381 * @start_streaming_called: start_streaming() was called successfully and we
381 * started streaming. 382 * started streaming.
382 * @error: a fatal error occurred on the queue 383 * @error: a fatal error occurred on the queue
384 * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
385 * buffers. Only set for capture queues if qbuf has not yet been
386 * called since poll() needs to return POLLERR in that situation.
383 * @fileio: file io emulator internal data, used only if emulator is active 387 * @fileio: file io emulator internal data, used only if emulator is active
384 * @threadio: thread io internal data, used only if thread is active 388 * @threadio: thread io internal data, used only if thread is active
385 */ 389 */
@@ -399,6 +403,7 @@ struct vb2_queue {
399 u32 min_buffers_needed; 403 u32 min_buffers_needed;
400 404
401/* private: internal use only */ 405/* private: internal use only */
406 struct mutex mmap_lock;
402 enum v4l2_memory memory; 407 enum v4l2_memory memory;
403 struct vb2_buffer *bufs[VIDEO_MAX_FRAME]; 408 struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
404 unsigned int num_buffers; 409 unsigned int num_buffers;
@@ -417,6 +422,7 @@ struct vb2_queue {
417 unsigned int streaming:1; 422 unsigned int streaming:1;
418 unsigned int start_streaming_called:1; 423 unsigned int start_streaming_called:1;
419 unsigned int error:1; 424 unsigned int error:1;
425 unsigned int waiting_for_buffers:1;
420 426
421 struct vb2_fileio_data *fileio; 427 struct vb2_fileio_data *fileio;
422 struct vb2_threadio_data *threadio; 428 struct vb2_threadio_data *threadio;
@@ -588,6 +594,15 @@ vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
588 return 0; 594 return 0;
589} 595}
590 596
597/**
598 * vb2_start_streaming_called() - return streaming status of driver
599 * @q: videobuf queue
600 */
601static inline bool vb2_start_streaming_called(struct vb2_queue *q)
602{
603 return q->start_streaming_called;
604}
605
591/* 606/*
592 * The following functions are not part of the vb2 core API, but are simple 607 * The following functions are not part of the vb2 core API, but are simple
593 * helper functions that you can use in your struct v4l2_file_operations, 608 * helper functions that you can use in your struct v4l2_file_operations,
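The corrected start_streaming documentation above means a driver's error path must hand already-queued buffers back in the QUEUED state; a sketch assuming the usual driver-private buffer wrapper and list (both hypothetical here).

#include <linux/list.h>
#include <media/videobuf2-core.h>

struct my_buffer {
	struct vb2_buffer vb;
	struct list_head list;		/* driver-private queued list */
};

/* Hypothetical start_streaming failure cleanup. */
static void my_return_all_buffers(struct list_head *buf_list)
{
	struct my_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, buf_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
	}
}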
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
new file mode 100644
index 000000000000..975cc7861f18
--- /dev/null
+++ b/include/misc/cxl.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2014 IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef _MISC_CXL_H
11#define _MISC_CXL_H
12
13#ifdef CONFIG_CXL_BASE
14
15#define CXL_IRQ_RANGES 4
16
17struct cxl_irq_ranges {
18 irq_hw_number_t offset[CXL_IRQ_RANGES];
19 irq_hw_number_t range[CXL_IRQ_RANGES];
20};
21
22extern atomic_t cxl_use_count;
23
24static inline bool cxl_ctx_in_use(void)
25{
26 return (atomic_read(&cxl_use_count) != 0);
27}
28
29static inline void cxl_ctx_get(void)
30{
31 atomic_inc(&cxl_use_count);
32}
33
34static inline void cxl_ctx_put(void)
35{
36 atomic_dec(&cxl_use_count);
37}
38
39void cxl_slbia(struct mm_struct *mm);
40
41#else /* CONFIG_CXL_BASE */
42
43static inline bool cxl_ctx_in_use(void) { return false; }
44static inline void cxl_slbia(struct mm_struct *mm) {}
45
46#endif /* CONFIG_CXL_BASE */
47
48#endif
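A hedged sketch of how a CONFIG_CXL_BASE user might bracket work with the new context use count; the wrapper names are made up, and note that only cxl_ctx_in_use() and cxl_slbia() have stubs when CONFIG_CXL_BASE is disabled.

#include <misc/cxl.h>

static void my_cxl_begin(void)
{
	cxl_ctx_get();			/* mark a CXL context as active */
}

static void my_cxl_end(void)
{
	cxl_ctx_put();
}

static bool my_cxl_busy(void)
{
	return cxl_ctx_in_use();
}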
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f679877bb601..d13573bb879e 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -202,8 +202,9 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
202 const struct in6_addr *addr); 202 const struct in6_addr *addr);
203void ipv6_sock_ac_close(struct sock *sk); 203void ipv6_sock_ac_close(struct sock *sk);
204 204
205int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); 205int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
206int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); 206int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
207void ipv6_ac_destroy_dev(struct inet6_dev *idev);
207bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, 208bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
208 const struct in6_addr *addr); 209 const struct in6_addr *addr);
209bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev, 210bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
diff --git a/include/net/ah.h b/include/net/ah.h
index ca95b98969dd..4e2dfa474a7e 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -3,9 +3,6 @@
3 3
4#include <linux/skbuff.h> 4#include <linux/skbuff.h>
5 5
6/* This is the maximum truncated ICV length that we know of. */
7#define MAX_AH_AUTH_LEN 64
8
9struct crypto_ahash; 6struct crypto_ahash;
10 7
11struct ah_data { 8struct ah_data {
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 373000de610d..58695ffeb138 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -120,9 +120,9 @@ struct bt_voice {
120#define BT_RCVMTU 13 120#define BT_RCVMTU 13
121 121
122__printf(1, 2) 122__printf(1, 2)
123int bt_info(const char *fmt, ...); 123void bt_info(const char *fmt, ...);
124__printf(1, 2) 124__printf(1, 2)
125int bt_err(const char *fmt, ...); 125void bt_err(const char *fmt, ...);
126 126
127#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__) 127#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
128#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__) 128#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
@@ -284,6 +284,7 @@ struct hci_req_ctrl {
284struct bt_skb_cb { 284struct bt_skb_cb {
285 __u8 pkt_type; 285 __u8 pkt_type;
286 __u8 incoming; 286 __u8 incoming;
287 __u16 opcode;
287 __u16 expect; 288 __u16 expect;
288 __u8 force_active; 289 __u8 force_active;
289 struct l2cap_chan *chan; 290 struct l2cap_chan *chan;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 3f8547f1c6f8..6e8f24967308 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -385,6 +385,7 @@ enum {
385#define HCI_ERROR_AUTH_FAILURE 0x05 385#define HCI_ERROR_AUTH_FAILURE 0x05
386#define HCI_ERROR_MEMORY_EXCEEDED 0x07 386#define HCI_ERROR_MEMORY_EXCEEDED 0x07
387#define HCI_ERROR_CONNECTION_TIMEOUT 0x08 387#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
388#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
388#define HCI_ERROR_REJ_BAD_ADDR 0x0f 389#define HCI_ERROR_REJ_BAD_ADDR 0x0f
389#define HCI_ERROR_REMOTE_USER_TERM 0x13 390#define HCI_ERROR_REMOTE_USER_TERM 0x13
390#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14 391#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index b5d5af3aa469..37ff1aef0845 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -302,7 +302,7 @@ struct hci_dev {
302 __u32 req_status; 302 __u32 req_status;
303 __u32 req_result; 303 __u32 req_result;
304 304
305 struct crypto_blkcipher *tfm_aes; 305 void *smp_data;
306 306
307 struct discovery_state discovery; 307 struct discovery_state discovery;
308 struct hci_conn_hash conn_hash; 308 struct hci_conn_hash conn_hash;
@@ -464,6 +464,8 @@ struct hci_conn_params {
464 HCI_AUTO_CONN_ALWAYS, 464 HCI_AUTO_CONN_ALWAYS,
465 HCI_AUTO_CONN_LINK_LOSS, 465 HCI_AUTO_CONN_LINK_LOSS,
466 } auto_connect; 466 } auto_connect;
467
468 struct hci_conn *conn;
467}; 469};
468 470
469extern struct list_head hci_dev_list; 471extern struct list_head hci_dev_list;
@@ -537,7 +539,6 @@ enum {
537 HCI_CONN_RSWITCH_PEND, 539 HCI_CONN_RSWITCH_PEND,
538 HCI_CONN_MODE_CHANGE_PEND, 540 HCI_CONN_MODE_CHANGE_PEND,
539 HCI_CONN_SCO_SETUP_PEND, 541 HCI_CONN_SCO_SETUP_PEND,
540 HCI_CONN_LE_SMP_PEND,
541 HCI_CONN_MGMT_CONNECTED, 542 HCI_CONN_MGMT_CONNECTED,
542 HCI_CONN_SSP_ENABLED, 543 HCI_CONN_SSP_ENABLED,
543 HCI_CONN_SC_ENABLED, 544 HCI_CONN_SC_ENABLED,
@@ -551,6 +552,7 @@ enum {
551 HCI_CONN_FIPS, 552 HCI_CONN_FIPS,
552 HCI_CONN_STK_ENCRYPT, 553 HCI_CONN_STK_ENCRYPT,
553 HCI_CONN_AUTH_INITIATOR, 554 HCI_CONN_AUTH_INITIATOR,
555 HCI_CONN_DROP,
554}; 556};
555 557
556static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) 558static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -700,7 +702,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
700 return NULL; 702 return NULL;
701} 703}
702 704
703void hci_disconnect(struct hci_conn *conn, __u8 reason); 705int hci_disconnect(struct hci_conn *conn, __u8 reason);
704bool hci_setup_sync(struct hci_conn *conn, __u16 handle); 706bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
705void hci_sco_setup(struct hci_conn *conn, __u8 status); 707void hci_sco_setup(struct hci_conn *conn, __u8 status);
706 708
@@ -754,9 +756,10 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status);
754 * _get()/_drop() in it, but require the caller to have a valid ref (FIXME). 756 * _get()/_drop() in it, but require the caller to have a valid ref (FIXME).
755 */ 757 */
756 758
757static inline void hci_conn_get(struct hci_conn *conn) 759static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
758{ 760{
759 get_device(&conn->dev); 761 get_device(&conn->dev);
762 return conn;
760} 763}
761 764
762static inline void hci_conn_put(struct hci_conn *conn) 765static inline void hci_conn_put(struct hci_conn *conn)
@@ -788,7 +791,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
788 if (!conn->out) 791 if (!conn->out)
789 timeo *= 2; 792 timeo *= 2;
790 } else { 793 } else {
791 timeo = msecs_to_jiffies(10); 794 timeo = 0;
792 } 795 }
793 break; 796 break;
794 797
@@ -797,7 +800,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
797 break; 800 break;
798 801
799 default: 802 default:
800 timeo = msecs_to_jiffies(10); 803 timeo = 0;
801 break; 804 break;
802 } 805 }
803 806
@@ -923,7 +926,6 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
923void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); 926void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
924 927
925int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); 928int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
926int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
927int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); 929int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
928 930
929void hci_init_sysfs(struct hci_dev *hdev); 931void hci_init_sysfs(struct hci_dev *hdev);
@@ -968,6 +970,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
968#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) 970#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
969#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) 971#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
970 972
973#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
974 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
975
971/* ----- HCI protocols ----- */ 976/* ----- HCI protocols ----- */
972#define HCI_PROTO_DEFER 0x01 977#define HCI_PROTO_DEFER 0x01
973 978
@@ -1256,6 +1261,8 @@ bool hci_req_pending(struct hci_dev *hdev);
1256void hci_req_add_le_scan_disable(struct hci_request *req); 1261void hci_req_add_le_scan_disable(struct hci_request *req);
1257void hci_req_add_le_passive_scan(struct hci_request *req); 1262void hci_req_add_le_passive_scan(struct hci_request *req);
1258 1263
1264void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req);
1265
1259struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, 1266struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1260 const void *param, u32 timeout); 1267 const void *param, u32 timeout);
1261struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, 1268struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1334,8 +1341,7 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1334int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, 1341int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
1335 u8 link_type, u8 addr_type, u32 passkey, 1342 u8 link_type, u8 addr_type, u32 passkey,
1336 u8 entered); 1343 u8 entered);
1337void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1344void mgmt_auth_failed(struct hci_conn *conn, u8 status);
1338 u8 addr_type, u8 status);
1339void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); 1345void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
1340void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); 1346void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
1341void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); 1347void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
@@ -1351,6 +1357,7 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1351void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1357void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1352 u8 addr_type, s8 rssi, u8 *name, u8 name_len); 1358 u8 addr_type, s8 rssi, u8 *name, u8 name_len);
1353void mgmt_discovering(struct hci_dev *hdev, u8 discovering); 1359void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
1360bool mgmt_powering_down(struct hci_dev *hdev);
1354void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); 1361void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
1355void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk); 1362void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
1356void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, 1363void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
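Since hci_conn_get() now returns the connection, taking a reference and storing it (for example in the new hci_conn_params->conn field) collapses into one expression; a minimal sketch with a hypothetical caller.

#include <net/bluetooth/hci_core.h>

static void my_track_conn(struct hci_conn_params *params,
			  struct hci_conn *conn)
{
	/* take a device reference and remember the connection */
	params->conn = hci_conn_get(conn);
}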
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 8df15ad0d43f..ead99f032f7a 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -633,10 +633,11 @@ struct l2cap_conn {
633 struct sk_buff_head pending_rx; 633 struct sk_buff_head pending_rx;
634 struct work_struct pending_rx_work; 634 struct work_struct pending_rx_work;
635 635
636 struct work_struct id_addr_update_work;
637
636 __u8 disc_reason; 638 __u8 disc_reason;
637 639
638 struct delayed_work security_timer; 640 struct l2cap_chan *smp;
639 struct smp_chan *smp_chan;
640 641
641 struct list_head chan_l; 642 struct list_head chan_l;
642 struct mutex chan_lock; 643 struct mutex chan_lock;
@@ -708,6 +709,8 @@ enum {
708 FLAG_EFS_ENABLE, 709 FLAG_EFS_ENABLE,
709 FLAG_DEFER_SETUP, 710 FLAG_DEFER_SETUP,
710 FLAG_LE_CONN_REQ_SENT, 711 FLAG_LE_CONN_REQ_SENT,
712 FLAG_PENDING_SECURITY,
713 FLAG_HOLD_HCI_CONN,
711}; 714};
712 715
713enum { 716enum {
@@ -837,18 +840,43 @@ static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan
837 return NULL; 840 return NULL;
838} 841}
839 842
843static inline int l2cap_chan_no_recv(struct l2cap_chan *chan, struct sk_buff *skb)
844{
845 return -ENOSYS;
846}
847
848static inline struct sk_buff *l2cap_chan_no_alloc_skb(struct l2cap_chan *chan,
849 unsigned long hdr_len,
850 unsigned long len, int nb)
851{
852 return ERR_PTR(-ENOSYS);
853}
854
840static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err) 855static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
841{ 856{
842} 857}
843 858
859static inline void l2cap_chan_no_close(struct l2cap_chan *chan)
860{
861}
862
844static inline void l2cap_chan_no_ready(struct l2cap_chan *chan) 863static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
845{ 864{
846} 865}
847 866
867static inline void l2cap_chan_no_state_change(struct l2cap_chan *chan,
868 int state, int err)
869{
870}
871
848static inline void l2cap_chan_no_defer(struct l2cap_chan *chan) 872static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
849{ 873{
850} 874}
851 875
876static inline void l2cap_chan_no_suspend(struct l2cap_chan *chan)
877{
878}
879
852static inline void l2cap_chan_no_resume(struct l2cap_chan *chan) 880static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
853{ 881{
854} 882}
@@ -911,14 +939,13 @@ int l2cap_ertm_init(struct l2cap_chan *chan);
911void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); 939void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
912void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); 940void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
913void l2cap_chan_del(struct l2cap_chan *chan, int err); 941void l2cap_chan_del(struct l2cap_chan *chan, int err);
914void l2cap_conn_update_id_addr(struct hci_conn *hcon);
915void l2cap_send_conn_req(struct l2cap_chan *chan); 942void l2cap_send_conn_req(struct l2cap_chan *chan);
916void l2cap_move_start(struct l2cap_chan *chan); 943void l2cap_move_start(struct l2cap_chan *chan);
917void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan, 944void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
918 u8 status); 945 u8 status);
919void __l2cap_physical_cfm(struct l2cap_chan *chan, int result); 946void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
920 947
921void l2cap_conn_get(struct l2cap_conn *conn); 948struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
922void l2cap_conn_put(struct l2cap_conn *conn); 949void l2cap_conn_put(struct l2cap_conn *conn);
923 950
924int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user); 951int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0a080c4de275..a2ddcf2398fd 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4,6 +4,7 @@
4 * 802.11 device and configuration interface 4 * 802.11 device and configuration interface
5 * 5 *
6 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> 6 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -663,6 +664,7 @@ struct cfg80211_acl_data {
663 * @crypto: crypto settings 664 * @crypto: crypto settings
664 * @privacy: the BSS uses privacy 665 * @privacy: the BSS uses privacy
665 * @auth_type: Authentication type (algorithm) 666 * @auth_type: Authentication type (algorithm)
667 * @smps_mode: SMPS mode
666 * @inactivity_timeout: time in seconds to determine station's inactivity. 668 * @inactivity_timeout: time in seconds to determine station's inactivity.
667 * @p2p_ctwindow: P2P CT Window 669 * @p2p_ctwindow: P2P CT Window
668 * @p2p_opp_ps: P2P opportunistic PS 670 * @p2p_opp_ps: P2P opportunistic PS
@@ -681,6 +683,7 @@ struct cfg80211_ap_settings {
681 struct cfg80211_crypto_settings crypto; 683 struct cfg80211_crypto_settings crypto;
682 bool privacy; 684 bool privacy;
683 enum nl80211_auth_type auth_type; 685 enum nl80211_auth_type auth_type;
686 enum nl80211_smps_mode smps_mode;
684 int inactivity_timeout; 687 int inactivity_timeout;
685 u8 p2p_ctwindow; 688 u8 p2p_ctwindow;
686 bool p2p_opp_ps; 689 bool p2p_opp_ps;
@@ -1503,12 +1506,14 @@ enum cfg80211_signal_type {
1503 * @tsf: TSF contained in the frame that carried these IEs 1506 * @tsf: TSF contained in the frame that carried these IEs
1504 * @rcu_head: internal use, for freeing 1507 * @rcu_head: internal use, for freeing
1505 * @len: length of the IEs 1508 * @len: length of the IEs
1509 * @from_beacon: these IEs are known to come from a beacon
1506 * @data: IE data 1510 * @data: IE data
1507 */ 1511 */
1508struct cfg80211_bss_ies { 1512struct cfg80211_bss_ies {
1509 u64 tsf; 1513 u64 tsf;
1510 struct rcu_head rcu_head; 1514 struct rcu_head rcu_head;
1511 int len; 1515 int len;
1516 bool from_beacon;
1512 u8 data[]; 1517 u8 data[];
1513}; 1518};
1514 1519
@@ -1605,10 +1610,12 @@ struct cfg80211_auth_request {
1605 * 1610 *
1606 * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) 1611 * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
1607 * @ASSOC_REQ_DISABLE_VHT: Disable VHT 1612 * @ASSOC_REQ_DISABLE_VHT: Disable VHT
1613 * @ASSOC_REQ_USE_RRM: Declare RRM capability in this association
1608 */ 1614 */
1609enum cfg80211_assoc_req_flags { 1615enum cfg80211_assoc_req_flags {
1610 ASSOC_REQ_DISABLE_HT = BIT(0), 1616 ASSOC_REQ_DISABLE_HT = BIT(0),
1611 ASSOC_REQ_DISABLE_VHT = BIT(1), 1617 ASSOC_REQ_DISABLE_VHT = BIT(1),
1618 ASSOC_REQ_USE_RRM = BIT(2),
1612}; 1619};
1613 1620
1614/** 1621/**
@@ -1800,6 +1807,7 @@ struct cfg80211_connect_params {
1800 * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed 1807 * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
1801 * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed 1808 * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
1802 * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed 1809 * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed
1810 * @WIPHY_PARAM_DYN_ACK: dynack has been enabled
1803 */ 1811 */
1804enum wiphy_params_flags { 1812enum wiphy_params_flags {
1805 WIPHY_PARAM_RETRY_SHORT = 1 << 0, 1813 WIPHY_PARAM_RETRY_SHORT = 1 << 0,
@@ -1807,6 +1815,7 @@ enum wiphy_params_flags {
1807 WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2, 1815 WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2,
1808 WIPHY_PARAM_RTS_THRESHOLD = 1 << 3, 1816 WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
1809 WIPHY_PARAM_COVERAGE_CLASS = 1 << 4, 1817 WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
1818 WIPHY_PARAM_DYN_ACK = 1 << 5,
1810}; 1819};
1811 1820
1812/* 1821/*
@@ -1973,14 +1982,12 @@ struct cfg80211_wowlan_wakeup {
1973 1982
1974/** 1983/**
1975 * struct cfg80211_gtk_rekey_data - rekey data 1984 * struct cfg80211_gtk_rekey_data - rekey data
1976 * @kek: key encryption key 1985 * @kek: key encryption key (NL80211_KEK_LEN bytes)
1977 * @kck: key confirmation key 1986 * @kck: key confirmation key (NL80211_KCK_LEN bytes)
1978 * @replay_ctr: replay counter 1987 * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
1979 */ 1988 */
1980struct cfg80211_gtk_rekey_data { 1989struct cfg80211_gtk_rekey_data {
1981 u8 kek[NL80211_KEK_LEN]; 1990 const u8 *kek, *kck, *replay_ctr;
1982 u8 kck[NL80211_KCK_LEN];
1983 u8 replay_ctr[NL80211_REPLAY_CTR_LEN];
1984}; 1991};
1985 1992
1986/** 1993/**
@@ -2313,6 +2320,17 @@ struct cfg80211_qos_map {
2313 * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the 2320 * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the
2314 * given interface This is used e.g. for dynamic HT 20/40 MHz channel width 2321 * given interface This is used e.g. for dynamic HT 20/40 MHz channel width
2315 * changes during the lifetime of the BSS. 2322 * changes during the lifetime of the BSS.
2323 *
2324 * @add_tx_ts: validate (if admitted_time is 0) or add a TX TS to the device
2325 * with the given parameters; action frame exchange has been handled by
2326 * userspace so this just has to modify the TX path to take the TS into
2327 * account.
2328 * If the admitted time is 0 just validate the parameters to make sure
2329 * the session can be created at all; it is valid to just always return
2330 * success for that but that may result in inefficient behaviour (handshake
2331 * with the peer followed by immediate teardown when the addition is later
2332 * rejected)
2333 * @del_tx_ts: remove an existing TX TS
2316 */ 2334 */
2317struct cfg80211_ops { 2335struct cfg80211_ops {
2318 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); 2336 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2553,6 +2571,12 @@ struct cfg80211_ops {
2553 2571
2554 int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev, 2572 int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
2555 struct cfg80211_chan_def *chandef); 2573 struct cfg80211_chan_def *chandef);
2574
2575 int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
2576 u8 tsid, const u8 *peer, u8 user_prio,
2577 u16 admitted_time);
2578 int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev,
2579 u8 tsid, const u8 *peer);
2556}; 2580};
2557 2581
2558/* 2582/*
@@ -2599,9 +2623,13 @@ struct cfg80211_ops {
2599 * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. 2623 * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
2600 * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in 2624 * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
2601 * beaconing mode (AP, IBSS, Mesh, ...). 2625 * beaconing mode (AP, IBSS, Mesh, ...).
2626 * @WIPHY_FLAG_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM
2627 * TSPEC sessions (TID aka TSID 0-7) with the NL80211_CMD_ADD_TX_TS
2628 * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it
2629 * needs to be able to handle Block-Ack agreements and other things.
2602 */ 2630 */
2603enum wiphy_flags { 2631enum wiphy_flags {
2604 /* use hole at 0 */ 2632 WIPHY_FLAG_SUPPORTS_WMM_ADMISSION = BIT(0),
2605 /* use hole at 1 */ 2633 /* use hole at 1 */
2606 /* use hole at 2 */ 2634 /* use hole at 2 */
2607 WIPHY_FLAG_NETNS_OK = BIT(3), 2635 WIPHY_FLAG_NETNS_OK = BIT(3),
@@ -3765,11 +3793,25 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
3765} 3793}
3766 3794
3767/** 3795/**
3768 * cfg80211_inform_bss - inform cfg80211 of a new BSS 3796 * enum cfg80211_bss_frame_type - frame type that the BSS data came from
3797 * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is
3798 * from a beacon or probe response
3799 * @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon
3800 * @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response
3801 */
3802enum cfg80211_bss_frame_type {
3803 CFG80211_BSS_FTYPE_UNKNOWN,
3804 CFG80211_BSS_FTYPE_BEACON,
3805 CFG80211_BSS_FTYPE_PRESP,
3806};
3807
3808/**
3809 * cfg80211_inform_bss_width - inform cfg80211 of a new BSS
3769 * 3810 *
3770 * @wiphy: the wiphy reporting the BSS 3811 * @wiphy: the wiphy reporting the BSS
3771 * @rx_channel: The channel the frame was received on 3812 * @rx_channel: The channel the frame was received on
3772 * @scan_width: width of the control channel 3813 * @scan_width: width of the control channel
3814 * @ftype: frame type (if known)
3773 * @bssid: the BSSID of the BSS 3815 * @bssid: the BSSID of the BSS
3774 * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) 3816 * @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
3775 * @capability: the capability field sent by the peer 3817 * @capability: the capability field sent by the peer
@@ -3789,6 +3831,7 @@ struct cfg80211_bss * __must_check
3789cfg80211_inform_bss_width(struct wiphy *wiphy, 3831cfg80211_inform_bss_width(struct wiphy *wiphy,
3790 struct ieee80211_channel *rx_channel, 3832 struct ieee80211_channel *rx_channel,
3791 enum nl80211_bss_scan_width scan_width, 3833 enum nl80211_bss_scan_width scan_width,
3834 enum cfg80211_bss_frame_type ftype,
3792 const u8 *bssid, u64 tsf, u16 capability, 3835 const u8 *bssid, u64 tsf, u16 capability,
3793 u16 beacon_interval, const u8 *ie, size_t ielen, 3836 u16 beacon_interval, const u8 *ie, size_t ielen,
3794 s32 signal, gfp_t gfp); 3837 s32 signal, gfp_t gfp);
@@ -3796,12 +3839,13 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
3796static inline struct cfg80211_bss * __must_check 3839static inline struct cfg80211_bss * __must_check
3797cfg80211_inform_bss(struct wiphy *wiphy, 3840cfg80211_inform_bss(struct wiphy *wiphy,
3798 struct ieee80211_channel *rx_channel, 3841 struct ieee80211_channel *rx_channel,
3842 enum cfg80211_bss_frame_type ftype,
3799 const u8 *bssid, u64 tsf, u16 capability, 3843 const u8 *bssid, u64 tsf, u16 capability,
3800 u16 beacon_interval, const u8 *ie, size_t ielen, 3844 u16 beacon_interval, const u8 *ie, size_t ielen,
3801 s32 signal, gfp_t gfp) 3845 s32 signal, gfp_t gfp)
3802{ 3846{
3803 return cfg80211_inform_bss_width(wiphy, rx_channel, 3847 return cfg80211_inform_bss_width(wiphy, rx_channel,
3804 NL80211_BSS_CHAN_WIDTH_20, 3848 NL80211_BSS_CHAN_WIDTH_20, ftype,
3805 bssid, tsf, capability, 3849 bssid, tsf, capability,
3806 beacon_interval, ie, ielen, signal, 3850 beacon_interval, ie, ielen, signal,
3807 gfp); 3851 gfp);
@@ -3902,6 +3946,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
3902 * moves to cfg80211 in this call 3946 * moves to cfg80211 in this call
3903 * @buf: authentication frame (header + body) 3947 * @buf: authentication frame (header + body)
3904 * @len: length of the frame data 3948 * @len: length of the frame data
3949 * @uapsd_queues: bitmap of ACs configured to uapsd. -1 if n/a.
3905 * 3950 *
3906 * After being asked to associate via cfg80211_ops::assoc() the driver must 3951 * After being asked to associate via cfg80211_ops::assoc() the driver must
3907 * call either this function or cfg80211_auth_timeout(). 3952 * call either this function or cfg80211_auth_timeout().
@@ -3910,7 +3955,8 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
3910 */ 3955 */
3911void cfg80211_rx_assoc_resp(struct net_device *dev, 3956void cfg80211_rx_assoc_resp(struct net_device *dev,
3912 struct cfg80211_bss *bss, 3957 struct cfg80211_bss *bss,
3913 const u8 *buf, size_t len); 3958 const u8 *buf, size_t len,
3959 int uapsd_queues);
3914 3960
3915/** 3961/**
3916 * cfg80211_assoc_timeout - notification of timed out association 3962 * cfg80211_assoc_timeout - notification of timed out association
@@ -4412,7 +4458,6 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
4412 * @buf: Management frame (header + body) 4458 * @buf: Management frame (header + body)
4413 * @len: length of the frame data 4459 * @len: length of the frame data
4414 * @flags: flags, as defined in enum nl80211_rxmgmt_flags 4460 * @flags: flags, as defined in enum nl80211_rxmgmt_flags
4415 * @gfp: context flags
4416 * 4461 *
4417 * This function is called whenever an Action frame is received for a station 4462 * This function is called whenever an Action frame is received for a station
4418 * mode interface, but is not processed in kernel. 4463 * mode interface, but is not processed in kernel.
@@ -4423,7 +4468,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
4423 * driver is responsible for rejecting the frame. 4468 * driver is responsible for rejecting the frame.
4424 */ 4469 */
4425bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, 4470bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
4426 const u8 *buf, size_t len, u32 flags, gfp_t gfp); 4471 const u8 *buf, size_t len, u32 flags);
4427 4472
4428/** 4473/**
4429 * cfg80211_mgmt_tx_status - notification of TX status for management frame 4474 * cfg80211_mgmt_tx_status - notification of TX status for management frame
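Drivers reporting scan results now pass the frame type explicitly; a sketch of the updated cfg80211_inform_bss() call where the locals stand in for values a real driver would parse from the received frame.

#include <net/cfg80211.h>

static void my_report_bss(struct wiphy *wiphy, struct ieee80211_channel *chan,
			  const u8 *bssid, u64 tsf, u16 capab, u16 bcn_int,
			  const u8 *ies, size_t ies_len, s32 signal_mbm)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss(wiphy, chan, CFG80211_BSS_FTYPE_UNKNOWN,
				  bssid, tsf, capab, bcn_int,
				  ies, ies_len, signal_mbm, GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(wiphy, bss);	/* drop the reference we got */
}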
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 87cb1903640d..6465bae80a4f 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -122,9 +122,7 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
122 122
123static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) 123static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
124{ 124{
125 __be32 diff[] = { ~from, to }; 125 *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
126
127 *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum)));
128} 126}
129 127
130/* Implements RFC 1624 (Incremental Internet Checksum) 128/* Implements RFC 1624 (Incremental Internet Checksum)
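
The rewritten csum_replace4() drops the temporary two-word diff[] buffer and applies the update directly with csum_sub()/csum_add() on the unfolded sum. A standalone userspace sketch of the same incremental update follows; plain 32-bit accumulators stand in for __wsum/__sum16 and the csum_* helpers are reimplemented here just for the demo.

/* Incremental checksum update: unfold the stored checksum, subtract the old
 * 32-bit value, add the new one, fold again.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold carries twice */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

static uint32_t csum_add(uint32_t a, uint32_t b)
{
        a += b;
        return a + (a < b);                     /* end-around carry */
}

static uint32_t csum_sub(uint32_t a, uint32_t b)
{
        return csum_add(a, ~b);                 /* subtract = add the complement */
}

int main(void)
{
        /* Toy "packet": four 16-bit words plus one 32-bit field we rewrite. */
        const uint16_t words[4] = { 0x4500, 0x0054, 0xabcd, 0x0102 };
        const uint32_t from = 0xc0a80001;       /* old field value */
        const uint32_t to   = 0xc0a800fe;       /* new field value */
        uint32_t sum = 0, full = 0;
        int i;

        for (i = 0; i < 4; i++)
                sum = csum_add(sum, words[i]);
        uint16_t old_csum = csum_fold(csum_add(sum, from));

        /* Incremental path, mirroring the new csum_replace4() body. */
        uint32_t unfolded = (uint16_t)~old_csum;
        uint16_t new_csum = csum_fold(csum_add(csum_sub(unfolded, from), to));

        /* Cross-check against a full recomputation with 'to' in place. */
        for (i = 0; i < 4; i++)
                full = csum_add(full, words[i]);
        printf("incremental %#06x, recomputed %#06x\n",
               (unsigned)new_csum, (unsigned)csum_fold(csum_add(full, to)));
        return 0;
}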
diff --git a/include/net/codel.h b/include/net/codel.h
index fe0eab32ce76..aeee28081245 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -66,7 +66,7 @@ typedef s32 codel_tdiff_t;
66 66
67static inline codel_time_t codel_get_time(void) 67static inline codel_time_t codel_get_time(void)
68{ 68{
69 u64 ns = ktime_to_ns(ktime_get()); 69 u64 ns = ktime_get_ns();
70 70
71 return ns >> CODEL_SHIFT; 71 return ns >> CODEL_SHIFT;
72} 72}
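
The codel_get_time() change only switches the clock source to ktime_get_ns(); the fixed-point unit is unchanged, nanoseconds shifted right by CODEL_SHIFT. A small illustrative sketch of that unit; the CODEL_SHIFT value of 10 is assumed from this header, and the wrap-aware comparison below is written independently in the style of the codel time helpers.

/* CoDel stores time as ns >> CODEL_SHIFT (1024 ns ticks), so a 32-bit
 * codel_time_t wraps after roughly 73 minutes and comparisons must be
 * wrap-aware.
 */
#include <stdint.h>
#include <stdio.h>

#define CODEL_SHIFT 10                  /* assumed from include/net/codel.h */

typedef uint32_t codel_time_t;

static codel_time_t ns_to_codel_time(uint64_t ns)
{
        return (codel_time_t)(ns >> CODEL_SHIFT);
}

static int time_is_after(codel_time_t a, codel_time_t b)
{
        return (int32_t)(a - b) > 0;    /* wrap-safe "a newer than b" */
}

int main(void)
{
        uint64_t t0 = 5000000000ULL;            /* 5 s in ns */
        uint64_t t1 = t0 + 13000;               /* 13 us later */
        codel_time_t a = ns_to_codel_time(t0);
        codel_time_t b = ns_to_codel_time(t1);

        printf("delta %u ticks (~%u us), newer: %d\n",
               (unsigned)(b - a),
               (unsigned)(((b - a) << CODEL_SHIFT) / 1000),
               time_is_after(b, a));
        return 0;
}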
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6efce384451e..58ad8c6492db 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -15,6 +15,17 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/timer.h> 16#include <linux/timer.h>
17#include <linux/workqueue.h> 17#include <linux/workqueue.h>
18#include <linux/of.h>
19#include <linux/phy.h>
20#include <linux/phy_fixed.h>
21
22enum dsa_tag_protocol {
23 DSA_TAG_PROTO_NONE = 0,
24 DSA_TAG_PROTO_DSA,
25 DSA_TAG_PROTO_TRAILER,
26 DSA_TAG_PROTO_EDSA,
27 DSA_TAG_PROTO_BRCM,
28};
18 29
19#define DSA_MAX_SWITCHES 4 30#define DSA_MAX_SWITCHES 4
20#define DSA_MAX_PORTS 12 31#define DSA_MAX_PORTS 12
@@ -23,9 +34,15 @@ struct dsa_chip_data {
23 /* 34 /*
24 * How to access the switch configuration registers. 35 * How to access the switch configuration registers.
25 */ 36 */
26 struct device *mii_bus; 37 struct device *host_dev;
27 int sw_addr; 38 int sw_addr;
28 39
40 /* Device tree node pointer for this specific switch chip
41 * used during switch setup in case additional properties
 42 * and resources need to be used
43 */
44 struct device_node *of_node;
45
29 /* 46 /*
30 * The names of the switch's ports. Use "cpu" to 47 * The names of the switch's ports. Use "cpu" to
31 * designate the switch port that the cpu is connected to, 48 * designate the switch port that the cpu is connected to,
@@ -34,6 +51,7 @@ struct dsa_chip_data {
34 * or any other string to indicate this is a physical port. 51 * or any other string to indicate this is a physical port.
35 */ 52 */
36 char *port_names[DSA_MAX_PORTS]; 53 char *port_names[DSA_MAX_PORTS];
54 struct device_node *port_dn[DSA_MAX_PORTS];
37 55
38 /* 56 /*
39 * An array (with nr_chips elements) of which element [a] 57 * An array (with nr_chips elements) of which element [a]
@@ -59,6 +77,8 @@ struct dsa_platform_data {
59 struct dsa_chip_data *chip; 77 struct dsa_chip_data *chip;
60}; 78};
61 79
80struct packet_type;
81
62struct dsa_switch_tree { 82struct dsa_switch_tree {
63 /* 83 /*
64 * Configuration data for the platform device that owns 84 * Configuration data for the platform device that owns
@@ -71,7 +91,11 @@ struct dsa_switch_tree {
71 * protocol to use. 91 * protocol to use.
72 */ 92 */
73 struct net_device *master_netdev; 93 struct net_device *master_netdev;
74 __be16 tag_protocol; 94 int (*rcv)(struct sk_buff *skb,
95 struct net_device *dev,
96 struct packet_type *pt,
97 struct net_device *orig_dev);
98 enum dsa_tag_protocol tag_protocol;
75 99
76 /* 100 /*
77 * The switch and port to which the CPU is attached. 101 * The switch and port to which the CPU is attached.
@@ -110,15 +134,16 @@ struct dsa_switch {
110 struct dsa_switch_driver *drv; 134 struct dsa_switch_driver *drv;
111 135
112 /* 136 /*
113 * Reference to mii bus to use. 137 * Reference to host device to use.
114 */ 138 */
115 struct mii_bus *master_mii_bus; 139 struct device *master_dev;
116 140
117 /* 141 /*
118 * Slave mii_bus and devices for the individual ports. 142 * Slave mii_bus and devices for the individual ports.
119 */ 143 */
120 u32 dsa_port_mask; 144 u32 dsa_port_mask;
121 u32 phys_port_mask; 145 u32 phys_port_mask;
146 u32 phys_mii_mask;
122 struct mii_bus *slave_mii_bus; 147 struct mii_bus *slave_mii_bus;
123 struct net_device *ports[DSA_MAX_PORTS]; 148 struct net_device *ports[DSA_MAX_PORTS];
124}; 149};
@@ -147,15 +172,16 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
147struct dsa_switch_driver { 172struct dsa_switch_driver {
148 struct list_head list; 173 struct list_head list;
149 174
150 __be16 tag_protocol; 175 enum dsa_tag_protocol tag_protocol;
151 int priv_size; 176 int priv_size;
152 177
153 /* 178 /*
154 * Probing and setup. 179 * Probing and setup.
155 */ 180 */
156 char *(*probe)(struct mii_bus *bus, int sw_addr); 181 char *(*probe)(struct device *host_dev, int sw_addr);
157 int (*setup)(struct dsa_switch *ds); 182 int (*setup)(struct dsa_switch *ds);
158 int (*set_addr)(struct dsa_switch *ds, u8 *addr); 183 int (*set_addr)(struct dsa_switch *ds, u8 *addr);
184 u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
159 185
160 /* 186 /*
161 * Access to the switch's PHY registers. 187 * Access to the switch's PHY registers.
@@ -170,37 +196,64 @@ struct dsa_switch_driver {
170 void (*poll_link)(struct dsa_switch *ds); 196 void (*poll_link)(struct dsa_switch *ds);
171 197
172 /* 198 /*
199 * Link state adjustment (called from libphy)
200 */
201 void (*adjust_link)(struct dsa_switch *ds, int port,
202 struct phy_device *phydev);
203 void (*fixed_link_update)(struct dsa_switch *ds, int port,
204 struct fixed_phy_status *st);
205
206 /*
173 * ethtool hardware statistics. 207 * ethtool hardware statistics.
174 */ 208 */
175 void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); 209 void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
176 void (*get_ethtool_stats)(struct dsa_switch *ds, 210 void (*get_ethtool_stats)(struct dsa_switch *ds,
177 int port, uint64_t *data); 211 int port, uint64_t *data);
178 int (*get_sset_count)(struct dsa_switch *ds); 212 int (*get_sset_count)(struct dsa_switch *ds);
213
214 /*
215 * ethtool Wake-on-LAN
216 */
217 void (*get_wol)(struct dsa_switch *ds, int port,
218 struct ethtool_wolinfo *w);
219 int (*set_wol)(struct dsa_switch *ds, int port,
220 struct ethtool_wolinfo *w);
221
222 /*
223 * Suspend and resume
224 */
225 int (*suspend)(struct dsa_switch *ds);
226 int (*resume)(struct dsa_switch *ds);
227
228 /*
229 * Port enable/disable
230 */
231 int (*port_enable)(struct dsa_switch *ds, int port,
232 struct phy_device *phy);
233 void (*port_disable)(struct dsa_switch *ds, int port,
234 struct phy_device *phy);
235
236 /*
 237 * EEE settings
238 */
239 int (*set_eee)(struct dsa_switch *ds, int port,
240 struct phy_device *phydev,
241 struct ethtool_eee *e);
242 int (*get_eee)(struct dsa_switch *ds, int port,
243 struct ethtool_eee *e);
179}; 244};
180 245
181void register_switch_driver(struct dsa_switch_driver *type); 246void register_switch_driver(struct dsa_switch_driver *type);
182void unregister_switch_driver(struct dsa_switch_driver *type); 247void unregister_switch_driver(struct dsa_switch_driver *type);
248struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
183 249
184static inline void *ds_to_priv(struct dsa_switch *ds) 250static inline void *ds_to_priv(struct dsa_switch *ds)
185{ 251{
186 return (void *)(ds + 1); 252 return (void *)(ds + 1);
187} 253}
188 254
189/* 255static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
190 * The original DSA tag format and some other tag formats have no
191 * ethertype, which means that we need to add a little hack to the
192 * networking receive path to make sure that received frames get
193 * the right ->protocol assigned to them when one of those tag
194 * formats is in use.
195 */
196static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
197{
198 return !!(dst->tag_protocol == htons(ETH_P_DSA));
199}
200
201static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
202{ 256{
203 return !!(dst->tag_protocol == htons(ETH_P_TRAILER)); 257 return dst->rcv != NULL;
204} 258}
205
206#endif 259#endif
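
The dsa.h rework replaces the per-ethertype helpers (dsa_uses_dsa_tags()/dsa_uses_trailer_tags()) with an enum dsa_tag_protocol plus a per-tree rcv() hook, so the receive path only asks whether a hook is installed. Below is a standalone mock of that dispatch pattern; the struct and function names are simplified stand-ins, not the kernel types.

/* The tag protocol selects a per-tree rcv() hook once at setup time, and the
 * fast path only tests the hook, as dsa_uses_tagged_protocol() now does.
 */
#include <stddef.h>
#include <stdio.h>

enum mock_tag_protocol {
        TAG_PROTO_NONE = 0,
        TAG_PROTO_DSA,
        TAG_PROTO_TRAILER,
        TAG_PROTO_EDSA,
        TAG_PROTO_BRCM,
};

struct mock_tree {
        enum mock_tag_protocol tag_protocol;
        int (*rcv)(struct mock_tree *dst, const char *frame);
};

static int edsa_rcv(struct mock_tree *dst, const char *frame)
{
        (void)dst;
        printf("EDSA-tagged frame: %s\n", frame);
        return 0;
}

/* Analogue of dsa_uses_tagged_protocol(): tagged iff a hook is installed. */
static int uses_tagged_protocol(const struct mock_tree *dst)
{
        return dst->rcv != NULL;
}

static void tree_setup(struct mock_tree *dst, enum mock_tag_protocol proto)
{
        dst->tag_protocol = proto;
        dst->rcv = (proto == TAG_PROTO_EDSA) ? edsa_rcv : NULL;
}

int main(void)
{
        struct mock_tree tagged, plain;

        tree_setup(&tagged, TAG_PROTO_EDSA);
        tree_setup(&plain, TAG_PROTO_NONE);

        printf("tagged tree uses tags: %d\n", uses_tagged_protocol(&tagged));
        printf("plain tree uses tags: %d\n", uses_tagged_protocol(&plain));

        if (uses_tagged_protocol(&tagged))
                tagged.rcv(&tagged, "payload");
        return 0;
}

One side effect of the enum is that drivers can report a tag format without hard-coding ethertype constants in the header, which is what made the htons(ETH_P_DSA)/htons(ETH_P_TRAILER) checks necessary before.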
diff --git a/include/net/dst.h b/include/net/dst.h
index 71c60f42be48..a8ae4e760778 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -480,6 +480,7 @@ void dst_init(void);
480/* Flags for xfrm_lookup flags argument. */ 480/* Flags for xfrm_lookup flags argument. */
481enum { 481enum {
482 XFRM_LOOKUP_ICMP = 1 << 0, 482 XFRM_LOOKUP_ICMP = 1 << 0,
483 XFRM_LOOKUP_QUEUE = 1 << 1,
483}; 484};
484 485
485struct flowi; 486struct flowi;
@@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
490 int flags) 491 int flags)
491{ 492{
492 return dst_orig; 493 return dst_orig;
493} 494}
495
496static inline struct dst_entry *xfrm_lookup_route(struct net *net,
497 struct dst_entry *dst_orig,
498 const struct flowi *fl,
499 struct sock *sk,
500 int flags)
501{
502 return dst_orig;
503}
494 504
495static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) 505static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
496{ 506{
@@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
502 const struct flowi *fl, struct sock *sk, 512 const struct flowi *fl, struct sock *sk,
503 int flags); 513 int flags);
504 514
515struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
516 const struct flowi *fl, struct sock *sk,
517 int flags);
518
505/* skb attached with this dst needs transformation if dst->xfrm is valid */ 519/* skb attached with this dst needs transformation if dst->xfrm is valid */
506static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) 520static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
507{ 521{
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 2f26dfb8450e..1f99a1de0e4f 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -63,7 +63,7 @@ static inline void dst_entries_add(struct dst_ops *dst, int val)
63 63
64static inline int dst_entries_init(struct dst_ops *dst) 64static inline int dst_entries_init(struct dst_ops *dst)
65{ 65{
66 return percpu_counter_init(&dst->pcpuc_entries, 0); 66 return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL);
67} 67}
68 68
69static inline void dst_entries_destroy(struct dst_ops *dst) 69static inline void dst_entries_destroy(struct dst_ops *dst)
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
index 6667a054763a..7ee2df083542 100644
--- a/include/net/flow_keys.h
+++ b/include/net/flow_keys.h
@@ -27,7 +27,19 @@ struct flow_keys {
27 u8 ip_proto; 27 u8 ip_proto;
28}; 28};
29 29
30bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow); 30bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
31__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto); 31 void *data, __be16 proto, int nhoff, int hlen);
32static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
33{
34 return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
35}
36__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
37 void *data, int hlen_proto);
38static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
39{
40 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
41}
32u32 flow_hash_from_keys(struct flow_keys *keys); 42u32 flow_hash_from_keys(struct flow_keys *keys);
43unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
44 __be16 protocol);
33#endif 45#endif
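
skb_flow_dissect() and skb_flow_get_ports() keep their old signatures as inline wrappers around new double-underscore variants that take an explicit data pointer, header length and offsets. A standalone sketch of that "full function plus thin default wrapper" shape; the parser below is a toy stand-in for the real dissector, not kernel code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_keys {
        uint16_t src_port;
        uint16_t dst_port;
};

/* Full variant: caller names the buffer, its length and the L4 offset. */
static int parse_ports_raw(const uint8_t *data, size_t hlen, size_t thoff,
                           struct toy_keys *keys)
{
        if (thoff + 4 > hlen)                   /* bounds check against hlen */
                return 0;
        keys->src_port = (uint16_t)(data[thoff] << 8 | data[thoff + 1]);
        keys->dst_port = (uint16_t)(data[thoff + 2] << 8 | data[thoff + 3]);
        return 1;
}

/* Thin wrapper supplying defaults, the way skb_flow_dissect() forwards
 * NULL/0 arguments to __skb_flow_dissect().
 */
static int parse_ports(const uint8_t *pkt, size_t len, struct toy_keys *keys)
{
        return parse_ports_raw(pkt, len, 20, keys);
}

int main(void)
{
        uint8_t pkt[24] = { 0 };
        struct toy_keys keys;

        pkt[20] = 0x00; pkt[21] = 0x50;         /* sport 80 */
        pkt[22] = 0xd4; pkt[23] = 0x31;         /* dport 54321 */

        if (parse_ports(pkt, sizeof(pkt), &keys))
                printf("sport %u dport %u\n",
                       (unsigned)keys.src_port, (unsigned)keys.dst_port);
        return 0;
}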
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index ea4271dceff0..cbafa3768d48 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -6,6 +6,11 @@
6#include <linux/rtnetlink.h> 6#include <linux/rtnetlink.h>
7#include <linux/pkt_sched.h> 7#include <linux/pkt_sched.h>
8 8
9struct gnet_stats_basic_cpu {
10 struct gnet_stats_basic_packed bstats;
11 struct u64_stats_sync syncp;
12};
13
9struct gnet_dump { 14struct gnet_dump {
10 spinlock_t * lock; 15 spinlock_t * lock;
11 struct sk_buff * skb; 16 struct sk_buff * skb;
@@ -27,21 +32,29 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
27 spinlock_t *lock, struct gnet_dump *d); 32 spinlock_t *lock, struct gnet_dump *d);
28 33
29int gnet_stats_copy_basic(struct gnet_dump *d, 34int gnet_stats_copy_basic(struct gnet_dump *d,
35 struct gnet_stats_basic_cpu __percpu *cpu,
30 struct gnet_stats_basic_packed *b); 36 struct gnet_stats_basic_packed *b);
37void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
38 struct gnet_stats_basic_cpu __percpu *cpu,
39 struct gnet_stats_basic_packed *b);
31int gnet_stats_copy_rate_est(struct gnet_dump *d, 40int gnet_stats_copy_rate_est(struct gnet_dump *d,
32 const struct gnet_stats_basic_packed *b, 41 const struct gnet_stats_basic_packed *b,
33 struct gnet_stats_rate_est64 *r); 42 struct gnet_stats_rate_est64 *r);
34int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q); 43int gnet_stats_copy_queue(struct gnet_dump *d,
44 struct gnet_stats_queue __percpu *cpu_q,
45 struct gnet_stats_queue *q, __u32 qlen);
35int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); 46int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
36 47
37int gnet_stats_finish_copy(struct gnet_dump *d); 48int gnet_stats_finish_copy(struct gnet_dump *d);
38 49
39int gen_new_estimator(struct gnet_stats_basic_packed *bstats, 50int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
51 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
40 struct gnet_stats_rate_est64 *rate_est, 52 struct gnet_stats_rate_est64 *rate_est,
41 spinlock_t *stats_lock, struct nlattr *opt); 53 spinlock_t *stats_lock, struct nlattr *opt);
42void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, 54void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
43 struct gnet_stats_rate_est64 *rate_est); 55 struct gnet_stats_rate_est64 *rate_est);
44int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, 56int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
57 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
45 struct gnet_stats_rate_est64 *rate_est, 58 struct gnet_stats_rate_est64 *rate_est,
46 spinlock_t *stats_lock, struct nlattr *opt); 59 spinlock_t *stats_lock, struct nlattr *opt);
47bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, 60bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
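
The gnet_stats and estimator interfaces grow an optional per-CPU gnet_stats_basic_cpu argument, so a qdisc can count packets and bytes on the local CPU and aggregate only when dumping; each per-CPU copy carries a u64_stats_sync for consistent 64-bit reads on 32-bit hosts. A standalone aggregate-on-read sketch follows; plain arrays stand in for real per-CPU storage and the seqcount protection is omitted.

/* Writers bump their own slot, the dump path sums all slots. */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct basic_stats {
        uint64_t bytes;
        uint32_t packets;
};

static struct basic_stats percpu[NR_CPUS];

static void count_packet(int cpu, uint32_t len)
{
        percpu[cpu].packets += 1;       /* no shared cacheline, no lock */
        percpu[cpu].bytes += len;
}

/* Rough equivalent of __gnet_stats_copy_basic() with a per-CPU source. */
static void copy_basic(struct basic_stats *out)
{
        int cpu;

        out->bytes = 0;
        out->packets = 0;
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                out->bytes += percpu[cpu].bytes;
                out->packets += percpu[cpu].packets;
        }
}

int main(void)
{
        struct basic_stats total;

        count_packet(0, 1500);
        count_packet(1, 60);
        count_packet(1, 1500);

        copy_basic(&total);
        printf("%u packets, %llu bytes\n",
               (unsigned)total.packets, (unsigned long long)total.bytes);
        return 0;
}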
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 93695f0e22a5..af10c2cf8a1d 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -394,4 +394,12 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
394 return netlink_set_err(net->genl_sock, portid, group, code); 394 return netlink_set_err(net->genl_sock, portid, group, code);
395} 395}
396 396
397static inline int genl_has_listeners(struct genl_family *family,
398 struct sock *sk, unsigned int group)
399{
400 if (WARN_ON_ONCE(group >= family->n_mcgrps))
401 return -EINVAL;
402 group = family->mcgrp_offset + group;
403 return netlink_has_listeners(sk, group);
404}
397#endif /* __NET_GENERIC_NETLINK_H */ 405#endif /* __NET_GENERIC_NETLINK_H */
diff --git a/include/net/geneve.h b/include/net/geneve.h
new file mode 100644
index 000000000000..112132cf8e2e
--- /dev/null
+++ b/include/net/geneve.h
@@ -0,0 +1,97 @@
1#ifndef __NET_GENEVE_H
2#define __NET_GENEVE_H 1
3
4#ifdef CONFIG_INET
5#include <net/udp_tunnel.h>
6#endif
7
8
9/* Geneve Header:
10 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
11 * |Ver| Opt Len |O|C| Rsvd. | Protocol Type |
12 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
13 * | Virtual Network Identifier (VNI) | Reserved |
14 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
15 * | Variable Length Options |
16 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
17 *
18 * Option Header:
19 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
20 * | Option Class | Type |R|R|R| Length |
21 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
22 * | Variable Option Data |
23 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
24 */
25
26struct geneve_opt {
27 __be16 opt_class;
28 u8 type;
29#ifdef __LITTLE_ENDIAN_BITFIELD
30 u8 length:5;
31 u8 r3:1;
32 u8 r2:1;
33 u8 r1:1;
34#else
35 u8 r1:1;
36 u8 r2:1;
37 u8 r3:1;
38 u8 length:5;
39#endif
40 u8 opt_data[];
41};
42
43#define GENEVE_CRIT_OPT_TYPE (1 << 7)
44
45struct genevehdr {
46#ifdef __LITTLE_ENDIAN_BITFIELD
47 u8 opt_len:6;
48 u8 ver:2;
49 u8 rsvd1:6;
50 u8 critical:1;
51 u8 oam:1;
52#else
53 u8 ver:2;
54 u8 opt_len:6;
55 u8 oam:1;
56 u8 critical:1;
57 u8 rsvd1:6;
58#endif
59 __be16 proto_type;
60 u8 vni[3];
61 u8 rsvd2;
62 struct geneve_opt options[];
63};
64
65#ifdef CONFIG_INET
66struct geneve_sock;
67
68typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
69
70struct geneve_sock {
71 struct hlist_node hlist;
72 geneve_rcv_t *rcv;
73 void *rcv_data;
74 struct work_struct del_work;
75 struct socket *sock;
76 struct rcu_head rcu;
77 atomic_t refcnt;
78 struct udp_offload udp_offloads;
79};
80
81#define GENEVE_VER 0
82#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
83
84struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
85 geneve_rcv_t *rcv, void *data,
86 bool no_share, bool ipv6);
87
88void geneve_sock_release(struct geneve_sock *vs);
89
90int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
91 struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
92 __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
93 __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
94 bool xnet);
95#endif /*ifdef CONFIG_INET */
96
 97#endif /* __NET_GENEVE_H */
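
Following the header layout documented in the new file above, here is a standalone parser for the fixed 8-byte part of a Geneve header. It reads bytes directly instead of using the bitfields, so it is endian-independent; the sample packet bytes are invented for the demo.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct geneve_fixed {
        uint8_t  ver;           /* 2 bits */
        uint8_t  opt_len;       /* 6 bits, in 4-byte words */
        uint8_t  oam;
        uint8_t  critical;
        uint16_t proto_type;
        uint32_t vni;           /* 24 bits */
};

static int geneve_parse(const uint8_t *buf, size_t len, struct geneve_fixed *h)
{
        if (len < 8)
                return -1;
        h->ver        = buf[0] >> 6;
        h->opt_len    = buf[0] & 0x3f;
        h->oam        = (buf[1] >> 7) & 1;
        h->critical   = (buf[1] >> 6) & 1;
        h->proto_type = (uint16_t)(buf[2] << 8 | buf[3]);
        h->vni        = (uint32_t)buf[4] << 16 | (uint32_t)buf[5] << 8 | buf[6];
        /* buf[7] is reserved; options (4 * opt_len bytes) follow buf[8..]. */
        return 0;
}

int main(void)
{
        /* ver 0, opt_len 2 words, O=0, C=1, proto 0x6558, VNI 0x00abcd. */
        const uint8_t pkt[8] = { 0x02, 0x40, 0x65, 0x58, 0x00, 0xab, 0xcd, 0x00 };
        struct geneve_fixed h;

        if (geneve_parse(pkt, sizeof(pkt), &h) == 0)
                printf("ver %d optlen %d bytes oam %d crit %d proto 0x%04x vni 0x%06x\n",
                       h.ver, h.opt_len * 4, h.oam, h.critical,
                       (unsigned)h.proto_type, (unsigned)h.vni);
        return 0;
}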
diff --git a/include/net/gue.h b/include/net/gue.h
new file mode 100644
index 000000000000..b6c332788084
--- /dev/null
+++ b/include/net/gue.h
@@ -0,0 +1,23 @@
1#ifndef __NET_GUE_H
2#define __NET_GUE_H
3
4struct guehdr {
5 union {
6 struct {
7#if defined(__LITTLE_ENDIAN_BITFIELD)
8 __u8 hlen:4,
9 version:4;
10#elif defined (__BIG_ENDIAN_BITFIELD)
11 __u8 version:4,
12 hlen:4;
13#else
14#error "Please fix <asm/byteorder.h>"
15#endif
16 __u8 next_hdr;
17 __u16 flags;
18 };
19 __u32 word;
20 };
21};
22
23#endif
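
The same byte-wise reading applies to the 4-byte GUE header: per the bitfield layout above, hlen occupies the low nibble and version the high nibble of the first byte, followed by next_hdr and the flags word. A minimal sketch; the sample bytes are invented and the flags are assumed to be network order here.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t hdr[4] = { 0x02, 0x29, 0x00, 0x00 };      /* made-up sample */
        uint8_t version  = hdr[0] >> 4;
        uint8_t hlen     = hdr[0] & 0x0f;       /* in 32-bit words */
        uint8_t next_hdr = hdr[1];              /* 0x29 = IPPROTO_IPV6 */
        uint16_t flags   = (uint16_t)(hdr[2] << 8 | hdr[3]);

        printf("GUE v%d, hlen %d bytes, next_hdr %d, flags 0x%04x\n",
               version, hlen * 4, next_hdr, (unsigned)flags);
        return 0;
}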
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d07b1a64b4e7..55a8d4056cc9 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -35,7 +35,6 @@ enum {
35 INET6_IFADDR_STATE_DAD, 35 INET6_IFADDR_STATE_DAD,
36 INET6_IFADDR_STATE_POSTDAD, 36 INET6_IFADDR_STATE_POSTDAD,
37 INET6_IFADDR_STATE_ERRDAD, 37 INET6_IFADDR_STATE_ERRDAD,
38 INET6_IFADDR_STATE_UP,
39 INET6_IFADDR_STATE_DEAD, 38 INET6_IFADDR_STATE_DEAD,
40}; 39};
41 40
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5fbe6568c3cf..848e85cb5c61 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -242,6 +242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
242#endif 242#endif
243} 243}
244 244
245static inline unsigned long
246inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
247 unsigned long max_when)
248{
249 u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
250
251 return (unsigned long)min_t(u64, when, max_when);
252}
253
245struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); 254struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
246 255
247struct request_sock *inet_csk_search_req(const struct sock *sk, 256struct request_sock *inet_csk_search_req(const struct sock *sk,
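
The new inet_csk_rto_backoff() widens icsk_rto to 64 bits before applying the backoff shift and only then clamps to max_when, so a large backoff cannot overflow a 32-bit unsigned long. A standalone sketch of the same clamp with plain integers; the tick values below are invented.

#include <stdint.h>
#include <stdio.h>

static unsigned long rto_backoff(unsigned long rto, unsigned int backoff,
                                 unsigned long max_when)
{
        uint64_t when = (uint64_t)rto << backoff;       /* shift in 64 bits */

        return (unsigned long)(when < max_when ? when : max_when);
}

int main(void)
{
        unsigned long rto = 200;                /* base RTO in ticks */
        unsigned long max_when = 120 * 1000;    /* cap, e.g. 120 s worth */
        unsigned int backoff;

        for (backoff = 0; backoff <= 12; backoff += 4)
                printf("backoff %2u -> %lu\n", backoff,
                       rto_backoff(rto, backoff, max_when));
        return 0;
}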
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 65a8855e99fe..8d1765577acc 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -151,7 +151,7 @@ static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
151 151
152static inline void init_frag_mem_limit(struct netns_frags *nf) 152static inline void init_frag_mem_limit(struct netns_frags *nf)
153{ 153{
154 percpu_counter_init(&nf->mem, 0); 154 percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
155} 155}
156 156
157static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) 157static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 01d590ee5e7e..80479abddf73 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -61,7 +61,6 @@ struct inet_peer {
61struct inet_peer_base { 61struct inet_peer_base {
62 struct inet_peer __rcu *root; 62 struct inet_peer __rcu *root;
63 seqlock_t lock; 63 seqlock_t lock;
64 u32 flush_seq;
65 int total; 64 int total;
66}; 65};
67 66
diff --git a/include/net/ip.h b/include/net/ip.h
index db4a771b9ef3..0bb620702929 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -180,8 +180,10 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
180 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; 180 return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
181} 181}
182 182
183void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, 183void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
184 __be32 saddr, const struct ip_reply_arg *arg, 184 const struct ip_options *sopt,
185 __be32 daddr, __be32 saddr,
186 const struct ip_reply_arg *arg,
185 unsigned int len); 187 unsigned int len);
186 188
187#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) 189#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
@@ -229,8 +231,6 @@ static inline int inet_is_local_reserved_port(struct net *net, int port)
229} 231}
230#endif 232#endif
231 233
232extern int sysctl_ip_nonlocal_bind;
233
234/* From inetpeer.c */ 234/* From inetpeer.c */
235extern int inet_peer_threshold; 235extern int inet_peer_threshold;
236extern int inet_peer_minttl; 236extern int inet_peer_minttl;
@@ -364,6 +364,14 @@ static inline void inet_set_txhash(struct sock *sk)
364 sk->sk_txhash = flow_hash_from_keys(&keys); 364 sk->sk_txhash = flow_hash_from_keys(&keys);
365} 365}
366 366
367static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
368{
369 const struct iphdr *iph = skb_gro_network_header(skb);
370
371 return csum_tcpudp_nofold(iph->saddr, iph->daddr,
372 skb_gro_len(skb), proto, 0);
373}
374
367/* 375/*
368 * Map a multicast IP onto multicast MAC for type ethernet. 376 * Map a multicast IP onto multicast MAC for type ethernet.
369 */ 377 */
@@ -505,7 +513,14 @@ int ip_forward(struct sk_buff *skb);
505 513
506void ip_options_build(struct sk_buff *skb, struct ip_options *opt, 514void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
507 __be32 daddr, struct rtable *rt, int is_frag); 515 __be32 daddr, struct rtable *rt, int is_frag);
508int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb); 516
517int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb,
518 const struct ip_options *sopt);
519static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
520{
521 return __ip_options_echo(dopt, skb, &IPCB(skb)->opt);
522}
523
509void ip_options_fragment(struct sk_buff *skb); 524void ip_options_fragment(struct sk_buff *skb);
510int ip_options_compile(struct net *net, struct ip_options *opt, 525int ip_options_compile(struct net *net, struct ip_options *opt,
511 struct sk_buff *skb); 526 struct sk_buff *skb);
@@ -542,6 +557,10 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
542void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, 557void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
543 u32 info); 558 u32 info);
544 559
560bool icmp_global_allow(void);
561extern int sysctl_icmp_msgs_per_sec;
562extern int sysctl_icmp_msgs_burst;
563
545#ifdef CONFIG_PROC_FS 564#ifdef CONFIG_PROC_FS
546int ip_misc_proc_init(void); 565int ip_misc_proc_init(void);
547#endif 566#endif
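
icmp_global_allow() and the icmp_msgs_per_sec/icmp_msgs_burst sysctls add a global gate on ICMP output; the knob names suggest a steady per-second rate plus a burst allowance. The sketch below is a generic token bucket illustrating that idea only; it is not the kernel's actual implementation, and the default numbers are assumptions for the demo.

#include <stdio.h>

struct ratelimit {
        double tokens;          /* current allowance */
        double rate;            /* tokens added per second */
        double burst;           /* bucket capacity */
        double last;            /* last refill time, seconds */
};

static int rl_allow(struct ratelimit *rl, double now)
{
        rl->tokens += (now - rl->last) * rl->rate;
        rl->last = now;
        if (rl->tokens > rl->burst)
                rl->tokens = rl->burst;
        if (rl->tokens < 1.0)
                return 0;               /* over the limit: suppress */
        rl->tokens -= 1.0;
        return 1;
}

int main(void)
{
        /* Assumed defaults in the spirit of the sysctls: 1000/sec, burst 50. */
        struct ratelimit rl = { .tokens = 50, .rate = 1000, .burst = 50, .last = 0 };
        int i, sent = 0;

        /* 80 back-to-back requests at t=0: only the burst gets through. */
        for (i = 0; i < 80; i++)
                sent += rl_allow(&rl, 0.0);
        printf("burst at t=0: %d of 80 allowed\n", sent);

        /* 10 ms later the bucket has refilled by ~10 tokens. */
        printf("at t=10ms: %d\n", rl_allow(&rl, 0.010));
        return 0;
}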
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 55236cb71174..1a49b73f7f6e 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -48,6 +48,14 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
48 skb->len, proto, 0)); 48 skb->len, proto, 0));
49} 49}
50 50
51static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
52{
53 const struct ipv6hdr *iph = skb_gro_network_header(skb);
54
55 return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
56 skb_gro_len(skb), proto, 0));
57}
58
51static __inline__ __sum16 tcp_v6_check(int len, 59static __inline__ __sum16 tcp_v6_check(int len,
52 const struct in6_addr *saddr, 60 const struct in6_addr *saddr,
53 const struct in6_addr *daddr, 61 const struct in6_addr *daddr,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 9bcb220bd4ad..8eea35d32a75 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -64,7 +64,7 @@ struct fib6_node {
64 64
65 __u16 fn_bit; /* bit key */ 65 __u16 fn_bit; /* bit key */
66 __u16 fn_flags; 66 __u16 fn_flags;
67 __u32 fn_sernum; 67 int fn_sernum;
68 struct rt6_info *rr_ptr; 68 struct rt6_info *rr_ptr;
69}; 69};
70 70
@@ -114,16 +114,13 @@ struct rt6_info {
114 u32 rt6i_flags; 114 u32 rt6i_flags;
115 struct rt6key rt6i_src; 115 struct rt6key rt6i_src;
116 struct rt6key rt6i_prefsrc; 116 struct rt6key rt6i_prefsrc;
117 u32 rt6i_metric;
118 117
119 struct inet6_dev *rt6i_idev; 118 struct inet6_dev *rt6i_idev;
120 unsigned long _rt6i_peer; 119 unsigned long _rt6i_peer;
121 120
122 u32 rt6i_genid; 121 u32 rt6i_metric;
123
124 /* more non-fragment space at head required */ 122 /* more non-fragment space at head required */
125 unsigned short rt6i_nfheader_len; 123 unsigned short rt6i_nfheader_len;
126
127 u8 rt6i_protocol; 124 u8 rt6i_protocol;
128}; 125};
129 126
@@ -205,15 +202,25 @@ static inline void ip6_rt_put(struct rt6_info *rt)
205 dst_release(&rt->dst); 202 dst_release(&rt->dst);
206} 203}
207 204
208struct fib6_walker_t { 205enum fib6_walk_state {
206#ifdef CONFIG_IPV6_SUBTREES
207 FWS_S,
208#endif
209 FWS_L,
210 FWS_R,
211 FWS_C,
212 FWS_U
213};
214
215struct fib6_walker {
209 struct list_head lh; 216 struct list_head lh;
210 struct fib6_node *root, *node; 217 struct fib6_node *root, *node;
211 struct rt6_info *leaf; 218 struct rt6_info *leaf;
212 unsigned char state; 219 enum fib6_walk_state state;
213 unsigned char prune; 220 bool prune;
214 unsigned int skip; 221 unsigned int skip;
215 unsigned int count; 222 unsigned int count;
216 int (*func)(struct fib6_walker_t *); 223 int (*func)(struct fib6_walker *);
217 void *args; 224 void *args;
218}; 225};
219 226
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9922093f575e..dc9d2a27c315 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -65,7 +65,8 @@ struct fnhe_hash_bucket {
65 struct fib_nh_exception __rcu *chain; 65 struct fib_nh_exception __rcu *chain;
66}; 66};
67 67
68#define FNHE_HASH_SIZE 2048 68#define FNHE_HASH_SHIFT 11
69#define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT)
69#define FNHE_RECLAIM_DEPTH 5 70#define FNHE_RECLAIM_DEPTH 5
70 71
71struct fib_nh { 72struct fib_nh {
@@ -87,7 +88,7 @@ struct fib_nh {
87 int nh_saddr_genid; 88 int nh_saddr_genid;
88 struct rtable __rcu * __percpu *nh_pcpu_rth_output; 89 struct rtable __rcu * __percpu *nh_pcpu_rth_output;
89 struct rtable __rcu *nh_rth_input; 90 struct rtable __rcu *nh_rth_input;
90 struct fnhe_hash_bucket *nh_exceptions; 91 struct fnhe_hash_bucket __rcu *nh_exceptions;
91}; 92};
92 93
93/* 94/*
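
Expressing the exception table size as 1 << FNHE_HASH_SHIFT lets the hash width and the table size be derived from a single constant, so they cannot drift apart. A small sketch; the multiplicative hash below is a generic stand-in, not the kernel's hashing of the destination address.

#include <stdint.h>
#include <stdio.h>

#define FNHE_HASH_SHIFT 11
#define FNHE_HASH_SIZE  (1 << FNHE_HASH_SHIFT)

/* Generic Fibonacci-style hash returning exactly 'bits' bits. */
static uint32_t hash_to_bits(uint32_t val, unsigned int bits)
{
        return (val * 0x61C88647u) >> (32 - bits);
}

int main(void)
{
        uint32_t daddr = 0xc0a80101;    /* 192.168.1.1 as a sample key */
        uint32_t slot = hash_to_bits(daddr, FNHE_HASH_SHIFT);

        /* slot is always < FNHE_HASH_SIZE because both come from the shift. */
        printf("table size %d, slot %u\n", FNHE_HASH_SIZE, (unsigned)slot);
        return 0;
}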
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 8dd8cab88b87..5bc6edeb7143 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -10,6 +10,7 @@
10#include <net/gro_cells.h> 10#include <net/gro_cells.h>
11#include <net/inet_ecn.h> 11#include <net/inet_ecn.h>
12#include <net/ip.h> 12#include <net/ip.h>
13#include <net/netns/generic.h>
13#include <net/rtnetlink.h> 14#include <net/rtnetlink.h>
14 15
15#if IS_ENABLED(CONFIG_IPV6) 16#if IS_ENABLED(CONFIG_IPV6)
@@ -31,6 +32,13 @@ struct ip_tunnel_6rd_parm {
31}; 32};
32#endif 33#endif
33 34
35struct ip_tunnel_encap {
36 __u16 type;
37 __u16 flags;
38 __be16 sport;
39 __be16 dport;
40};
41
34struct ip_tunnel_prl_entry { 42struct ip_tunnel_prl_entry {
35 struct ip_tunnel_prl_entry __rcu *next; 43 struct ip_tunnel_prl_entry __rcu *next;
36 __be32 addr; 44 __be32 addr;
@@ -56,13 +64,18 @@ struct ip_tunnel {
56 /* These four fields used only by GRE */ 64 /* These four fields used only by GRE */
57 __u32 i_seqno; /* The last seen seqno */ 65 __u32 i_seqno; /* The last seen seqno */
58 __u32 o_seqno; /* The last output seqno */ 66 __u32 o_seqno; /* The last output seqno */
59 int hlen; /* Precalculated header length */ 67 int tun_hlen; /* Precalculated header length */
60 int mlink; 68 int mlink;
61 69
62 struct ip_tunnel_dst __percpu *dst_cache; 70 struct ip_tunnel_dst __percpu *dst_cache;
63 71
64 struct ip_tunnel_parm parms; 72 struct ip_tunnel_parm parms;
65 73
74 int encap_hlen; /* Encap header length (FOU,GUE) */
75 struct ip_tunnel_encap encap;
76
77 int hlen; /* tun_hlen + encap_hlen */
78
66 /* for SIT */ 79 /* for SIT */
67#ifdef CONFIG_IPV6_SIT_6RD 80#ifdef CONFIG_IPV6_SIT_6RD
68 struct ip_tunnel_6rd_parm ip6rd; 81 struct ip_tunnel_6rd_parm ip6rd;
@@ -73,15 +86,18 @@ struct ip_tunnel {
73 struct gro_cells gro_cells; 86 struct gro_cells gro_cells;
74}; 87};
75 88
76#define TUNNEL_CSUM __cpu_to_be16(0x01) 89#define TUNNEL_CSUM __cpu_to_be16(0x01)
77#define TUNNEL_ROUTING __cpu_to_be16(0x02) 90#define TUNNEL_ROUTING __cpu_to_be16(0x02)
78#define TUNNEL_KEY __cpu_to_be16(0x04) 91#define TUNNEL_KEY __cpu_to_be16(0x04)
79#define TUNNEL_SEQ __cpu_to_be16(0x08) 92#define TUNNEL_SEQ __cpu_to_be16(0x08)
80#define TUNNEL_STRICT __cpu_to_be16(0x10) 93#define TUNNEL_STRICT __cpu_to_be16(0x10)
81#define TUNNEL_REC __cpu_to_be16(0x20) 94#define TUNNEL_REC __cpu_to_be16(0x20)
82#define TUNNEL_VERSION __cpu_to_be16(0x40) 95#define TUNNEL_VERSION __cpu_to_be16(0x40)
83#define TUNNEL_NO_KEY __cpu_to_be16(0x80) 96#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
84#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100) 97#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
98#define TUNNEL_OAM __cpu_to_be16(0x0200)
99#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
100#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800)
85 101
86struct tnl_ptk_info { 102struct tnl_ptk_info {
87 __be16 flags; 103 __be16 flags;
@@ -114,6 +130,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
114void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, 130void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
115 const struct iphdr *tnl_params, const u8 protocol); 131 const struct iphdr *tnl_params, const u8 protocol);
116int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); 132int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
133int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
134 u8 *protocol, struct flowi4 *fl4);
117int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); 135int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
118 136
119struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, 137struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
@@ -131,6 +149,8 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
131 struct ip_tunnel_parm *p); 149 struct ip_tunnel_parm *p);
132void ip_tunnel_setup(struct net_device *dev, int net_id); 150void ip_tunnel_setup(struct net_device *dev, int net_id);
133void ip_tunnel_dst_reset_all(struct ip_tunnel *t); 151void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
152int ip_tunnel_encap_setup(struct ip_tunnel *t,
153 struct ip_tunnel_encap *ipencap);
134 154
135/* Extract dsfield from inner protocol */ 155/* Extract dsfield from inner protocol */
136static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, 156static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
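
With the encap support, the cached header length splits into tun_hlen (the tunnel's own header) plus encap_hlen (the FOU/GUE UDP encapsulation), and hlen stores the sum for MTU bookkeeping. A small arithmetic sketch; the per-header sizes are the usual on-wire values and the GRE-over-GUE combination is just an example.

/* hlen = tun_hlen + encap_hlen, and the payload MTU shrinks accordingly. */
#include <stdio.h>

int main(void)
{
        int link_mtu   = 1500;
        int iph_len    = 20;    /* outer IPv4 header */
        int tun_hlen   = 4;     /* basic GRE header, no key/csum/seq */
        int encap_hlen = 8 + 4; /* UDP header + 4-byte GUE header (plain FOU: 8 + 0) */
        int hlen       = tun_hlen + encap_hlen;

        printf("hlen = %d bytes\n", hlen);
        printf("inner payload MTU = %d bytes\n", link_mtu - iph_len - hlen);
        return 0;
}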
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 624a8a54806d..615b20b58545 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1,6 +1,5 @@
1/* 1/* IP Virtual Server
2 * IP Virtual Server 2 * data structure and functionality definitions
3 * data structure and functionality definitions
4 */ 3 */
5 4
6#ifndef _NET_IP_VS_H 5#ifndef _NET_IP_VS_H
@@ -12,7 +11,7 @@
12 11
13#include <linux/list.h> /* for struct list_head */ 12#include <linux/list.h> /* for struct list_head */
14#include <linux/spinlock.h> /* for struct rwlock_t */ 13#include <linux/spinlock.h> /* for struct rwlock_t */
15#include <linux/atomic.h> /* for struct atomic_t */ 14#include <linux/atomic.h> /* for struct atomic_t */
16#include <linux/compiler.h> 15#include <linux/compiler.h>
17#include <linux/timer.h> 16#include <linux/timer.h>
18#include <linux/bug.h> 17#include <linux/bug.h>
@@ -30,15 +29,13 @@
30#endif 29#endif
31#include <net/net_namespace.h> /* Netw namespace */ 30#include <net/net_namespace.h> /* Netw namespace */
32 31
33/* 32/* Generic access of ipvs struct */
34 * Generic access of ipvs struct
35 */
36static inline struct netns_ipvs *net_ipvs(struct net* net) 33static inline struct netns_ipvs *net_ipvs(struct net* net)
37{ 34{
38 return net->ipvs; 35 return net->ipvs;
39} 36}
40/* 37
41 * Get net ptr from skb in traffic cases 38/* Get net ptr from skb in traffic cases
42 * use skb_sknet when call is from userland (ioctl or netlink) 39 * use skb_sknet when call is from userland (ioctl or netlink)
43 */ 40 */
44static inline struct net *skb_net(const struct sk_buff *skb) 41static inline struct net *skb_net(const struct sk_buff *skb)
@@ -90,8 +87,8 @@ static inline struct net *skb_sknet(const struct sk_buff *skb)
90 return &init_net; 87 return &init_net;
91#endif 88#endif
92} 89}
93/* 90
94 * This one needed for single_open_net since net is stored directly in 91/* This one needed for single_open_net since net is stored directly in
95 * private not as a struct i.e. seq_file_net can't be used. 92 * private not as a struct i.e. seq_file_net can't be used.
96 */ 93 */
97static inline struct net *seq_file_single_net(struct seq_file *seq) 94static inline struct net *seq_file_single_net(struct seq_file *seq)
@@ -108,7 +105,7 @@ extern int ip_vs_conn_tab_size;
108 105
109struct ip_vs_iphdr { 106struct ip_vs_iphdr {
110 __u32 len; /* IPv4 simply where L4 starts 107 __u32 len; /* IPv4 simply where L4 starts
111 IPv6 where L4 Transport Header starts */ 108 * IPv6 where L4 Transport Header starts */
112 __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/ 109 __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
113 __s16 protocol; 110 __s16 protocol;
114 __s32 flags; 111 __s32 flags;
@@ -304,16 +301,11 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
304#define LeaveFunction(level) do {} while (0) 301#define LeaveFunction(level) do {} while (0)
305#endif 302#endif
306 303
307 304/* The port number of FTP service (in network order). */
308/*
309 * The port number of FTP service (in network order).
310 */
311#define FTPPORT cpu_to_be16(21) 305#define FTPPORT cpu_to_be16(21)
312#define FTPDATA cpu_to_be16(20) 306#define FTPDATA cpu_to_be16(20)
313 307
314/* 308/* TCP State Values */
315 * TCP State Values
316 */
317enum { 309enum {
318 IP_VS_TCP_S_NONE = 0, 310 IP_VS_TCP_S_NONE = 0,
319 IP_VS_TCP_S_ESTABLISHED, 311 IP_VS_TCP_S_ESTABLISHED,
@@ -329,25 +321,19 @@ enum {
329 IP_VS_TCP_S_LAST 321 IP_VS_TCP_S_LAST
330}; 322};
331 323
332/* 324/* UDP State Values */
333 * UDP State Values
334 */
335enum { 325enum {
336 IP_VS_UDP_S_NORMAL, 326 IP_VS_UDP_S_NORMAL,
337 IP_VS_UDP_S_LAST, 327 IP_VS_UDP_S_LAST,
338}; 328};
339 329
340/* 330/* ICMP State Values */
341 * ICMP State Values
342 */
343enum { 331enum {
344 IP_VS_ICMP_S_NORMAL, 332 IP_VS_ICMP_S_NORMAL,
345 IP_VS_ICMP_S_LAST, 333 IP_VS_ICMP_S_LAST,
346}; 334};
347 335
348/* 336/* SCTP State Values */
349 * SCTP State Values
350 */
351enum ip_vs_sctp_states { 337enum ip_vs_sctp_states {
352 IP_VS_SCTP_S_NONE, 338 IP_VS_SCTP_S_NONE,
353 IP_VS_SCTP_S_INIT1, 339 IP_VS_SCTP_S_INIT1,
@@ -366,21 +352,18 @@ enum ip_vs_sctp_states {
366 IP_VS_SCTP_S_LAST 352 IP_VS_SCTP_S_LAST
367}; 353};
368 354
369/* 355/* Delta sequence info structure
370 * Delta sequence info structure 356 * Each ip_vs_conn has 2 (output AND input seq. changes).
371 * Each ip_vs_conn has 2 (output AND input seq. changes). 357 * Only used in the VS/NAT.
372 * Only used in the VS/NAT.
373 */ 358 */
374struct ip_vs_seq { 359struct ip_vs_seq {
375 __u32 init_seq; /* Add delta from this seq */ 360 __u32 init_seq; /* Add delta from this seq */
376 __u32 delta; /* Delta in sequence numbers */ 361 __u32 delta; /* Delta in sequence numbers */
377 __u32 previous_delta; /* Delta in sequence numbers 362 __u32 previous_delta; /* Delta in sequence numbers
378 before last resized pkt */ 363 * before last resized pkt */
379}; 364};
380 365
381/* 366/* counters per cpu */
382 * counters per cpu
383 */
384struct ip_vs_counters { 367struct ip_vs_counters {
385 __u32 conns; /* connections scheduled */ 368 __u32 conns; /* connections scheduled */
386 __u32 inpkts; /* incoming packets */ 369 __u32 inpkts; /* incoming packets */
@@ -388,17 +371,13 @@ struct ip_vs_counters {
388 __u64 inbytes; /* incoming bytes */ 371 __u64 inbytes; /* incoming bytes */
389 __u64 outbytes; /* outgoing bytes */ 372 __u64 outbytes; /* outgoing bytes */
390}; 373};
391/* 374/* Stats per cpu */
392 * Stats per cpu
393 */
394struct ip_vs_cpu_stats { 375struct ip_vs_cpu_stats {
395 struct ip_vs_counters ustats; 376 struct ip_vs_counters ustats;
396 struct u64_stats_sync syncp; 377 struct u64_stats_sync syncp;
397}; 378};
398 379
399/* 380/* IPVS statistics objects */
400 * IPVS statistics objects
401 */
402struct ip_vs_estimator { 381struct ip_vs_estimator {
403 struct list_head list; 382 struct list_head list;
404 383
@@ -491,9 +470,7 @@ struct ip_vs_protocol {
491 void (*timeout_change)(struct ip_vs_proto_data *pd, int flags); 470 void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
492}; 471};
493 472
494/* 473/* protocol data per netns */
495 * protocol data per netns
496 */
497struct ip_vs_proto_data { 474struct ip_vs_proto_data {
498 struct ip_vs_proto_data *next; 475 struct ip_vs_proto_data *next;
499 struct ip_vs_protocol *pp; 476 struct ip_vs_protocol *pp;
@@ -520,9 +497,7 @@ struct ip_vs_conn_param {
520 __u8 pe_data_len; 497 __u8 pe_data_len;
521}; 498};
522 499
523/* 500/* IP_VS structure allocated for each dynamically scheduled connection */
524 * IP_VS structure allocated for each dynamically scheduled connection
525 */
526struct ip_vs_conn { 501struct ip_vs_conn {
527 struct hlist_node c_list; /* hashed list heads */ 502 struct hlist_node c_list; /* hashed list heads */
528 /* Protocol, addresses and port numbers */ 503 /* Protocol, addresses and port numbers */
@@ -535,6 +510,7 @@ struct ip_vs_conn {
535 union nf_inet_addr daddr; /* destination address */ 510 union nf_inet_addr daddr; /* destination address */
536 volatile __u32 flags; /* status flags */ 511 volatile __u32 flags; /* status flags */
537 __u16 protocol; /* Which protocol (TCP/UDP) */ 512 __u16 protocol; /* Which protocol (TCP/UDP) */
513 __u16 daf; /* Address family of the dest */
538#ifdef CONFIG_NET_NS 514#ifdef CONFIG_NET_NS
539 struct net *net; /* Name space */ 515 struct net *net; /* Name space */
540#endif 516#endif
@@ -560,17 +536,18 @@ struct ip_vs_conn {
560 struct ip_vs_dest *dest; /* real server */ 536 struct ip_vs_dest *dest; /* real server */
561 atomic_t in_pkts; /* incoming packet counter */ 537 atomic_t in_pkts; /* incoming packet counter */
562 538
563 /* packet transmitter for different forwarding methods. If it 539 /* Packet transmitter for different forwarding methods. If it
564 mangles the packet, it must return NF_DROP or better NF_STOLEN, 540 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
565 otherwise this must be changed to a sk_buff **. 541 * otherwise this must be changed to a sk_buff **.
566 NF_ACCEPT can be returned when destination is local. 542 * NF_ACCEPT can be returned when destination is local.
567 */ 543 */
568 int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp, 544 int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
569 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); 545 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
570 546
571 /* Note: we can group the following members into a structure, 547 /* Note: we can group the following members into a structure,
572 in order to save more space, and the following members are 548 * in order to save more space, and the following members are
573 only used in VS/NAT anyway */ 549 * only used in VS/NAT anyway
550 */
574 struct ip_vs_app *app; /* bound ip_vs_app object */ 551 struct ip_vs_app *app; /* bound ip_vs_app object */
575 void *app_data; /* Application private data */ 552 void *app_data; /* Application private data */
576 struct ip_vs_seq in_seq; /* incoming seq. struct */ 553 struct ip_vs_seq in_seq; /* incoming seq. struct */
@@ -583,9 +560,7 @@ struct ip_vs_conn {
583 struct rcu_head rcu_head; 560 struct rcu_head rcu_head;
584}; 561};
585 562
586/* 563/* To save some memory in conn table when name space is disabled. */
587 * To save some memory in conn table when name space is disabled.
588 */
589static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) 564static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
590{ 565{
591#ifdef CONFIG_NET_NS 566#ifdef CONFIG_NET_NS
@@ -594,6 +569,7 @@ static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
594 return &init_net; 569 return &init_net;
595#endif 570#endif
596} 571}
572
597static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net) 573static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
598{ 574{
599#ifdef CONFIG_NET_NS 575#ifdef CONFIG_NET_NS
@@ -611,13 +587,12 @@ static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
611#endif 587#endif
612} 588}
613 589
614/* 590/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
615 * Extended internal versions of struct ip_vs_service_user and 591 * for IPv6 support.
616 * ip_vs_dest_user for IPv6 support.
617 * 592 *
618 * We need these to conveniently pass around service and destination 593 * We need these to conveniently pass around service and destination
619 * options, but unfortunately, we also need to keep the old definitions to 594 * options, but unfortunately, we also need to keep the old definitions to
620 * maintain userspace backwards compatibility for the setsockopt interface. 595 * maintain userspace backwards compatibility for the setsockopt interface.
621 */ 596 */
622struct ip_vs_service_user_kern { 597struct ip_vs_service_user_kern {
623 /* virtual service addresses */ 598 /* virtual service addresses */
@@ -648,12 +623,15 @@ struct ip_vs_dest_user_kern {
648 /* thresholds for active connections */ 623 /* thresholds for active connections */
649 u32 u_threshold; /* upper threshold */ 624 u32 u_threshold; /* upper threshold */
650 u32 l_threshold; /* lower threshold */ 625 u32 l_threshold; /* lower threshold */
626
627 /* Address family of addr */
628 u16 af;
651}; 629};
652 630
653 631
654/* 632/*
655 * The information about the virtual service offered to the net 633 * The information about the virtual service offered to the net and the
656 * and the forwarding entries 634 * forwarding entries.
657 */ 635 */
658struct ip_vs_service { 636struct ip_vs_service {
659 struct hlist_node s_list; /* for normal service table */ 637 struct hlist_node s_list; /* for normal service table */
@@ -693,9 +671,8 @@ struct ip_vs_dest_dst {
693 struct rcu_head rcu_head; 671 struct rcu_head rcu_head;
694}; 672};
695 673
696/* 674/* The real server destination forwarding entry with ip address, port number,
697 * The real server destination forwarding entry 675 * and so on.
698 * with ip address, port number, and so on.
699 */ 676 */
700struct ip_vs_dest { 677struct ip_vs_dest {
701 struct list_head n_list; /* for the dests in the service */ 678 struct list_head n_list; /* for the dests in the service */
@@ -734,10 +711,7 @@ struct ip_vs_dest {
734 unsigned int in_rs_table:1; /* we are in rs_table */ 711 unsigned int in_rs_table:1; /* we are in rs_table */
735}; 712};
736 713
737 714/* The scheduler object */
738/*
739 * The scheduler object
740 */
741struct ip_vs_scheduler { 715struct ip_vs_scheduler {
742 struct list_head n_list; /* d-linked list head */ 716 struct list_head n_list; /* d-linked list head */
743 char *name; /* scheduler name */ 717 char *name; /* scheduler name */
@@ -777,9 +751,7 @@ struct ip_vs_pe {
777 int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf); 751 int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
778}; 752};
779 753
780/* 754/* The application module object (a.k.a. app incarnation) */
781 * The application module object (a.k.a. app incarnation)
782 */
783struct ip_vs_app { 755struct ip_vs_app {
784 struct list_head a_list; /* member in app list */ 756 struct list_head a_list; /* member in app list */
785 int type; /* IP_VS_APP_TYPE_xxx */ 757 int type; /* IP_VS_APP_TYPE_xxx */
@@ -795,16 +767,14 @@ struct ip_vs_app {
795 atomic_t usecnt; /* usage counter */ 767 atomic_t usecnt; /* usage counter */
796 struct rcu_head rcu_head; 768 struct rcu_head rcu_head;
797 769
798 /* 770 /* output hook: Process packet in inout direction, diff set for TCP.
799 * output hook: Process packet in inout direction, diff set for TCP.
800 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, 771 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
801 * 2=Mangled but checksum was not updated 772 * 2=Mangled but checksum was not updated
802 */ 773 */
803 int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, 774 int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
804 struct sk_buff *, int *diff); 775 struct sk_buff *, int *diff);
805 776
806 /* 777 /* input hook: Process packet in outin direction, diff set for TCP.
807 * input hook: Process packet in outin direction, diff set for TCP.
808 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, 778 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
809 * 2=Mangled but checksum was not updated 779 * 2=Mangled but checksum was not updated
810 */ 780 */
@@ -863,9 +833,7 @@ struct ipvs_master_sync_state {
863struct netns_ipvs { 833struct netns_ipvs {
864 int gen; /* Generation */ 834 int gen; /* Generation */
865 int enable; /* enable like nf_hooks do */ 835 int enable; /* enable like nf_hooks do */
866 /* 836 /* Hash table: for real service lookups */
867 * Hash table: for real service lookups
868 */
869 #define IP_VS_RTAB_BITS 4 837 #define IP_VS_RTAB_BITS 4
870 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) 838 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
871 #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) 839 #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
@@ -899,7 +867,7 @@ struct netns_ipvs {
899 struct list_head sctp_apps[SCTP_APP_TAB_SIZE]; 867 struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
900#endif 868#endif
901 /* ip_vs_conn */ 869 /* ip_vs_conn */
902 atomic_t conn_count; /* connection counter */ 870 atomic_t conn_count; /* connection counter */
903 871
904 /* ip_vs_ctl */ 872 /* ip_vs_ctl */
905 struct ip_vs_stats tot_stats; /* Statistics & est. */ 873 struct ip_vs_stats tot_stats; /* Statistics & est. */
@@ -986,6 +954,10 @@ struct netns_ipvs {
986 char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; 954 char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
987 /* net name space ptr */ 955 /* net name space ptr */
988 struct net *net; /* Needed by timer routines */ 956 struct net *net; /* Needed by timer routines */
 957 /* Number of heterogeneous destinations, needed because heterogeneous
958 * are not supported when synchronization is enabled.
959 */
960 unsigned int mixed_address_family_dests;
989}; 961};
990 962
991#define DEFAULT_SYNC_THRESHOLD 3 963#define DEFAULT_SYNC_THRESHOLD 3
@@ -1139,9 +1111,8 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
1139 1111
1140#endif 1112#endif
1141 1113
1142/* 1114/* IPVS core functions
1143 * IPVS core functions 1115 * (from ip_vs_core.c)
1144 * (from ip_vs_core.c)
1145 */ 1116 */
1146const char *ip_vs_proto_name(unsigned int proto); 1117const char *ip_vs_proto_name(unsigned int proto);
1147void ip_vs_init_hash_table(struct list_head *table, int rows); 1118void ip_vs_init_hash_table(struct list_head *table, int rows);
@@ -1149,11 +1120,9 @@ void ip_vs_init_hash_table(struct list_head *table, int rows);
1149 1120
1150#define IP_VS_APP_TYPE_FTP 1 1121#define IP_VS_APP_TYPE_FTP 1
1151 1122
1152/* 1123/* ip_vs_conn handling functions
1153 * ip_vs_conn handling functions 1124 * (from ip_vs_conn.c)
1154 * (from ip_vs_conn.c)
1155 */ 1125 */
1156
1157enum { 1126enum {
1158 IP_VS_DIR_INPUT = 0, 1127 IP_VS_DIR_INPUT = 0,
1159 IP_VS_DIR_OUTPUT, 1128 IP_VS_DIR_OUTPUT,
@@ -1210,7 +1179,7 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
1210void ip_vs_conn_put(struct ip_vs_conn *cp); 1179void ip_vs_conn_put(struct ip_vs_conn *cp);
1211void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); 1180void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
1212 1181
1213struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, 1182struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
1214 const union nf_inet_addr *daddr, 1183 const union nf_inet_addr *daddr,
1215 __be16 dport, unsigned int flags, 1184 __be16 dport, unsigned int flags,
1216 struct ip_vs_dest *dest, __u32 fwmark); 1185 struct ip_vs_dest *dest, __u32 fwmark);
@@ -1284,9 +1253,7 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
1284 atomic_inc(&ctl_cp->n_control); 1253 atomic_inc(&ctl_cp->n_control);
1285} 1254}
1286 1255
1287/* 1256/* IPVS netns init & cleanup functions */
1288 * IPVS netns init & cleanup functions
1289 */
1290int ip_vs_estimator_net_init(struct net *net); 1257int ip_vs_estimator_net_init(struct net *net);
1291int ip_vs_control_net_init(struct net *net); 1258int ip_vs_control_net_init(struct net *net);
1292int ip_vs_protocol_net_init(struct net *net); 1259int ip_vs_protocol_net_init(struct net *net);
@@ -1301,9 +1268,8 @@ void ip_vs_estimator_net_cleanup(struct net *net);
1301void ip_vs_sync_net_cleanup(struct net *net); 1268void ip_vs_sync_net_cleanup(struct net *net);
1302void ip_vs_service_net_cleanup(struct net *net); 1269void ip_vs_service_net_cleanup(struct net *net);
1303 1270
1304/* 1271/* IPVS application functions
1305 * IPVS application functions 1272 * (from ip_vs_app.c)
1306 * (from ip_vs_app.c)
1307 */ 1273 */
1308#define IP_VS_APP_MAX_PORTS 8 1274#define IP_VS_APP_MAX_PORTS 8
1309struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app); 1275struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
@@ -1323,9 +1289,7 @@ int unregister_ip_vs_pe(struct ip_vs_pe *pe);
1323struct ip_vs_pe *ip_vs_pe_getbyname(const char *name); 1289struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
1324struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name); 1290struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
1325 1291
1326/* 1292/* Use a #define to avoid all of module.h just for these trivial ops */
1327 * Use a #define to avoid all of module.h just for these trivial ops
1328 */
1329#define ip_vs_pe_get(pe) \ 1293#define ip_vs_pe_get(pe) \
1330 if (pe && pe->module) \ 1294 if (pe && pe->module) \
1331 __module_get(pe->module); 1295 __module_get(pe->module);
@@ -1334,9 +1298,7 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
1334 if (pe && pe->module) \ 1298 if (pe && pe->module) \
1335 module_put(pe->module); 1299 module_put(pe->module);
1336 1300
1337/* 1301/* IPVS protocol functions (from ip_vs_proto.c) */
1338 * IPVS protocol functions (from ip_vs_proto.c)
1339 */
1340int ip_vs_protocol_init(void); 1302int ip_vs_protocol_init(void);
1341void ip_vs_protocol_cleanup(void); 1303void ip_vs_protocol_cleanup(void);
1342void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags); 1304void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
@@ -1354,9 +1316,8 @@ extern struct ip_vs_protocol ip_vs_protocol_esp;
1354extern struct ip_vs_protocol ip_vs_protocol_ah; 1316extern struct ip_vs_protocol ip_vs_protocol_ah;
1355extern struct ip_vs_protocol ip_vs_protocol_sctp; 1317extern struct ip_vs_protocol ip_vs_protocol_sctp;
1356 1318
1357/* 1319/* Registering/unregistering scheduler functions
1358 * Registering/unregistering scheduler functions 1320 * (from ip_vs_sched.c)
1359 * (from ip_vs_sched.c)
1360 */ 1321 */
1361int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); 1322int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
1362int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); 1323int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
@@ -1375,10 +1336,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
1375 1336
1376void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg); 1337void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
1377 1338
1378 1339/* IPVS control data and functions (from ip_vs_ctl.c) */
1379/*
1380 * IPVS control data and functions (from ip_vs_ctl.c)
1381 */
1382extern struct ip_vs_stats ip_vs_stats; 1340extern struct ip_vs_stats ip_vs_stats;
1383extern int sysctl_ip_vs_sync_ver; 1341extern int sysctl_ip_vs_sync_ver;
1384 1342
@@ -1396,8 +1354,9 @@ void ip_vs_unregister_nl_ioctl(void);
1396int ip_vs_control_init(void); 1354int ip_vs_control_init(void);
1397void ip_vs_control_cleanup(void); 1355void ip_vs_control_cleanup(void);
1398struct ip_vs_dest * 1356struct ip_vs_dest *
1399ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, 1357ip_vs_find_dest(struct net *net, int svc_af, int dest_af,
1400 __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, 1358 const union nf_inet_addr *daddr, __be16 dport,
1359 const union nf_inet_addr *vaddr, __be16 vport,
1401 __u16 protocol, __u32 fwmark, __u32 flags); 1360 __u16 protocol, __u32 fwmark, __u32 flags);
1402void ip_vs_try_bind_dest(struct ip_vs_conn *cp); 1361void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
1403 1362
@@ -1418,26 +1377,21 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
1418 kfree(dest); 1377 kfree(dest);
1419} 1378}
1420 1379
1421/* 1380/* IPVS sync daemon data and function prototypes
1422 * IPVS sync daemon data and function prototypes 1381 * (from ip_vs_sync.c)
1423 * (from ip_vs_sync.c)
1424 */ 1382 */
1425int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid); 1383int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
1426int stop_sync_thread(struct net *net, int state); 1384int stop_sync_thread(struct net *net, int state);
1427void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); 1385void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
1428 1386
1429/* 1387/* IPVS rate estimator prototypes (from ip_vs_est.c) */
1430 * IPVS rate estimator prototypes (from ip_vs_est.c)
1431 */
1432void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); 1388void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
1433void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); 1389void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
1434void ip_vs_zero_estimator(struct ip_vs_stats *stats); 1390void ip_vs_zero_estimator(struct ip_vs_stats *stats);
1435void ip_vs_read_estimator(struct ip_vs_stats_user *dst, 1391void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
1436 struct ip_vs_stats *stats); 1392 struct ip_vs_stats *stats);
1437 1393
1438/* 1394/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
1439 * Various IPVS packet transmitters (from ip_vs_xmit.c)
1440 */
1441int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1395int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1442 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); 1396 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1443int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1397int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1468,12 +1422,10 @@ int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1468#endif 1422#endif
1469 1423
1470#ifdef CONFIG_SYSCTL 1424#ifdef CONFIG_SYSCTL
1471/* 1425/* This is a simple mechanism to ignore packets when
1472 * This is a simple mechanism to ignore packets when 1426 * we are loaded. Just set ip_vs_drop_rate to 'n' and
1473 * we are loaded. Just set ip_vs_drop_rate to 'n' and 1427 * we start to drop 1/rate of the packets
1474 * we start to drop 1/rate of the packets
1475 */ 1428 */
1476
1477static inline int ip_vs_todrop(struct netns_ipvs *ipvs) 1429static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
1478{ 1430{
1479 if (!ipvs->drop_rate) 1431 if (!ipvs->drop_rate)
@@ -1487,9 +1439,7 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
1487static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } 1439static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
1488#endif 1440#endif
1489 1441
1490/* 1442/* ip_vs_fwd_tag returns the forwarding tag of the connection */
1491 * ip_vs_fwd_tag returns the forwarding tag of the connection
1492 */
1493#define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) 1443#define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)
1494 1444
1495static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp) 1445static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
@@ -1548,9 +1498,7 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
1548 return csum_partial(diff, sizeof(diff), oldsum); 1498 return csum_partial(diff, sizeof(diff), oldsum);
1549} 1499}
1550 1500
1551/* 1501/* Forget current conntrack (unconfirmed) and attach notrack entry */
1552 * Forget current conntrack (unconfirmed) and attach notrack entry
1553 */
1554static inline void ip_vs_notrack(struct sk_buff *skb) 1502static inline void ip_vs_notrack(struct sk_buff *skb)
1555{ 1503{
1556#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 1504#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -1567,9 +1515,8 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
1567} 1515}
1568 1516
1569#ifdef CONFIG_IP_VS_NFCT 1517#ifdef CONFIG_IP_VS_NFCT
1570/* 1518/* Netfilter connection tracking
1571 * Netfilter connection tracking 1519 * (from ip_vs_nfct.c)
1572 * (from ip_vs_nfct.c)
1573 */ 1520 */
1574static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) 1521static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
1575{ 1522{
@@ -1608,14 +1555,12 @@ static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
1608static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) 1555static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
1609{ 1556{
1610} 1557}
1611/* CONFIG_IP_VS_NFCT */ 1558#endif /* CONFIG_IP_VS_NFCT */
1612#endif
1613 1559
1614static inline int 1560static inline int
1615ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) 1561ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
1616{ 1562{
1617 /* 1563 /* We think the overhead of processing active connections is 256
1618 * We think the overhead of processing active connections is 256
1619 * times higher than that of inactive connections in average. (This 1564 * times higher than that of inactive connections in average. (This
1620 * 256 times might not be accurate, we will change it later) We 1565 * 256 times might not be accurate, we will change it later) We
1621 * use the following formula to estimate the overhead now: 1566 * use the following formula to estimate the overhead now:
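[editor's note] The ip_vs_todrop() helper touched above implements the "drop 1/rate of the packets" behaviour described in its comment by counting packets down from drop_rate. A minimal user-space model of that countdown, with a toy harness (the struct and main() are invented; only the countdown logic mirrors what the hunk shows, and the real kernel function may differ, e.g. in locking):

#include <stdio.h>

/* Simplified stand-in for the drop-rate fields of struct netns_ipvs. */
struct toy_ipvs {
	int drop_rate;    /* drop one packet out of every drop_rate */
	int drop_counter; /* counts down to the next drop */
};

/* Mirrors the countdown suggested by ip_vs_todrop() above:
 * returns 1 when the packet should be dropped, 0 otherwise. */
static int toy_todrop(struct toy_ipvs *ipvs)
{
	if (!ipvs->drop_rate)
		return 0;
	if (--ipvs->drop_counter > 0)
		return 0;
	ipvs->drop_counter = ipvs->drop_rate;
	return 1;
}

int main(void)
{
	struct toy_ipvs ipvs = { .drop_rate = 4, .drop_counter = 4 };
	int i;

	for (i = 1; i <= 12; i++)
		printf("packet %2d: %s\n", i,
		       toy_todrop(&ipvs) ? "drop" : "accept");
	return 0;	/* with rate 4, every 4th packet is dropped */
}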
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index a2db816e8461..97f472012438 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -121,6 +121,7 @@ struct frag_hdr {
121 121
122/* sysctls */ 122/* sysctls */
123extern int sysctl_mld_max_msf; 123extern int sysctl_mld_max_msf;
124extern int sysctl_mld_qrv;
124 125
125#define _DEVINC(net, statname, modifier, idev, field) \ 126#define _DEVINC(net, statname, modifier, idev, field) \
126({ \ 127({ \
@@ -287,7 +288,8 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
287struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, 288struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
288 struct ipv6_txoptions *opt); 289 struct ipv6_txoptions *opt);
289 290
290bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb); 291bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
292 const struct inet6_skb_parm *opt);
291 293
292static inline bool ipv6_accept_ra(struct inet6_dev *idev) 294static inline bool ipv6_accept_ra(struct inet6_dev *idev)
293{ 295{
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index dae2e24616e1..0ad1f47d2dc7 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4,6 +4,7 @@
4 * Copyright 2002-2005, Devicescape Software, Inc. 4 * Copyright 2002-2005, Devicescape Software, Inc.
5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -1226,7 +1227,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
1226 * 1227 *
1227 * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the 1228 * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
1228 * driver to indicate that it requires IV generation for this 1229 * driver to indicate that it requires IV generation for this
1229 * particular key. 1230 * particular key. Setting this flag does not necessarily mean that SKBs
1231 * will have sufficient tailroom for ICV or MIC.
1230 * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by 1232 * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
1231 * the driver for a TKIP key if it requires Michael MIC 1233 * the driver for a TKIP key if it requires Michael MIC
1232 * generation in software. 1234 * generation in software.
@@ -1238,7 +1240,9 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
1238 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver 1240 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
1239 * if space should be prepared for the IV, but the IV 1241 * if space should be prepared for the IV, but the IV
1240 * itself should not be generated. Do not set together with 1242 * itself should not be generated. Do not set together with
1241 * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. 1243 * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
1244 * not necessarily mean that SKBs will have sufficient tailroom for ICV or
1245 * MIC.
1242 * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received 1246 * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
1243 * management frames. The flag can help drivers that have a hardware 1247 * management frames. The flag can help drivers that have a hardware
1244 * crypto implementation that doesn't deal with management frames 1248 * crypto implementation that doesn't deal with management frames
@@ -1405,7 +1409,7 @@ struct ieee80211_sta_rates {
1405 * @supp_rates: Bitmap of supported rates (per band) 1409 * @supp_rates: Bitmap of supported rates (per band)
1406 * @ht_cap: HT capabilities of this STA; restricted to our own capabilities 1410 * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
1407 * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities 1411 * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
1408 * @wme: indicates whether the STA supports WME. Only valid during AP-mode. 1412 * @wme: indicates whether the STA supports QoS/WME.
1409 * @drv_priv: data area for driver use, will always be aligned to 1413 * @drv_priv: data area for driver use, will always be aligned to
1410 * sizeof(void *), size is determined in hw information. 1414 * sizeof(void *), size is determined in hw information.
1411 * @uapsd_queues: bitmap of queues configured for uapsd. Only valid 1415 * @uapsd_queues: bitmap of queues configured for uapsd. Only valid
@@ -1533,16 +1537,6 @@ struct ieee80211_tx_control {
1533 * @IEEE80211_HW_MFP_CAPABLE: 1537 * @IEEE80211_HW_MFP_CAPABLE:
1534 * Hardware supports management frame protection (MFP, IEEE 802.11w). 1538 * Hardware supports management frame protection (MFP, IEEE 802.11w).
1535 * 1539 *
1536 * @IEEE80211_HW_SUPPORTS_STATIC_SMPS:
1537 * Hardware supports static spatial multiplexing powersave,
1538 * ie. can turn off all but one chain even on HT connections
1539 * that should be using more chains.
1540 *
1541 * @IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS:
1542 * Hardware supports dynamic spatial multiplexing powersave,
1543 * ie. can turn off all but one chain and then wake the rest
1544 * up as required after, for example, rts/cts handshake.
1545 *
1546 * @IEEE80211_HW_SUPPORTS_UAPSD: 1540 * @IEEE80211_HW_SUPPORTS_UAPSD:
1547 * Hardware supports Unscheduled Automatic Power Save Delivery 1541 * Hardware supports Unscheduled Automatic Power Save Delivery
1548 * (U-APSD) in managed mode. The mode is configured with 1542 * (U-APSD) in managed mode. The mode is configured with
@@ -1606,6 +1600,9 @@ struct ieee80211_tx_control {
1606 * is not enabled the default action is to disconnect when getting the 1600 * is not enabled the default action is to disconnect when getting the
1607 * CSA frame. 1601 * CSA frame.
1608 * 1602 *
1603 * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
1604 * or tailroom of TX skbs without copying them first.
1605 *
1609 * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands 1606 * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
1610 * in one command, mac80211 doesn't have to run separate scans per band. 1607 * in one command, mac80211 doesn't have to run separate scans per band.
1611 */ 1608 */
@@ -1625,8 +1622,7 @@ enum ieee80211_hw_flags {
1625 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, 1622 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
1626 IEEE80211_HW_MFP_CAPABLE = 1<<13, 1623 IEEE80211_HW_MFP_CAPABLE = 1<<13,
1627 IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, 1624 IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
1628 IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15, 1625 /* free slots */
1629 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
1630 IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, 1626 IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
1631 IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, 1627 IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
1632 IEEE80211_HW_CONNECTION_MONITOR = 1<<19, 1628 IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
@@ -1639,7 +1635,7 @@ enum ieee80211_hw_flags {
1639 IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, 1635 IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
1640 IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27, 1636 IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
1641 IEEE80211_HW_CHANCTX_STA_CSA = 1<<28, 1637 IEEE80211_HW_CHANCTX_STA_CSA = 1<<28,
1642 /* bit 29 unused */ 1638 IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29,
1643 IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30, 1639 IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30,
1644}; 1640};
1645 1641
@@ -2666,7 +2662,9 @@ enum ieee80211_roc_type {
2666 * 2662 *
2667 * @set_coverage_class: Set slot time for given coverage class as specified 2663 * @set_coverage_class: Set slot time for given coverage class as specified
2668 * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout 2664 * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout
2669 * accordingly. This callback is not required and may sleep. 2665 * accordingly; coverage class equals to -1 to enable ACK timeout
2666 * estimation algorithm (dynack). To disable dynack set valid value for
2667 * coverage class. This callback is not required and may sleep.
2670 * 2668 *
2671 * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may 2669 * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may
2672 * be %NULL. The callback can sleep. 2670 * be %NULL. The callback can sleep.
@@ -2950,7 +2948,7 @@ struct ieee80211_ops {
2950 int (*get_survey)(struct ieee80211_hw *hw, int idx, 2948 int (*get_survey)(struct ieee80211_hw *hw, int idx,
2951 struct survey_info *survey); 2949 struct survey_info *survey);
2952 void (*rfkill_poll)(struct ieee80211_hw *hw); 2950 void (*rfkill_poll)(struct ieee80211_hw *hw);
2953 void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class); 2951 void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class);
2954#ifdef CONFIG_NL80211_TESTMODE 2952#ifdef CONFIG_NL80211_TESTMODE
2955 int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2953 int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2956 void *data, int len); 2954 void *data, int len);
diff --git a/include/net/mld.h b/include/net/mld.h
index faa1d161bf24..01d751303498 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -88,12 +88,15 @@ struct mld2_query {
88#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07) 88#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07)
89#define MLDV2_QQIC_MAN(value) ((value) & 0x0f) 89#define MLDV2_QQIC_MAN(value) ((value) & 0x0f)
90 90
91#define MLD_EXP_MIN_LIMIT 32768UL
92#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1)
93
91static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2) 94static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
92{ 95{
93 /* RFC3810, 5.1.3. Maximum Response Code */ 96 /* RFC3810, 5.1.3. Maximum Response Code */
94 unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc); 97 unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc);
95 98
96 if (mc_mrc < 32768) { 99 if (mc_mrc < MLD_EXP_MIN_LIMIT) {
97 ret = mc_mrc; 100 ret = mc_mrc;
98 } else { 101 } else {
99 unsigned long mc_man, mc_exp; 102 unsigned long mc_man, mc_exp;
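[editor's note] The mld.h hunk only replaces the bare 32768 with MLD_EXP_MIN_LIMIT; the exponent/mantissa branch is not shown. Per RFC 3810, 5.1.3, a Maximum Response Code of 32768 or more encodes exp in bits 14-12 and mant in bits 11-0, giving (mant | 0x1000) << (exp + 3). The decoder below is reconstructed from the RFC rather than copied from the kernel, so treat it as a sketch of what mldv2_mrc() computes:

#include <stdio.h>

#define MLD_EXP_MIN_LIMIT	32768UL

/* Decode an MLDv2 Maximum Response Code (RFC 3810, 5.1.3).
 * Values below 32768 are literal; larger values use an
 * exponent/mantissa encoding. */
static unsigned long toy_mldv2_mrc(unsigned int mrc)
{
	if (mrc < MLD_EXP_MIN_LIMIT)
		return mrc;

	unsigned long mant = mrc & 0x0fff;
	unsigned long exp  = (mrc >> 12) & 0x07;

	return (mant | 0x1000UL) << (exp + 3);
}

int main(void)
{
	printf("%lu\n", toy_mldv2_mrc(1000));    /* literal: 1000 */
	printf("%lu\n", toy_mldv2_mrc(0x8000));  /* smallest encoded value: 32768 */
	printf("%lu\n", toy_mldv2_mrc(0xffff));  /* largest: 8387584 */
	return 0;
}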
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 47f425464f84..f60558d0254c 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -373,7 +373,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
373 return 0; 373 return 0;
374} 374}
375 375
376#ifdef CONFIG_BRIDGE_NETFILTER 376#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
377static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) 377static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
378{ 378{
379 unsigned int seq, hh_alen; 379 unsigned int seq, hh_alen;
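[editor's note] The neighbour.h change swaps #ifdef CONFIG_BRIDGE_NETFILTER for #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER). The difference shows up when the option is built as a module: Kconfig then defines CONFIG_BRIDGE_NETFILTER_MODULE instead of CONFIG_BRIDGE_NETFILTER, so a plain #ifdef compiles the code out while IS_ENABLED() still sees it. Below is a trimmed-down model of the macro machinery from include/linux/kconfig.h (the exact spelling differs between kernel versions); it prints 1 for the simulated =m configuration:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...)	val

#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

/* Pretend bridge netfilter was configured as =m: only the _MODULE
 * symbol gets defined, which is what Kconfig does. */
#define CONFIG_BRIDGE_NETFILTER_MODULE 1

int main(void)
{
#ifdef CONFIG_BRIDGE_NETFILTER
	puts("plain #ifdef sees the option");	/* not printed for =m builds */
#endif
	printf("IS_ENABLED() sees the option: %d\n",
	       IS_ENABLED(CONFIG_BRIDGE_NETFILTER));
	return 0;
}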
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 361d26077196..e0d64667a4b3 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -352,26 +352,12 @@ static inline void rt_genid_bump_ipv4(struct net *net)
352 atomic_inc(&net->ipv4.rt_genid); 352 atomic_inc(&net->ipv4.rt_genid);
353} 353}
354 354
355#if IS_ENABLED(CONFIG_IPV6) 355extern void (*__fib6_flush_trees)(struct net *net);
356static inline int rt_genid_ipv6(struct net *net)
357{
358 return atomic_read(&net->ipv6.rt_genid);
359}
360
361static inline void rt_genid_bump_ipv6(struct net *net)
362{
363 atomic_inc(&net->ipv6.rt_genid);
364}
365#else
366static inline int rt_genid_ipv6(struct net *net)
367{
368 return 0;
369}
370
371static inline void rt_genid_bump_ipv6(struct net *net) 356static inline void rt_genid_bump_ipv6(struct net *net)
372{ 357{
358 if (__fib6_flush_trees)
359 __fib6_flush_trees(net);
373} 360}
374#endif
375 361
376#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) 362#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
377static inline struct netns_ieee802154_lowpan * 363static inline struct netns_ieee802154_lowpan *
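[editor's note] rt_genid_bump_ipv6() no longer bumps a per-netns generation counter; it calls through the __fib6_flush_trees function pointer, which is presumably assigned by the IPv6 FIB code when that code is present and left NULL otherwise, hence the NULL check in the hunk. Reduced to the bare pattern, with invented names:

#include <stdio.h>
#include <stddef.h>

/* Hook left NULL until the owning code registers an implementation;
 * same shape as __fib6_flush_trees, names are made up. */
static void (*flush_hook)(int net_id);

static void bump_generation(int net_id)
{
	if (flush_hook)		/* same NULL check as rt_genid_bump_ipv6() */
		flush_hook(net_id);
}

static void real_flush(int net_id)
{
	printf("flushing trees for netns %d\n", net_id);
}

int main(void)
{
	bump_generation(1);		/* no provider registered: a no-op */
	flush_hook = real_flush;	/* what the provider does at init time */
	bump_generation(1);		/* now reaches the implementation */
	return 0;
}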
diff --git a/include/net/netdma.h b/include/net/netdma.h
deleted file mode 100644
index 8ba8ce284eeb..000000000000
--- a/include/net/netdma.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING.
20 */
21#ifndef NETDMA_H
22#define NETDMA_H
23#ifdef CONFIG_NET_DMA
24#include <linux/dmaengine.h>
25#include <linux/skbuff.h>
26
27int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
28 struct sk_buff *skb, int offset, struct iovec *to,
29 size_t len, struct dma_pinned_list *pinned_list);
30
31#endif /* CONFIG_NET_DMA */
32#endif /* NETDMA_H */
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
new file mode 100644
index 000000000000..2aa6048a55c1
--- /dev/null
+++ b/include/net/netfilter/br_netfilter.h
@@ -0,0 +1,6 @@
1#ifndef _BR_NETFILTER_H_
2#define _BR_NETFILTER_H_
3
4void br_netfilter_enable(void);
5
6#endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
new file mode 100644
index 000000000000..a9c001c646da
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -0,0 +1,14 @@
1#ifndef _NF_NAT_MASQUERADE_IPV4_H_
2#define _NF_NAT_MASQUERADE_IPV4_H_
3
4#include <net/netfilter/nf_nat.h>
5
6unsigned int
7nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
8 const struct nf_nat_range *range,
9 const struct net_device *out);
10
11void nf_nat_masquerade_ipv4_register_notifier(void);
12void nf_nat_masquerade_ipv4_unregister_notifier(void);
13
14#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 931fbf812171..e8427193c777 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -1,128 +1,13 @@
1#ifndef _IPV4_NF_REJECT_H 1#ifndef _IPV4_NF_REJECT_H
2#define _IPV4_NF_REJECT_H 2#define _IPV4_NF_REJECT_H
3 3
4#include <net/ip.h> 4#include <net/icmp.h>
5#include <net/tcp.h>
6#include <net/route.h>
7#include <net/dst.h>
8 5
9static inline void nf_send_unreach(struct sk_buff *skb_in, int code) 6static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
10{ 7{
11 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); 8 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
12} 9}
13 10
14/* Send RST reply */ 11void nf_send_reset(struct sk_buff *oldskb, int hook);
15static void nf_send_reset(struct sk_buff *oldskb, int hook)
16{
17 struct sk_buff *nskb;
18 const struct iphdr *oiph;
19 struct iphdr *niph;
20 const struct tcphdr *oth;
21 struct tcphdr _otcph, *tcph;
22
23 /* IP header checks: fragment. */
24 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
25 return;
26
27 oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
28 sizeof(_otcph), &_otcph);
29 if (oth == NULL)
30 return;
31
32 /* No RST for RST. */
33 if (oth->rst)
34 return;
35
36 if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
37 return;
38
39 /* Check checksum */
40 if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
41 return;
42 oiph = ip_hdr(oldskb);
43
44 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
45 LL_MAX_HEADER, GFP_ATOMIC);
46 if (!nskb)
47 return;
48
49 skb_reserve(nskb, LL_MAX_HEADER);
50
51 skb_reset_network_header(nskb);
52 niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
53 niph->version = 4;
54 niph->ihl = sizeof(struct iphdr) / 4;
55 niph->tos = 0;
56 niph->id = 0;
57 niph->frag_off = htons(IP_DF);
58 niph->protocol = IPPROTO_TCP;
59 niph->check = 0;
60 niph->saddr = oiph->daddr;
61 niph->daddr = oiph->saddr;
62
63 skb_reset_transport_header(nskb);
64 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
65 memset(tcph, 0, sizeof(*tcph));
66 tcph->source = oth->dest;
67 tcph->dest = oth->source;
68 tcph->doff = sizeof(struct tcphdr) / 4;
69
70 if (oth->ack)
71 tcph->seq = oth->ack_seq;
72 else {
73 tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
74 oldskb->len - ip_hdrlen(oldskb) -
75 (oth->doff << 2));
76 tcph->ack = 1;
77 }
78
79 tcph->rst = 1;
80 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
81 niph->daddr, 0);
82 nskb->ip_summed = CHECKSUM_PARTIAL;
83 nskb->csum_start = (unsigned char *)tcph - nskb->head;
84 nskb->csum_offset = offsetof(struct tcphdr, check);
85
86 /* ip_route_me_harder expects skb->dst to be set */
87 skb_dst_set_noref(nskb, skb_dst(oldskb));
88
89 nskb->protocol = htons(ETH_P_IP);
90 if (ip_route_me_harder(nskb, RTN_UNSPEC))
91 goto free_nskb;
92
93 niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
94
95 /* "Never happens" */
96 if (nskb->len > dst_mtu(skb_dst(nskb)))
97 goto free_nskb;
98
99 nf_ct_attach(nskb, oldskb);
100
101#ifdef CONFIG_BRIDGE_NETFILTER
102 /* If we use ip_local_out for bridged traffic, the MAC source on
103 * the RST will be ours, instead of the destination's. This confuses
104 * some routers/firewalls, and they drop the packet. So we need to
105 * build the eth header using the original destination's MAC as the
106 * source, and send the RST packet directly.
107 */
108 if (oldskb->nf_bridge) {
109 struct ethhdr *oeth = eth_hdr(oldskb);
110 nskb->dev = oldskb->nf_bridge->physindev;
111 niph->tot_len = htons(nskb->len);
112 ip_send_check(niph);
113 if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
114 oeth->h_source, oeth->h_dest, nskb->len) < 0)
115 goto free_nskb;
116 dev_queue_xmit(nskb);
117 } else
118#endif
119 ip_local_out(nskb);
120
121 return;
122
123 free_nskb:
124 kfree_skb(nskb);
125}
126
127 12
128#endif /* _IPV4_NF_REJECT_H */ 13#endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
new file mode 100644
index 000000000000..0a13396cd390
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -0,0 +1,10 @@
1#ifndef _NF_NAT_MASQUERADE_IPV6_H_
2#define _NF_NAT_MASQUERADE_IPV6_H_
3
4unsigned int
5nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
6 const struct net_device *out);
7void nf_nat_masquerade_ipv6_register_notifier(void);
8void nf_nat_masquerade_ipv6_unregister_notifier(void);
9
10#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 710d17ed70b4..48e18810a9be 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -1,11 +1,7 @@
1#ifndef _IPV6_NF_REJECT_H 1#ifndef _IPV6_NF_REJECT_H
2#define _IPV6_NF_REJECT_H 2#define _IPV6_NF_REJECT_H
3 3
4#include <net/ipv6.h> 4#include <linux/icmpv6.h>
5#include <net/ip6_route.h>
6#include <net/ip6_fib.h>
7#include <net/ip6_checksum.h>
8#include <linux/netfilter_ipv6.h>
9 5
10static inline void 6static inline void
11nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, 7nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
@@ -17,155 +13,6 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
17 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); 13 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
18} 14}
19 15
20/* Send RST reply */ 16void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
21static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
22{
23 struct sk_buff *nskb;
24 struct tcphdr otcph, *tcph;
25 unsigned int otcplen, hh_len;
26 int tcphoff, needs_ack;
27 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
28 struct ipv6hdr *ip6h;
29#define DEFAULT_TOS_VALUE 0x0U
30 const __u8 tclass = DEFAULT_TOS_VALUE;
31 struct dst_entry *dst = NULL;
32 u8 proto;
33 __be16 frag_off;
34 struct flowi6 fl6;
35
36 if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
37 (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
38 pr_debug("addr is not unicast.\n");
39 return;
40 }
41
42 proto = oip6h->nexthdr;
43 tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
44
45 if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
46 pr_debug("Cannot get TCP header.\n");
47 return;
48 }
49
50 otcplen = oldskb->len - tcphoff;
51
52 /* IP header checks: fragment, too short. */
53 if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
54 pr_debug("proto(%d) != IPPROTO_TCP, "
55 "or too short. otcplen = %d\n",
56 proto, otcplen);
57 return;
58 }
59
60 if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
61 BUG();
62
63 /* No RST for RST. */
64 if (otcph.rst) {
65 pr_debug("RST is set\n");
66 return;
67 }
68
69 /* Check checksum. */
70 if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
71 pr_debug("TCP checksum is invalid\n");
72 return;
73 }
74
75 memset(&fl6, 0, sizeof(fl6));
76 fl6.flowi6_proto = IPPROTO_TCP;
77 fl6.saddr = oip6h->daddr;
78 fl6.daddr = oip6h->saddr;
79 fl6.fl6_sport = otcph.dest;
80 fl6.fl6_dport = otcph.source;
81 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
82 dst = ip6_route_output(net, NULL, &fl6);
83 if (dst == NULL || dst->error) {
84 dst_release(dst);
85 return;
86 }
87 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
88 if (IS_ERR(dst))
89 return;
90
91 hh_len = (dst->dev->hard_header_len + 15)&~15;
92 nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
93 + sizeof(struct tcphdr) + dst->trailer_len,
94 GFP_ATOMIC);
95
96 if (!nskb) {
97 net_dbg_ratelimited("cannot alloc skb\n");
98 dst_release(dst);
99 return;
100 }
101
102 skb_dst_set(nskb, dst);
103
104 skb_reserve(nskb, hh_len + dst->header_len);
105
106 skb_put(nskb, sizeof(struct ipv6hdr));
107 skb_reset_network_header(nskb);
108 ip6h = ipv6_hdr(nskb);
109 ip6_flow_hdr(ip6h, tclass, 0);
110 ip6h->hop_limit = ip6_dst_hoplimit(dst);
111 ip6h->nexthdr = IPPROTO_TCP;
112 ip6h->saddr = oip6h->daddr;
113 ip6h->daddr = oip6h->saddr;
114
115 skb_reset_transport_header(nskb);
116 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
117 /* Truncate to length (no data) */
118 tcph->doff = sizeof(struct tcphdr)/4;
119 tcph->source = otcph.dest;
120 tcph->dest = otcph.source;
121
122 if (otcph.ack) {
123 needs_ack = 0;
124 tcph->seq = otcph.ack_seq;
125 tcph->ack_seq = 0;
126 } else {
127 needs_ack = 1;
128 tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
129 + otcplen - (otcph.doff<<2));
130 tcph->seq = 0;
131 }
132
133 /* Reset flags */
134 ((u_int8_t *)tcph)[13] = 0;
135 tcph->rst = 1;
136 tcph->ack = needs_ack;
137 tcph->window = 0;
138 tcph->urg_ptr = 0;
139 tcph->check = 0;
140
141 /* Adjust TCP checksum */
142 tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
143 &ipv6_hdr(nskb)->daddr,
144 sizeof(struct tcphdr), IPPROTO_TCP,
145 csum_partial(tcph,
146 sizeof(struct tcphdr), 0));
147
148 nf_ct_attach(nskb, oldskb);
149
150#ifdef CONFIG_BRIDGE_NETFILTER
151 /* If we use ip6_local_out for bridged traffic, the MAC source on
152 * the RST will be ours, instead of the destination's. This confuses
153 * some routers/firewalls, and they drop the packet. So we need to
154 * build the eth header using the original destination's MAC as the
155 * source, and send the RST packet directly.
156 */
157 if (oldskb->nf_bridge) {
158 struct ethhdr *oeth = eth_hdr(oldskb);
159 nskb->dev = oldskb->nf_bridge->physindev;
160 nskb->protocol = htons(ETH_P_IPV6);
161 ip6h->payload_len = htons(sizeof(struct tcphdr));
162 if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
163 oeth->h_source, oeth->h_dest, nskb->len) < 0)
164 return;
165 dev_queue_xmit(nskb);
166 } else
167#endif
168 ip6_local_out(nskb);
169}
170 17
171#endif /* _IPV6_NF_REJECT_H */ 18#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index a71dd333ac68..344b1ab19220 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -32,10 +32,8 @@ struct nf_conn_nat {
32 struct hlist_node bysource; 32 struct hlist_node bysource;
33 struct nf_conn *ct; 33 struct nf_conn *ct;
34 union nf_conntrack_nat_help help; 34 union nf_conntrack_nat_help help;
35#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \ 35#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
36 defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \ 36 IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
37 defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \
38 defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE)
39 int masq_index; 37 int masq_index;
40#endif 38#endif
41}; 39};
@@ -68,8 +66,8 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
68 struct nf_conn_nat *nat, 66 struct nf_conn_nat *nat,
69 const struct net_device *out) 67 const struct net_device *out)
70{ 68{
71#if IS_ENABLED(CONFIG_IP_NF_TARGET_MASQUERADE) || \ 69#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
72 IS_ENABLED(CONFIG_IP6_NF_TARGET_MASQUERADE) 70 IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
73 return nat->masq_index && hooknum == NF_INET_POST_ROUTING && 71 return nat->masq_index && hooknum == NF_INET_POST_ROUTING &&
74 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL && 72 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL &&
75 nat->masq_index != out->ifindex; 73 nat->masq_index != out->ifindex;
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index 5a2919b2e09a..340c013795a4 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -42,8 +42,83 @@ const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
42int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct, 42int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
43 enum ip_conntrack_info ctinfo, 43 enum ip_conntrack_info ctinfo,
44 unsigned int hooknum); 44 unsigned int hooknum);
45
46unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
47 const struct net_device *in,
48 const struct net_device *out,
49 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
50 struct sk_buff *skb,
51 const struct net_device *in,
52 const struct net_device *out,
53 struct nf_conn *ct));
54
55unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
56 const struct net_device *in,
57 const struct net_device *out,
58 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
59 struct sk_buff *skb,
60 const struct net_device *in,
61 const struct net_device *out,
62 struct nf_conn *ct));
63
64unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
65 struct sk_buff *skb,
66 const struct net_device *in,
67 const struct net_device *out,
68 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
69 struct sk_buff *skb,
70 const struct net_device *in,
71 const struct net_device *out,
72 struct nf_conn *ct));
73
74unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
75 const struct net_device *in,
76 const struct net_device *out,
77 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
78 struct sk_buff *skb,
79 const struct net_device *in,
80 const struct net_device *out,
81 struct nf_conn *ct));
82
45int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, 83int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
46 enum ip_conntrack_info ctinfo, 84 enum ip_conntrack_info ctinfo,
47 unsigned int hooknum, unsigned int hdrlen); 85 unsigned int hooknum, unsigned int hdrlen);
48 86
87unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
88 const struct net_device *in,
89 const struct net_device *out,
90 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
91 struct sk_buff *skb,
92 const struct net_device *in,
93 const struct net_device *out,
94 struct nf_conn *ct));
95
96unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
97 const struct net_device *in,
98 const struct net_device *out,
99 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
100 struct sk_buff *skb,
101 const struct net_device *in,
102 const struct net_device *out,
103 struct nf_conn *ct));
104
105unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
106 struct sk_buff *skb,
107 const struct net_device *in,
108 const struct net_device *out,
109 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
110 struct sk_buff *skb,
111 const struct net_device *in,
112 const struct net_device *out,
113 struct nf_conn *ct));
114
115unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
116 const struct net_device *in,
117 const struct net_device *out,
118 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
119 struct sk_buff *skb,
120 const struct net_device *in,
121 const struct net_device *out,
122 struct nf_conn *ct));
123
49#endif /* _NF_NAT_L3PROTO_H */ 124#endif /* _NF_NAT_L3PROTO_H */
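[editor's note] All of the new nf_nat_ipv4_*/nf_nat_ipv6_* prototypes take a do_chain callback, so the shared NAT core can own the common per-packet work while each front end (an iptables nat table or an nftables nat chain, in this tree) supplies only its own rule-traversal step. The factoring, stripped to its essentials and with invented names:

#include <stdio.h>

struct pkt { const char *data; };

/* The caller-specific part: how this front end walks its rules. */
typedef int (*do_chain_fn)(struct pkt *p);

/* Shared core: common pre/post work wrapped around the callback,
 * loosely analogous to nf_nat_ipv4_fn() wrapping do_chain(). */
static int nat_core_fn(struct pkt *p, do_chain_fn do_chain)
{
	printf("core: look up / set up NAT state for '%s'\n", p->data);
	int verdict = do_chain(p);		/* front-end specific step */
	printf("core: apply NAT mangling, verdict %d\n", verdict);
	return verdict;
}

/* Two different front ends plugging into the same core. */
static int iptables_style_chain(struct pkt *p)
{
	printf("  iptables-style rule walk on '%s'\n", p->data);
	return 1;	/* accept */
}

static int nftables_style_chain(struct pkt *p)
{
	printf("  nftables-style rule walk on '%s'\n", p->data);
	return 1;	/* accept */
}

int main(void)
{
	struct pkt p = { "example packet" };

	nat_core_fn(&p, iptables_style_chain);
	nat_core_fn(&p, nftables_style_chain);
	return 0;
}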
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index c4d86198d3d6..3d7292392fac 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -241,6 +241,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
241 * @dtype: data type (verdict or numeric type defined by userspace) 241 * @dtype: data type (verdict or numeric type defined by userspace)
242 * @size: maximum set size 242 * @size: maximum set size
243 * @nelems: number of elements 243 * @nelems: number of elements
244 * @policy: set parameterization (see enum nft_set_policies)
244 * @ops: set ops 245 * @ops: set ops
245 * @flags: set flags 246 * @flags: set flags
246 * @klen: key length 247 * @klen: key length
@@ -255,6 +256,7 @@ struct nft_set {
255 u32 dtype; 256 u32 dtype;
256 u32 size; 257 u32 size;
257 u32 nelems; 258 u32 nelems;
259 u16 policy;
258 /* runtime data below here */ 260 /* runtime data below here */
259 const struct nft_set_ops *ops ____cacheline_aligned; 261 const struct nft_set_ops *ops ____cacheline_aligned;
260 u16 flags; 262 u16 flags;
diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h
new file mode 100644
index 000000000000..c72729f954f4
--- /dev/null
+++ b/include/net/netfilter/nft_masq.h
@@ -0,0 +1,16 @@
1#ifndef _NFT_MASQ_H_
2#define _NFT_MASQ_H_
3
4struct nft_masq {
5 u32 flags;
6};
7
8extern const struct nla_policy nft_masq_policy[];
9
10int nft_masq_init(const struct nft_ctx *ctx,
11 const struct nft_expr *expr,
12 const struct nlattr * const tb[]);
13
14int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr);
15
16#endif /* _NFT_MASQ_H_ */
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 36b0da2d55bb..60fa1530006b 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -14,12 +14,7 @@ int nft_reject_init(const struct nft_ctx *ctx,
14 14
15int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr); 15int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
16 16
17void nft_reject_ipv4_eval(const struct nft_expr *expr, 17int nft_reject_icmp_code(u8 code);
18 struct nft_data data[NFT_REG_MAX + 1], 18int nft_reject_icmpv6_code(u8 code);
19 const struct nft_pktinfo *pkt);
20
21void nft_reject_ipv6_eval(const struct nft_expr *expr,
22 struct nft_data data[NFT_REG_MAX + 1],
23 const struct nft_pktinfo *pkt);
24 19
25#endif 20#endif
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
index e2070960bac0..8170f8d7052b 100644
--- a/include/net/netns/ieee802154_6lowpan.h
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -16,7 +16,6 @@ struct netns_sysctl_lowpan {
16struct netns_ieee802154_lowpan { 16struct netns_ieee802154_lowpan {
17 struct netns_sysctl_lowpan sysctl; 17 struct netns_sysctl_lowpan sysctl;
18 struct netns_frags frags; 18 struct netns_frags frags;
19 int max_dsize;
20}; 19};
21 20
22#endif 21#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index aec5e12f9f19..24945cefc4fd 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -76,6 +76,7 @@ struct netns_ipv4 {
76 int sysctl_tcp_ecn; 76 int sysctl_tcp_ecn;
77 int sysctl_ip_no_pmtu_disc; 77 int sysctl_ip_no_pmtu_disc;
78 int sysctl_ip_fwd_use_pmtu; 78 int sysctl_ip_fwd_use_pmtu;
79 int sysctl_ip_nonlocal_bind;
79 80
80 int sysctl_fwmark_reflect; 81 int sysctl_fwmark_reflect;
81 int sysctl_tcp_fwmark_accept; 82 int sysctl_tcp_fwmark_accept;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index eade27adecf3..69ae41f2098c 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -76,7 +76,7 @@ struct netns_ipv6 {
76#endif 76#endif
77#endif 77#endif
78 atomic_t dev_addr_genid; 78 atomic_t dev_addr_genid;
79 atomic_t rt_genid; 79 atomic_t fib6_sernum;
80}; 80};
81 81
82#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 82#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 3492434baf88..9da798256f0e 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -13,6 +13,19 @@ struct ctl_table_header;
13struct xfrm_policy_hash { 13struct xfrm_policy_hash {
14 struct hlist_head *table; 14 struct hlist_head *table;
15 unsigned int hmask; 15 unsigned int hmask;
16 u8 dbits4;
17 u8 sbits4;
18 u8 dbits6;
19 u8 sbits6;
20};
21
22struct xfrm_policy_hthresh {
23 struct work_struct work;
24 seqlock_t lock;
25 u8 lbits4;
26 u8 rbits4;
27 u8 lbits6;
28 u8 rbits6;
16}; 29};
17 30
18struct netns_xfrm { 31struct netns_xfrm {
@@ -41,6 +54,7 @@ struct netns_xfrm {
41 struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2]; 54 struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2];
42 unsigned int policy_count[XFRM_POLICY_MAX * 2]; 55 unsigned int policy_count[XFRM_POLICY_MAX * 2];
43 struct work_struct policy_hash_work; 56 struct work_struct policy_hash_work;
57 struct xfrm_policy_hthresh policy_hthresh;
44 58
45 59
46 struct sock *nlsk; 60 struct sock *nlsk;
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index fbfa4e471abb..9eca9ae2280c 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -2,6 +2,7 @@
2 * The NFC Controller Interface is the communication protocol between an 2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH). 3 * NFC Controller (NFCC) and a Device Host (DH).
4 * 4 *
5 * Copyright (C) 2014 Marvell International Ltd.
5 * Copyright (C) 2011 Texas Instruments, Inc. 6 * Copyright (C) 2011 Texas Instruments, Inc.
6 * 7 *
7 * Written by Ilan Elias <ilane@ti.com> 8 * Written by Ilan Elias <ilane@ti.com>
@@ -65,19 +66,18 @@
65#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02 66#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02
66#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03 67#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03
67#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05 68#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05
68#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06 69#define NCI_NFC_V_PASSIVE_POLL_MODE 0x06
69#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80 70#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80
70#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81 71#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81
71#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82 72#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82
72#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 73#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
73#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 74#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
74#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86
75 75
76/* NCI RF Technologies */ 76/* NCI RF Technologies */
77#define NCI_NFC_RF_TECHNOLOGY_A 0x00 77#define NCI_NFC_RF_TECHNOLOGY_A 0x00
78#define NCI_NFC_RF_TECHNOLOGY_B 0x01 78#define NCI_NFC_RF_TECHNOLOGY_B 0x01
79#define NCI_NFC_RF_TECHNOLOGY_F 0x02 79#define NCI_NFC_RF_TECHNOLOGY_F 0x02
80#define NCI_NFC_RF_TECHNOLOGY_15693 0x03 80#define NCI_NFC_RF_TECHNOLOGY_V 0x03
81 81
82/* NCI Bit Rates */ 82/* NCI Bit Rates */
83#define NCI_NFC_BIT_RATE_106 0x00 83#define NCI_NFC_BIT_RATE_106 0x00
@@ -87,6 +87,7 @@
87#define NCI_NFC_BIT_RATE_1695 0x04 87#define NCI_NFC_BIT_RATE_1695 0x04
88#define NCI_NFC_BIT_RATE_3390 0x05 88#define NCI_NFC_BIT_RATE_3390 0x05
89#define NCI_NFC_BIT_RATE_6780 0x06 89#define NCI_NFC_BIT_RATE_6780 0x06
90#define NCI_NFC_BIT_RATE_26 0x20
90 91
91/* NCI RF Protocols */ 92/* NCI RF Protocols */
92#define NCI_RF_PROTOCOL_UNKNOWN 0x00 93#define NCI_RF_PROTOCOL_UNKNOWN 0x00
@@ -95,6 +96,7 @@
95#define NCI_RF_PROTOCOL_T3T 0x03 96#define NCI_RF_PROTOCOL_T3T 0x03
96#define NCI_RF_PROTOCOL_ISO_DEP 0x04 97#define NCI_RF_PROTOCOL_ISO_DEP 0x04
97#define NCI_RF_PROTOCOL_NFC_DEP 0x05 98#define NCI_RF_PROTOCOL_NFC_DEP 0x05
99#define NCI_RF_PROTOCOL_T5T 0x06
98 100
99/* NCI RF Interfaces */ 101/* NCI RF Interfaces */
100#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00 102#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00
@@ -328,6 +330,12 @@ struct rf_tech_specific_params_nfcf_poll {
328 __u8 sensf_res[18]; /* 16 or 18 Bytes */ 330 __u8 sensf_res[18]; /* 16 or 18 Bytes */
329} __packed; 331} __packed;
330 332
333struct rf_tech_specific_params_nfcv_poll {
334 __u8 res_flags;
335 __u8 dsfid;
336 __u8 uid[8]; /* 8 Bytes */
337} __packed;
338
331struct nci_rf_discover_ntf { 339struct nci_rf_discover_ntf {
332 __u8 rf_discovery_id; 340 __u8 rf_discovery_id;
333 __u8 rf_protocol; 341 __u8 rf_protocol;
@@ -338,6 +346,7 @@ struct nci_rf_discover_ntf {
338 struct rf_tech_specific_params_nfca_poll nfca_poll; 346 struct rf_tech_specific_params_nfca_poll nfca_poll;
339 struct rf_tech_specific_params_nfcb_poll nfcb_poll; 347 struct rf_tech_specific_params_nfcb_poll nfcb_poll;
340 struct rf_tech_specific_params_nfcf_poll nfcf_poll; 348 struct rf_tech_specific_params_nfcf_poll nfcf_poll;
349 struct rf_tech_specific_params_nfcv_poll nfcv_poll;
341 } rf_tech_specific_params; 350 } rf_tech_specific_params;
342 351
343 __u8 ntf_type; 352 __u8 ntf_type;
@@ -372,6 +381,7 @@ struct nci_rf_intf_activated_ntf {
372 struct rf_tech_specific_params_nfca_poll nfca_poll; 381 struct rf_tech_specific_params_nfca_poll nfca_poll;
373 struct rf_tech_specific_params_nfcb_poll nfcb_poll; 382 struct rf_tech_specific_params_nfcb_poll nfcb_poll;
374 struct rf_tech_specific_params_nfcf_poll nfcf_poll; 383 struct rf_tech_specific_params_nfcf_poll nfcf_poll;
384 struct rf_tech_specific_params_nfcv_poll nfcv_poll;
375 } rf_tech_specific_params; 385 } rf_tech_specific_params;
376 386
377 __u8 data_exch_rf_tech_and_mode; 387 __u8 data_exch_rf_tech_and_mode;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 1f9a0f5272fe..75d10e625c49 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -64,10 +64,11 @@ enum nci_state {
64struct nci_dev; 64struct nci_dev;
65 65
66struct nci_ops { 66struct nci_ops {
67 int (*open)(struct nci_dev *ndev); 67 int (*open)(struct nci_dev *ndev);
68 int (*close)(struct nci_dev *ndev); 68 int (*close)(struct nci_dev *ndev);
69 int (*send)(struct nci_dev *ndev, struct sk_buff *skb); 69 int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
70 int (*setup)(struct nci_dev *ndev); 70 int (*setup)(struct nci_dev *ndev);
71 __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
71}; 72};
72 73
73#define NCI_MAX_SUPPORTED_RF_INTERFACES 4 74#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 6da46dcf1049..bc49967e1a68 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -20,11 +20,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
20static inline unsigned long 20static inline unsigned long
21__cls_set_class(unsigned long *clp, unsigned long cl) 21__cls_set_class(unsigned long *clp, unsigned long cl)
22{ 22{
23 unsigned long old_cl; 23 return xchg(clp, cl);
24
25 old_cl = *clp;
26 *clp = cl;
27 return old_cl;
28} 24}
29 25
30static inline unsigned long 26static inline unsigned long
@@ -137,7 +133,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
137int tcf_exts_validate(struct net *net, struct tcf_proto *tp, 133int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
138 struct nlattr **tb, struct nlattr *rate_tlv, 134 struct nlattr **tb, struct nlattr *rate_tlv,
139 struct tcf_exts *exts, bool ovr); 135 struct tcf_exts *exts, bool ovr);
140void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts); 136void tcf_exts_destroy(struct tcf_exts *exts);
141void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, 137void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
142 struct tcf_exts *src); 138 struct tcf_exts *src);
143int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); 139int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
@@ -170,6 +166,7 @@ struct tcf_ematch {
170 unsigned int datalen; 166 unsigned int datalen;
171 u16 matchid; 167 u16 matchid;
172 u16 flags; 168 u16 flags;
169 struct net *net;
173}; 170};
174 171
175static inline int tcf_em_is_container(struct tcf_ematch *em) 172static inline int tcf_em_is_container(struct tcf_ematch *em)
@@ -233,12 +230,11 @@ struct tcf_ematch_tree {
233struct tcf_ematch_ops { 230struct tcf_ematch_ops {
234 int kind; 231 int kind;
235 int datalen; 232 int datalen;
236 int (*change)(struct tcf_proto *, void *, 233 int (*change)(struct net *net, void *,
237 int, struct tcf_ematch *); 234 int, struct tcf_ematch *);
238 int (*match)(struct sk_buff *, struct tcf_ematch *, 235 int (*match)(struct sk_buff *, struct tcf_ematch *,
239 struct tcf_pkt_info *); 236 struct tcf_pkt_info *);
240 void (*destroy)(struct tcf_proto *, 237 void (*destroy)(struct tcf_ematch *);
241 struct tcf_ematch *);
242 int (*dump)(struct sk_buff *, struct tcf_ematch *); 238 int (*dump)(struct sk_buff *, struct tcf_ematch *);
243 struct module *owner; 239 struct module *owner;
244 struct list_head link; 240 struct list_head link;
@@ -248,7 +244,7 @@ int tcf_em_register(struct tcf_ematch_ops *);
248void tcf_em_unregister(struct tcf_ematch_ops *); 244void tcf_em_unregister(struct tcf_ematch_ops *);
249int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *, 245int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
250 struct tcf_ematch_tree *); 246 struct tcf_ematch_tree *);
251void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *); 247void tcf_em_tree_destroy(struct tcf_ematch_tree *);
252int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int); 248int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
253int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *, 249int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
254 struct tcf_pkt_info *); 250 struct tcf_pkt_info *);
@@ -305,7 +301,7 @@ struct tcf_ematch_tree {
305}; 301};
306 302
307#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0) 303#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
308#define tcf_em_tree_destroy(tp, t) do { (void)(t); } while(0) 304#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
309#define tcf_em_tree_dump(skb, t, tlv) (0) 305#define tcf_em_tree_dump(skb, t, tlv) (0)
310#define tcf_em_tree_change(tp, dst, src) do { } while(0) 306#define tcf_em_tree_change(tp, dst, src) do { } while(0)
311#define tcf_em_tree_match(skb, t, info) ((void)(info), 1) 307#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
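[editor's note] __cls_set_class() now returns the previous pointer via xchg() instead of an open-coded read/write pair, making the store-and-return-old-value step a single atomic operation; judging by the __rcu annotations elsewhere in this series, that matters once classifier chains are updated under RCU rather than only the qdisc lock. A user-space equivalent of the primitive, using the GCC/Clang atomic builtin:

#include <stdio.h>

/* Stand-in for the kernel's xchg(): atomically store a new value
 * and return the old one. */
static unsigned long toy_xchg(unsigned long *p, unsigned long val)
{
	return __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);
}

/* Shape of __cls_set_class() after the change above. */
static unsigned long toy_cls_set_class(unsigned long *clp, unsigned long cl)
{
	return toy_xchg(clp, cl);
}

int main(void)
{
	unsigned long class = 0xaa;
	unsigned long old = toy_cls_set_class(&class, 0xbb);

	printf("old=0x%lx new=0x%lx\n", old, class);	/* old=0xaa new=0xbb */
	return 0;
}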
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ec030cd76616..27a33833ff4a 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -50,7 +50,7 @@ typedef long psched_tdiff_t;
50 50
51static inline psched_time_t psched_get_time(void) 51static inline psched_time_t psched_get_time(void)
52{ 52{
53 return PSCHED_NS2TICKS(ktime_to_ns(ktime_get())); 53 return PSCHED_NS2TICKS(ktime_get_ns());
54} 54}
55 55
56static inline psched_tdiff_t 56static inline psched_tdiff_t
@@ -65,12 +65,12 @@ struct qdisc_watchdog {
65}; 65};
66 66
67void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); 67void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
68void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); 68void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle);
69 69
70static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, 70static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
71 psched_time_t expires) 71 psched_time_t expires)
72{ 72{
73 qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires)); 73 qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true);
74} 74}
75 75
76void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); 76void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
@@ -99,7 +99,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab);
99void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc); 99void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
100int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, 100int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
101 struct net_device *dev, struct netdev_queue *txq, 101 struct net_device *dev, struct netdev_queue *txq,
102 spinlock_t *root_lock); 102 spinlock_t *root_lock, bool validate);
103 103
104void __qdisc_run(struct Qdisc *q); 104void __qdisc_run(struct Qdisc *q);
105 105
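[editor's note] qdisc_watchdog_schedule_ns() gains a throttle flag, and the existing qdisc_watchdog_schedule() inline keeps its old behaviour by passing true. Since C has no default arguments, this extend-then-wrap idiom is the usual way to grow an API without touching every caller; a toy version with made-up names (and an assumed tick-to-ns shift, purely illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Extended entry point: new 'throttle' parameter. */
static void watchdog_schedule_ns(uint64_t expires_ns, bool throttle)
{
	printf("arm timer at %llu ns, throttle=%d\n",
	       (unsigned long long)expires_ns, throttle);
}

/* Old-style wrapper preserving the previous behaviour (throttle on),
 * the same pattern as qdisc_watchdog_schedule() in the hunk above. */
static inline void watchdog_schedule(uint64_t expires_ticks)
{
	watchdog_schedule_ns(expires_ticks << 6, true);	/* assumed shift */
}

int main(void)
{
	watchdog_schedule(1000);		/* legacy callers unchanged */
	watchdog_schedule_ns(64000, false);	/* new callers can opt out */
	return 0;
}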
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 259992444e80..dad7ab20a8cb 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -167,7 +167,7 @@ struct ieee80211_reg_rule {
167struct ieee80211_regdomain { 167struct ieee80211_regdomain {
168 struct rcu_head rcu_head; 168 struct rcu_head rcu_head;
169 u32 n_reg_rules; 169 u32 n_reg_rules;
170 char alpha2[2]; 170 char alpha2[3];
171 enum nl80211_dfs_regions dfs_region; 171 enum nl80211_dfs_regions dfs_region;
172 struct ieee80211_reg_rule reg_rules[]; 172 struct ieee80211_reg_rule reg_rules[];
173}; 173};
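[editor's note] ieee80211_regdomain.alpha2 grows from two bytes to three. One plausible motivation (an assumption here, not stated in the hunk) is that the extra byte leaves room for a trailing NUL, so the country code can be handled with ordinary string routines instead of always being copied two bytes at a time:

#include <stdio.h>
#include <string.h>

struct regdom_2 { char alpha2[2]; };	/* old layout */
struct regdom_3 { char alpha2[3]; };	/* new layout */

int main(void)
{
	struct regdom_2 old_rd;
	struct regdom_3 new_rd;

	memcpy(old_rd.alpha2, "DE", 2);		/* no room for a terminator */
	printf("old: %.2s\n", old_rd.alpha2);	/* must bound the print */

	strcpy(new_rd.alpha2, "DE");		/* "DE" plus NUL fits in 3 bytes */
	printf("new: %s\n", new_rd.alpha2);	/* plain %s is now safe */
	return 0;
}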
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a3cfb8ebeb53..d17ed6fb2f70 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -6,6 +6,8 @@
6#include <linux/rcupdate.h> 6#include <linux/rcupdate.h>
7#include <linux/pkt_sched.h> 7#include <linux/pkt_sched.h>
8#include <linux/pkt_cls.h> 8#include <linux/pkt_cls.h>
9#include <linux/percpu.h>
10#include <linux/dynamic_queue_limits.h>
9#include <net/gen_stats.h> 11#include <net/gen_stats.h>
10#include <net/rtnetlink.h> 12#include <net/rtnetlink.h>
11 13
@@ -58,6 +60,7 @@ struct Qdisc {
58 * multiqueue device. 60 * multiqueue device.
59 */ 61 */
60#define TCQ_F_WARN_NONWC (1 << 16) 62#define TCQ_F_WARN_NONWC (1 << 16)
63#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
61 u32 limit; 64 u32 limit;
62 const struct Qdisc_ops *ops; 65 const struct Qdisc_ops *ops;
63 struct qdisc_size_table __rcu *stab; 66 struct qdisc_size_table __rcu *stab;
@@ -83,9 +86,15 @@ struct Qdisc {
83 */ 86 */
84 unsigned long state; 87 unsigned long state;
85 struct sk_buff_head q; 88 struct sk_buff_head q;
86 struct gnet_stats_basic_packed bstats; 89 union {
90 struct gnet_stats_basic_packed bstats;
91 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
92 } __packed;
87 unsigned int __state; 93 unsigned int __state;
88 struct gnet_stats_queue qstats; 94 union {
95 struct gnet_stats_queue qstats;
96 struct gnet_stats_queue __percpu *cpu_qstats;
97 } __packed;
89 struct rcu_head rcu_head; 98 struct rcu_head rcu_head;
90 int padded; 99 int padded;
91 atomic_t refcnt; 100 atomic_t refcnt;
@@ -111,6 +120,21 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
111 qdisc->__state &= ~__QDISC___STATE_RUNNING; 120 qdisc->__state &= ~__QDISC___STATE_RUNNING;
112} 121}
113 122
123static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
124{
125 return qdisc->flags & TCQ_F_ONETXQUEUE;
126}
127
128static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
129{
130#ifdef CONFIG_BQL
131 /* Non-BQL migrated drivers will return 0, too. */
132 return dql_avail(&txq->dql);
133#else
134 return 0;
135#endif
136}
137
114static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) 138static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
115{ 139{
116 return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false; 140 return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
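[editor's note] qdisc_may_bulk() and qdisc_avail_bulklimit() added above let the dequeue path pull several packets per qdisc run, bounded by the byte budget that dql_avail() reports (and by 0 on non-BQL drivers, so bulking degrades gracefully). A deliberately reduced model of budget-bounded bulk dequeue, with the BQL bookkeeping shrunk to a single "limit minus in-flight" counter:

#include <stdio.h>

/* Very reduced stand-in for struct dql: limit minus what is in flight. */
struct toy_dql {
	int limit;	/* bytes the driver ring can take */
	int queued;	/* bytes already handed to the driver */
};

static int toy_dql_avail(const struct toy_dql *dql)
{
	return dql->limit - dql->queued;
}

int main(void)
{
	struct toy_dql dql = { .limit = 4000, .queued = 1000 };
	const int pkt_len[] = { 1500, 1500, 1500, 1500 };
	int budget = toy_dql_avail(&dql);	/* cf. qdisc_avail_bulklimit() */
	int i, sent = 0;

	for (i = 0; i < 4 && budget > 0; i++) {
		budget -= pkt_len[i];	/* stop once the budget is spent */
		dql.queued += pkt_len[i];
		sent++;
	}
	printf("bulk-dequeued %d packets, %d bytes now in flight\n",
	       sent, dql.queued);	/* 2 packets with this budget */
	return 0;
}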
@@ -143,7 +167,7 @@ struct Qdisc_class_ops {
143 void (*walk)(struct Qdisc *, struct qdisc_walker * arg); 167 void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
144 168
145 /* Filter manipulation */ 169 /* Filter manipulation */
146 struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); 170 struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
147 unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, 171 unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
148 u32 classid); 172 u32 classid);
149 void (*unbind_tcf)(struct Qdisc *, unsigned long); 173 void (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -212,8 +236,8 @@ struct tcf_proto_ops {
212 236
213struct tcf_proto { 237struct tcf_proto {
214 /* Fast access part */ 238 /* Fast access part */
215 struct tcf_proto *next; 239 struct tcf_proto __rcu *next;
216 void *root; 240 void __rcu *root;
217 int (*classify)(struct sk_buff *, 241 int (*classify)(struct sk_buff *,
218 const struct tcf_proto *, 242 const struct tcf_proto *,
219 struct tcf_result *); 243 struct tcf_result *);
@@ -225,13 +249,15 @@ struct tcf_proto {
225 struct Qdisc *q; 249 struct Qdisc *q;
226 void *data; 250 void *data;
227 const struct tcf_proto_ops *ops; 251 const struct tcf_proto_ops *ops;
252 struct rcu_head rcu;
228}; 253};
229 254
230struct qdisc_skb_cb { 255struct qdisc_skb_cb {
231 unsigned int pkt_len; 256 unsigned int pkt_len;
232 u16 slave_dev_queue_mapping; 257 u16 slave_dev_queue_mapping;
233 u16 _pad; 258 u16 _pad;
234 unsigned char data[24]; 259#define QDISC_CB_PRIV_LEN 20
260 unsigned char data[QDISC_CB_PRIV_LEN];
235}; 261};
236 262
237static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) 263static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
@@ -259,7 +285,9 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
259 285
260static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc) 286static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
261{ 287{
262 return qdisc->dev_queue->qdisc; 288 struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
289
290 return q;
263} 291}
264 292
265static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) 293static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
@@ -376,7 +404,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
376void __qdisc_calculate_pkt_len(struct sk_buff *skb, 404void __qdisc_calculate_pkt_len(struct sk_buff *skb,
377 const struct qdisc_size_table *stab); 405 const struct qdisc_size_table *stab);
378void tcf_destroy(struct tcf_proto *tp); 406void tcf_destroy(struct tcf_proto *tp);
379void tcf_destroy_chain(struct tcf_proto **fl); 407void tcf_destroy_chain(struct tcf_proto __rcu **fl);
380 408
381/* Reset all TX qdiscs greater then index of a device. */ 409/* Reset all TX qdiscs greater then index of a device. */
382static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) 410static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
@@ -384,7 +412,7 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
384 struct Qdisc *qdisc; 412 struct Qdisc *qdisc;
385 413
386 for (; i < dev->num_tx_queues; i++) { 414 for (; i < dev->num_tx_queues; i++) {
387 qdisc = netdev_get_tx_queue(dev, i)->qdisc; 415 qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
388 if (qdisc) { 416 if (qdisc) {
389 spin_lock_bh(qdisc_lock(qdisc)); 417 spin_lock_bh(qdisc_lock(qdisc));
390 qdisc_reset(qdisc); 418 qdisc_reset(qdisc);
@@ -402,13 +430,18 @@ static inline void qdisc_reset_all_tx(struct net_device *dev)
402static inline bool qdisc_all_tx_empty(const struct net_device *dev) 430static inline bool qdisc_all_tx_empty(const struct net_device *dev)
403{ 431{
404 unsigned int i; 432 unsigned int i;
433
434 rcu_read_lock();
405 for (i = 0; i < dev->num_tx_queues; i++) { 435 for (i = 0; i < dev->num_tx_queues; i++) {
406 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 436 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
407 const struct Qdisc *q = txq->qdisc; 437 const struct Qdisc *q = rcu_dereference(txq->qdisc);
408 438
409 if (q->q.qlen) 439 if (q->q.qlen) {
440 rcu_read_unlock();
410 return false; 441 return false;
442 }
411 } 443 }
444 rcu_read_unlock();
412 return true; 445 return true;
413} 446}
414 447
@@ -416,9 +449,10 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
416static inline bool qdisc_tx_changing(const struct net_device *dev) 449static inline bool qdisc_tx_changing(const struct net_device *dev)
417{ 450{
418 unsigned int i; 451 unsigned int i;
452
419 for (i = 0; i < dev->num_tx_queues; i++) { 453 for (i = 0; i < dev->num_tx_queues; i++) {
420 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 454 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
421 if (txq->qdisc != txq->qdisc_sleeping) 455 if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
422 return true; 456 return true;
423 } 457 }
424 return false; 458 return false;
@@ -428,9 +462,10 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
428static inline bool qdisc_tx_is_noop(const struct net_device *dev) 462static inline bool qdisc_tx_is_noop(const struct net_device *dev)
429{ 463{
430 unsigned int i; 464 unsigned int i;
465
431 for (i = 0; i < dev->num_tx_queues; i++) { 466 for (i = 0; i < dev->num_tx_queues; i++) {
432 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 467 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
433 if (txq->qdisc != &noop_qdisc) 468 if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
434 return false; 469 return false;
435 } 470 }
436 return true; 471 return true;
@@ -476,6 +511,10 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
476 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; 511 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
477} 512}
478 513
514static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
515{
516 return q->flags & TCQ_F_CPUSTATS;
517}
479 518
480static inline void bstats_update(struct gnet_stats_basic_packed *bstats, 519static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
481 const struct sk_buff *skb) 520 const struct sk_buff *skb)
@@ -484,17 +523,62 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
484 bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 523 bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
485} 524}
486 525
526static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
527 const struct sk_buff *skb)
528{
529 struct gnet_stats_basic_cpu *bstats =
530 this_cpu_ptr(sch->cpu_bstats);
531
532 u64_stats_update_begin(&bstats->syncp);
533 bstats_update(&bstats->bstats, skb);
534 u64_stats_update_end(&bstats->syncp);
535}
536
487static inline void qdisc_bstats_update(struct Qdisc *sch, 537static inline void qdisc_bstats_update(struct Qdisc *sch,
488 const struct sk_buff *skb) 538 const struct sk_buff *skb)
489{ 539{
490 bstats_update(&sch->bstats, skb); 540 bstats_update(&sch->bstats, skb);
491} 541}
492 542
543static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
544 const struct sk_buff *skb)
545{
546 sch->qstats.backlog -= qdisc_pkt_len(skb);
547}
548
549static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
550 const struct sk_buff *skb)
551{
552 sch->qstats.backlog += qdisc_pkt_len(skb);
553}
554
555static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
556{
557 sch->qstats.drops += count;
558}
559
560static inline void qdisc_qstats_drop(struct Qdisc *sch)
561{
562 sch->qstats.drops++;
563}
564
565static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
566{
567 struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
568
569 qstats->drops++;
570}
571
572static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
573{
574 sch->qstats.overlimits++;
575}
576
493static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, 577static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
494 struct sk_buff_head *list) 578 struct sk_buff_head *list)
495{ 579{
496 __skb_queue_tail(list, skb); 580 __skb_queue_tail(list, skb);
497 sch->qstats.backlog += qdisc_pkt_len(skb); 581 qdisc_qstats_backlog_inc(sch, skb);
498 582
499 return NET_XMIT_SUCCESS; 583 return NET_XMIT_SUCCESS;
500} 584}
@@ -510,7 +594,7 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
510 struct sk_buff *skb = __skb_dequeue(list); 594 struct sk_buff *skb = __skb_dequeue(list);
511 595
512 if (likely(skb != NULL)) { 596 if (likely(skb != NULL)) {
513 sch->qstats.backlog -= qdisc_pkt_len(skb); 597 qdisc_qstats_backlog_dec(sch, skb);
514 qdisc_bstats_update(sch, skb); 598 qdisc_bstats_update(sch, skb);
515 } 599 }
516 600
@@ -529,7 +613,7 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
529 613
530 if (likely(skb != NULL)) { 614 if (likely(skb != NULL)) {
531 unsigned int len = qdisc_pkt_len(skb); 615 unsigned int len = qdisc_pkt_len(skb);
532 sch->qstats.backlog -= len; 616 qdisc_qstats_backlog_dec(sch, skb);
533 kfree_skb(skb); 617 kfree_skb(skb);
534 return len; 618 return len;
535 } 619 }
@@ -548,7 +632,7 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
548 struct sk_buff *skb = __skb_dequeue_tail(list); 632 struct sk_buff *skb = __skb_dequeue_tail(list);
549 633
550 if (likely(skb != NULL)) 634 if (likely(skb != NULL))
551 sch->qstats.backlog -= qdisc_pkt_len(skb); 635 qdisc_qstats_backlog_dec(sch, skb);
552 636
553 return skb; 637 return skb;
554} 638}
@@ -630,14 +714,14 @@ static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
630static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) 714static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
631{ 715{
632 kfree_skb(skb); 716 kfree_skb(skb);
633 sch->qstats.drops++; 717 qdisc_qstats_drop(sch);
634 718
635 return NET_XMIT_DROP; 719 return NET_XMIT_DROP;
636} 720}
637 721
638static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) 722static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
639{ 723{
640 sch->qstats.drops++; 724 qdisc_qstats_drop(sch);
641 725
642#ifdef CONFIG_NET_CLS_ACT 726#ifdef CONFIG_NET_CLS_ACT
643 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) 727 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
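
The sch_generic.h hunk above turns the open-coded bstats/qstats arithmetic into helpers (qdisc_qstats_backlog_inc/dec(), qdisc_qstats_drop(), qdisc_bstats_update_cpu(), ...) and adds qdisc_is_percpu_stats() for the new TCQ_F_CPUSTATS case. A minimal sketch of how a qdisc enqueue path could use them; the function name, the limit check and the exact stats placement are illustrative, not taken from this patch:

#include <net/sch_generic.h>

/* Illustrative enqueue: account backlog on success, drops on failure. */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_drop(skb, sch);	/* bumps qstats.drops, frees skb */

	__skb_queue_tail(&sch->q, skb);
	qdisc_qstats_backlog_inc(sch, skb);	/* was: sch->qstats.backlog += ... */

	/* Per-CPU byte/packet counters when TCQ_F_CPUSTATS is set. */
	if (qdisc_is_percpu_stats(sch))
		qdisc_bstats_update_cpu(sch, skb);
	else
		qdisc_bstats_update(sch, skb);

	return NET_XMIT_SUCCESS;
}
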
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index f22538e68245..d4a20d00461c 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -115,7 +115,7 @@ typedef enum {
115 * analysis of the state functions, but in reality just taken from 115 * analysis of the state functions, but in reality just taken from
116 * thin air in the hopes othat we don't trigger a kernel panic. 116 * thin air in the hopes othat we don't trigger a kernel panic.
117 */ 117 */
118#define SCTP_MAX_NUM_COMMANDS 14 118#define SCTP_MAX_NUM_COMMANDS 20
119 119
120typedef union { 120typedef union {
121 void *zero_all; /* Set to NULL to clear the entire union */ 121 void *zero_all; /* Set to NULL to clear the entire union */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index f6e7397e799d..9fbd856e6713 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -320,6 +320,19 @@ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
320 return asoc ? asoc->assoc_id : 0; 320 return asoc ? asoc->assoc_id : 0;
321} 321}
322 322
323static inline enum sctp_sstat_state
324sctp_assoc_to_state(const struct sctp_association *asoc)
325{
326 /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we
327 * got rid of it in kernel space. Therefore SCTP_CLOSED et al
328 * start at =1 in user space, but actually as =0 in kernel space.
329 * Now that we can not break user space and SCTP_EMPTY is exposed
330 * there, we need to fix it up with an ugly offset not to break
331 * applications. :(
332 */
333 return asoc->state + 1;
334}
335
323/* Look up the association by its id. */ 336/* Look up the association by its id. */
324struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id); 337struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
325 338
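
sctp_assoc_to_state() exists only to bridge the off-by-one between kernel association states (which start at 0) and the uapi enum, where 0 is still taken by the legacy SCTP_EMPTY. A hedged sketch of the presumed consumer, a getsockopt(SCTP_STATUS)-style reporter; the wrapper function here is invented for illustration:

#include <net/sctp/sctp.h>

/* Illustrative: report an association's state through the uapi enum. */
static void example_fill_status(struct sctp_status *status,
				const struct sctp_association *asoc)
{
	/* Kernel CLOSED (0) is reported as uapi SCTP_CLOSED (1); the dummy
	 * SCTP_EMPTY (0) is never handed back to applications.
	 */
	status->sstat_state = sctp_assoc_to_state(asoc);
}
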
diff --git a/include/net/snmp.h b/include/net/snmp.h
index f1f27fdbb0d5..8fd2f498782e 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -146,19 +146,15 @@ struct linux_xfrm_mib {
146 146
147#define SNMP_ADD_STATS(mib, field, addend) \ 147#define SNMP_ADD_STATS(mib, field, addend) \
148 this_cpu_add(mib->mibs[field], addend) 148 this_cpu_add(mib->mibs[field], addend)
149/*
150 * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr"
151 * to make @ptr a non-percpu pointer.
152 */
153#define SNMP_UPD_PO_STATS(mib, basefield, addend) \ 149#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
154 do { \ 150 do { \
155 __typeof__(*mib->mibs) *ptr = mib->mibs; \ 151 __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
156 this_cpu_inc(ptr[basefield##PKTS]); \ 152 this_cpu_inc(ptr[basefield##PKTS]); \
157 this_cpu_add(ptr[basefield##OCTETS], addend); \ 153 this_cpu_add(ptr[basefield##OCTETS], addend); \
158 } while (0) 154 } while (0)
159#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ 155#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
160 do { \ 156 do { \
161 __typeof__(*mib->mibs) *ptr = mib->mibs; \ 157 __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
162 __this_cpu_inc(ptr[basefield##PKTS]); \ 158 __this_cpu_inc(ptr[basefield##PKTS]); \
163 __this_cpu_add(ptr[basefield##OCTETS], addend); \ 159 __this_cpu_add(ptr[basefield##OCTETS], addend); \
164 } while (0) 160 } while (0)
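
The rewritten SNMP_UPD_PO_STATS macros switch from "__typeof__(*mib->mibs) *ptr" to "__typeof__((mib->mibs) + 0) ptr"; adding 0 forces array-to-pointer decay, so ptr ends up as a plain pointer to one counter that this_cpu_inc()/this_cpu_add() can index. A stand-alone user-space sketch of the decay trick; the array and the values are invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long mibs[4] = { 0 };

	/* "+ 0" decays the array to unsigned long *, not unsigned long (*)[4],
	 * so ptr[i] addresses an individual counter just like the macro does.
	 */
	__typeof__(mibs + 0) ptr = mibs;

	ptr[1]++;		/* stands in for basefield##PKTS   */
	ptr[2] += 1500;		/* stands in for basefield##OCTETS */

	printf("pkts=%lu octets=%lu\n", ptr[1], ptr[2]);
	return 0;
}
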
diff --git a/include/net/sock.h b/include/net/sock.h
index 7f2ab72f321a..7db3db112baa 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -233,7 +233,6 @@ struct cg_proto;
233 * @sk_receive_queue: incoming packets 233 * @sk_receive_queue: incoming packets
234 * @sk_wmem_alloc: transmit queue bytes committed 234 * @sk_wmem_alloc: transmit queue bytes committed
235 * @sk_write_queue: Packet sending queue 235 * @sk_write_queue: Packet sending queue
236 * @sk_async_wait_queue: DMA copied packets
237 * @sk_omem_alloc: "o" is "option" or "other" 236 * @sk_omem_alloc: "o" is "option" or "other"
238 * @sk_wmem_queued: persistent queue size 237 * @sk_wmem_queued: persistent queue size
239 * @sk_forward_alloc: space allocated forward 238 * @sk_forward_alloc: space allocated forward
@@ -362,10 +361,6 @@ struct sock {
362 struct sk_filter __rcu *sk_filter; 361 struct sk_filter __rcu *sk_filter;
363 struct socket_wq __rcu *sk_wq; 362 struct socket_wq __rcu *sk_wq;
364 363
365#ifdef CONFIG_NET_DMA
366 struct sk_buff_head sk_async_wait_queue;
367#endif
368
369#ifdef CONFIG_XFRM 364#ifdef CONFIG_XFRM
370 struct xfrm_policy *sk_policy[2]; 365 struct xfrm_policy *sk_policy[2];
371#endif 366#endif
@@ -1574,7 +1569,12 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1574void sock_wfree(struct sk_buff *skb); 1569void sock_wfree(struct sk_buff *skb);
1575void skb_orphan_partial(struct sk_buff *skb); 1570void skb_orphan_partial(struct sk_buff *skb);
1576void sock_rfree(struct sk_buff *skb); 1571void sock_rfree(struct sk_buff *skb);
1572void sock_efree(struct sk_buff *skb);
1573#ifdef CONFIG_INET
1577void sock_edemux(struct sk_buff *skb); 1574void sock_edemux(struct sk_buff *skb);
1575#else
1576#define sock_edemux(skb) sock_efree(skb)
1577#endif
1578 1578
1579int sock_setsockopt(struct socket *sock, int level, int op, 1579int sock_setsockopt(struct socket *sock, int level, int op,
1580 char __user *optval, unsigned int optlen); 1580 char __user *optval, unsigned int optlen);
@@ -2041,6 +2041,7 @@ void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2041int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 2041int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2042 2042
2043int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); 2043int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2044struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2044 2045
2045/* 2046/*
2046 * Recover an error report and clear atomically 2047 * Recover an error report and clear atomically
@@ -2165,9 +2166,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2165 */ 2166 */
2166 if (sock_flag(sk, SOCK_RCVTSTAMP) || 2167 if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2167 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || 2168 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2168 (kt.tv64 && 2169 (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
2169 (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
2170 skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) ||
2171 (hwtstamps->hwtstamp.tv64 && 2170 (hwtstamps->hwtstamp.tv64 &&
2172 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) 2171 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
2173 __sock_recv_timestamp(msg, sk, skb); 2172 __sock_recv_timestamp(msg, sk, skb);
@@ -2195,6 +2194,8 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2195 sk->sk_stamp = skb->tstamp; 2194 sk->sk_stamp = skb->tstamp;
2196} 2195}
2197 2196
2197void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
2198
2198/** 2199/**
2199 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped 2200 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
2200 * @sk: socket sending this packet 2201 * @sk: socket sending this packet
@@ -2202,33 +2203,27 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2202 * 2203 *
2203 * Note : callers should take care of initial *tx_flags value (usually 0) 2204 * Note : callers should take care of initial *tx_flags value (usually 0)
2204 */ 2205 */
2205void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); 2206static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
2207{
2208 if (unlikely(sk->sk_tsflags))
2209 __sock_tx_timestamp(sk, tx_flags);
2210 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
2211 *tx_flags |= SKBTX_WIFI_STATUS;
2212}
2206 2213
2207/** 2214/**
2208 * sk_eat_skb - Release a skb if it is no longer needed 2215 * sk_eat_skb - Release a skb if it is no longer needed
2209 * @sk: socket to eat this skb from 2216 * @sk: socket to eat this skb from
2210 * @skb: socket buffer to eat 2217 * @skb: socket buffer to eat
2211 * @copied_early: flag indicating whether DMA operations copied this data early
2212 * 2218 *
2213 * This routine must be called with interrupts disabled or with the socket 2219 * This routine must be called with interrupts disabled or with the socket
2214 * locked so that the sk_buff queue operation is ok. 2220 * locked so that the sk_buff queue operation is ok.
2215*/ 2221*/
2216#ifdef CONFIG_NET_DMA 2222static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
2217static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
2218{
2219 __skb_unlink(skb, &sk->sk_receive_queue);
2220 if (!copied_early)
2221 __kfree_skb(skb);
2222 else
2223 __skb_queue_tail(&sk->sk_async_wait_queue, skb);
2224}
2225#else
2226static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
2227{ 2223{
2228 __skb_unlink(skb, &sk->sk_receive_queue); 2224 __skb_unlink(skb, &sk->sk_receive_queue);
2229 __kfree_skb(skb); 2225 __kfree_skb(skb);
2230} 2226}
2231#endif
2232 2227
2233static inline 2228static inline
2234struct net *sock_net(const struct sock *sk) 2229struct net *sock_net(const struct sock *sk)
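
In the sock.h hunks, sock_tx_timestamp() becomes a static inline that only takes the __sock_tx_timestamp() slow path when sk->sk_tsflags is set, and sk_eat_skb() drops its NET_DMA-only copied_early argument. A hedged sketch of a typical transmit-side caller; the wrapper function is invented for illustration:

#include <net/sock.h>

/* Illustrative: derive skb tx_flags for an outgoing packet. */
static void example_fill_tx_flags(struct sock *sk, struct sk_buff *skb)
{
	__u8 tx_flags = 0;			/* callers initialise, usually to 0 */

	sock_tx_timestamp(sk, &tx_flags);	/* cheap when sk_tsflags == 0 */
	skb_shinfo(skb)->tx_flags = tx_flags;
}
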
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 590e01a476ac..74efeda994b3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -27,7 +27,6 @@
27#include <linux/cache.h> 27#include <linux/cache.h>
28#include <linux/percpu.h> 28#include <linux/percpu.h>
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/dmaengine.h>
31#include <linux/crypto.h> 30#include <linux/crypto.h>
32#include <linux/cryptohash.h> 31#include <linux/cryptohash.h>
33#include <linux/kref.h> 32#include <linux/kref.h>
@@ -262,7 +261,6 @@ extern int sysctl_tcp_adv_win_scale;
262extern int sysctl_tcp_tw_reuse; 261extern int sysctl_tcp_tw_reuse;
263extern int sysctl_tcp_frto; 262extern int sysctl_tcp_frto;
264extern int sysctl_tcp_low_latency; 263extern int sysctl_tcp_low_latency;
265extern int sysctl_tcp_dma_copybreak;
266extern int sysctl_tcp_nometrics_save; 264extern int sysctl_tcp_nometrics_save;
267extern int sysctl_tcp_moderate_rcvbuf; 265extern int sysctl_tcp_moderate_rcvbuf;
268extern int sysctl_tcp_tso_win_divisor; 266extern int sysctl_tcp_tso_win_divisor;
@@ -368,7 +366,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
368void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 366void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
369 const struct tcphdr *th, unsigned int len); 367 const struct tcphdr *th, unsigned int len);
370void tcp_rcv_space_adjust(struct sock *sk); 368void tcp_rcv_space_adjust(struct sock *sk);
371void tcp_cleanup_rbuf(struct sock *sk, int copied);
372int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); 369int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
373void tcp_twsk_destructor(struct sock *sk); 370void tcp_twsk_destructor(struct sock *sk);
374ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, 371ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
@@ -672,6 +669,12 @@ void tcp_send_window_probe(struct sock *sk);
672 */ 669 */
673#define tcp_time_stamp ((__u32)(jiffies)) 670#define tcp_time_stamp ((__u32)(jiffies))
674 671
672static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
673{
674 return skb->skb_mstamp.stamp_jiffies;
675}
676
677
675#define tcp_flag_byte(th) (((u_int8_t *)th)[13]) 678#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
676 679
677#define TCPHDR_FIN 0x01 680#define TCPHDR_FIN 0x01
@@ -690,15 +693,18 @@ void tcp_send_window_probe(struct sock *sk);
690 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately. 693 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
691 */ 694 */
692struct tcp_skb_cb { 695struct tcp_skb_cb {
693 union {
694 struct inet_skb_parm h4;
695#if IS_ENABLED(CONFIG_IPV6)
696 struct inet6_skb_parm h6;
697#endif
698 } header; /* For incoming frames */
699 __u32 seq; /* Starting sequence number */ 696 __u32 seq; /* Starting sequence number */
700 __u32 end_seq; /* SEQ + FIN + SYN + datalen */ 697 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
701 __u32 when; /* used to compute rtt's */ 698 union {
699 /* Note : tcp_tw_isn is used in input path only
700 * (isn chosen by tcp_timewait_state_process())
701 *
702 * tcp_gso_segs is used in write queue only,
703 * cf tcp_skb_pcount()
704 */
705 __u32 tcp_tw_isn;
706 __u32 tcp_gso_segs;
707 };
702 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ 708 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
703 709
704 __u8 sacked; /* State flags for SACK/FACK. */ 710 __u8 sacked; /* State flags for SACK/FACK. */
@@ -714,33 +720,32 @@ struct tcp_skb_cb {
714 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ 720 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
715 /* 1 byte hole */ 721 /* 1 byte hole */
716 __u32 ack_seq; /* Sequence number ACK'd */ 722 __u32 ack_seq; /* Sequence number ACK'd */
723 union {
724 struct inet_skb_parm h4;
725#if IS_ENABLED(CONFIG_IPV6)
726 struct inet6_skb_parm h6;
727#endif
728 } header; /* For incoming frames */
717}; 729};
718 730
719#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) 731#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
720 732
721/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set 733/* Due to TSO, an SKB can be composed of multiple actual
722 * 734 * packets. To keep these tracked properly, we use this.
723 * If we receive a SYN packet with these bits set, it means a network is
724 * playing bad games with TOS bits. In order to avoid possible false congestion
725 * notifications, we disable TCP ECN negociation.
726 */ 735 */
727static inline void 736static inline int tcp_skb_pcount(const struct sk_buff *skb)
728TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
729 struct net *net)
730{ 737{
731 const struct tcphdr *th = tcp_hdr(skb); 738 return TCP_SKB_CB(skb)->tcp_gso_segs;
739}
732 740
733 if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr && 741static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
734 INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield)) 742{
735 inet_rsk(req)->ecn_ok = 1; 743 TCP_SKB_CB(skb)->tcp_gso_segs = segs;
736} 744}
737 745
738/* Due to TSO, an SKB can be composed of multiple actual 746static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
739 * packets. To keep these tracked properly, we use this.
740 */
741static inline int tcp_skb_pcount(const struct sk_buff *skb)
742{ 747{
743 return skb_shinfo(skb)->gso_segs; 748 TCP_SKB_CB(skb)->tcp_gso_segs += segs;
744} 749}
745 750
746/* This is valid iff tcp_skb_pcount() > 1. */ 751/* This is valid iff tcp_skb_pcount() > 1. */
@@ -755,8 +760,17 @@ enum tcp_ca_event {
755 CA_EVENT_CWND_RESTART, /* congestion window restart */ 760 CA_EVENT_CWND_RESTART, /* congestion window restart */
756 CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */ 761 CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
757 CA_EVENT_LOSS, /* loss timeout */ 762 CA_EVENT_LOSS, /* loss timeout */
758 CA_EVENT_FAST_ACK, /* in sequence ack */ 763 CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
759 CA_EVENT_SLOW_ACK, /* other ack */ 764 CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
765 CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
766 CA_EVENT_NON_DELAYED_ACK,
767};
768
769/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
770enum tcp_ca_ack_event_flags {
771 CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
772 CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
773 CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
760}; 774};
761 775
762/* 776/*
@@ -766,7 +780,10 @@ enum tcp_ca_event {
766#define TCP_CA_MAX 128 780#define TCP_CA_MAX 128
767#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX) 781#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
768 782
783/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
769#define TCP_CONG_NON_RESTRICTED 0x1 784#define TCP_CONG_NON_RESTRICTED 0x1
785/* Requires ECN/ECT set on all packets */
786#define TCP_CONG_NEEDS_ECN 0x2
770 787
771struct tcp_congestion_ops { 788struct tcp_congestion_ops {
772 struct list_head list; 789 struct list_head list;
@@ -785,6 +802,8 @@ struct tcp_congestion_ops {
785 void (*set_state)(struct sock *sk, u8 new_state); 802 void (*set_state)(struct sock *sk, u8 new_state);
786 /* call when cwnd event occurs (optional) */ 803 /* call when cwnd event occurs (optional) */
787 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); 804 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
805 /* call when ack arrives (optional) */
806 void (*in_ack_event)(struct sock *sk, u32 flags);
788 /* new value of cwnd after loss (optional) */ 807 /* new value of cwnd after loss (optional) */
789 u32 (*undo_cwnd)(struct sock *sk); 808 u32 (*undo_cwnd)(struct sock *sk);
790 /* hook for packet ack accounting (optional) */ 809 /* hook for packet ack accounting (optional) */
@@ -799,6 +818,7 @@ struct tcp_congestion_ops {
799int tcp_register_congestion_control(struct tcp_congestion_ops *type); 818int tcp_register_congestion_control(struct tcp_congestion_ops *type);
800void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); 819void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
801 820
821void tcp_assign_congestion_control(struct sock *sk);
802void tcp_init_congestion_control(struct sock *sk); 822void tcp_init_congestion_control(struct sock *sk);
803void tcp_cleanup_congestion_control(struct sock *sk); 823void tcp_cleanup_congestion_control(struct sock *sk);
804int tcp_set_default_congestion_control(const char *name); 824int tcp_set_default_congestion_control(const char *name);
@@ -807,14 +827,20 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
807void tcp_get_allowed_congestion_control(char *buf, size_t len); 827void tcp_get_allowed_congestion_control(char *buf, size_t len);
808int tcp_set_allowed_congestion_control(char *allowed); 828int tcp_set_allowed_congestion_control(char *allowed);
809int tcp_set_congestion_control(struct sock *sk, const char *name); 829int tcp_set_congestion_control(struct sock *sk, const char *name);
810int tcp_slow_start(struct tcp_sock *tp, u32 acked); 830void tcp_slow_start(struct tcp_sock *tp, u32 acked);
811void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); 831void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
812 832
813extern struct tcp_congestion_ops tcp_init_congestion_ops;
814u32 tcp_reno_ssthresh(struct sock *sk); 833u32 tcp_reno_ssthresh(struct sock *sk);
815void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); 834void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
816extern struct tcp_congestion_ops tcp_reno; 835extern struct tcp_congestion_ops tcp_reno;
817 836
837static inline bool tcp_ca_needs_ecn(const struct sock *sk)
838{
839 const struct inet_connection_sock *icsk = inet_csk(sk);
840
841 return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
842}
843
818static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) 844static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
819{ 845{
820 struct inet_connection_sock *icsk = inet_csk(sk); 846 struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1031,12 +1057,6 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
1031 tp->ucopy.len = 0; 1057 tp->ucopy.len = 0;
1032 tp->ucopy.memory = 0; 1058 tp->ucopy.memory = 0;
1033 skb_queue_head_init(&tp->ucopy.prequeue); 1059 skb_queue_head_init(&tp->ucopy.prequeue);
1034#ifdef CONFIG_NET_DMA
1035 tp->ucopy.dma_chan = NULL;
1036 tp->ucopy.wakeup = 0;
1037 tp->ucopy.pinned_list = NULL;
1038 tp->ucopy.dma_cookie = 0;
1039#endif
1040} 1060}
1041 1061
1042bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); 1062bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
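
The tcp.h changes move GSO segment accounting into tcp_skb_cb, add the optional in_ack_event() hook with its CA_ACK_* flags, and introduce TCP_CONG_NEEDS_ECN. A sketch of a toy congestion-control module wiring up the new hook on top of the Reno helpers declared above; the module name and the per-socket counter are illustrative only:

#include <linux/module.h>
#include <net/tcp.h>

struct example_ca {
	u32 win_update_acks;		/* private state in icsk_ca_priv */
};

/* New optional hook: called for inbound ACKs with CA_ACK_* flags. */
static void example_in_ack_event(struct sock *sk, u32 flags)
{
	struct example_ca *ca = inet_csk_ca(sk);

	if (flags & CA_ACK_WIN_UPDATE)
		ca->win_update_acks++;
}

static struct tcp_congestion_ops example_cong __read_mostly = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.in_ack_event	= example_in_ack_event,
	/* .flags = TCP_CONG_NEEDS_ECN would request ECT on all packets */
};

static int __init example_cong_init(void)
{
	BUILD_BUG_ON(sizeof(struct example_ca) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&example_cong);
}

static void __exit example_cong_exit(void)
{
	tcp_unregister_congestion_control(&example_cong);
}

module_init(example_cong_init);
module_exit(example_cong_exit);
MODULE_LICENSE("GPL");
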
diff --git a/include/net/udp.h b/include/net/udp.h
index 70f941368ace..07f9b70962f6 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -158,6 +158,24 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr,
158void udp_set_csum(bool nocheck, struct sk_buff *skb, 158void udp_set_csum(bool nocheck, struct sk_buff *skb,
159 __be32 saddr, __be32 daddr, int len); 159 __be32 saddr, __be32 daddr, int len);
160 160
161struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
162 struct udphdr *uh);
163int udp_gro_complete(struct sk_buff *skb, int nhoff);
164
165static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
166{
167 struct udphdr *uh;
168 unsigned int hlen, off;
169
170 off = skb_gro_offset(skb);
171 hlen = off + sizeof(*uh);
172 uh = skb_gro_header_fast(skb, off);
173 if (skb_gro_header_hard(skb, hlen))
174 uh = skb_gro_header_slow(skb, hlen, off);
175
176 return uh;
177}
178
161/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ 179/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
162static inline void udp_lib_hash(struct sock *sk) 180static inline void udp_lib_hash(struct sock *sk)
163{ 181{
@@ -221,7 +239,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
221int udp_disconnect(struct sock *sk, int flags); 239int udp_disconnect(struct sock *sk, int flags);
222unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); 240unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
223struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, 241struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
224 netdev_features_t features); 242 netdev_features_t features,
243 bool is_ipv6);
225int udp_lib_getsockopt(struct sock *sk, int level, int optname, 244int udp_lib_getsockopt(struct sock *sk, int level, int optname,
226 char __user *optval, int __user *optlen); 245 char __user *optval, int __user *optlen);
227int udp_lib_setsockopt(struct sock *sk, int level, int optname, 246int udp_lib_setsockopt(struct sock *sk, int level, int optname,
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index ffd69cbded35..a47790bcaa38 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -1,6 +1,14 @@
1#ifndef __NET_UDP_TUNNEL_H 1#ifndef __NET_UDP_TUNNEL_H
2#define __NET_UDP_TUNNEL_H 2#define __NET_UDP_TUNNEL_H
3 3
4#include <net/ip_tunnels.h>
5#include <net/udp.h>
6
7#if IS_ENABLED(CONFIG_IPV6)
8#include <net/ipv6.h>
9#include <net/addrconf.h>
10#endif
11
4struct udp_port_cfg { 12struct udp_port_cfg {
5 u8 family; 13 u8 family;
6 14
@@ -26,7 +34,80 @@ struct udp_port_cfg {
26 use_udp6_rx_checksums:1; 34 use_udp6_rx_checksums:1;
27}; 35};
28 36
29int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, 37int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
30 struct socket **sockp); 38 struct socket **sockp);
39
40#if IS_ENABLED(CONFIG_IPV6)
41int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
42 struct socket **sockp);
43#else
44static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
45 struct socket **sockp)
46{
47 return 0;
48}
49#endif
50
51static inline int udp_sock_create(struct net *net,
52 struct udp_port_cfg *cfg,
53 struct socket **sockp)
54{
55 if (cfg->family == AF_INET)
56 return udp_sock_create4(net, cfg, sockp);
57
58 if (cfg->family == AF_INET6)
59 return udp_sock_create6(net, cfg, sockp);
60
61 return -EPFNOSUPPORT;
62}
63
64typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
65typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
66
67struct udp_tunnel_sock_cfg {
68 void *sk_user_data; /* user data used by encap_rcv call back */
69 /* Used for setting up udp_sock fields, see udp.h for details */
70 __u8 encap_type;
71 udp_tunnel_encap_rcv_t encap_rcv;
72 udp_tunnel_encap_destroy_t encap_destroy;
73};
74
75/* Setup the given (UDP) sock to receive UDP encapsulated packets */
76void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
77 struct udp_tunnel_sock_cfg *sock_cfg);
78
79/* Transmit the skb using UDP encapsulation. */
80int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
81 struct sk_buff *skb, __be32 src, __be32 dst,
82 __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
83 __be16 dst_port, bool xnet);
84
85#if IS_ENABLED(CONFIG_IPV6)
86int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
87 struct sk_buff *skb, struct net_device *dev,
88 struct in6_addr *saddr, struct in6_addr *daddr,
89 __u8 prio, __u8 ttl, __be16 src_port,
90 __be16 dst_port);
91#endif
92
93void udp_tunnel_sock_release(struct socket *sock);
94
95static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
96 bool udp_csum)
97{
98 int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
99
100 return iptunnel_handle_offloads(skb, udp_csum, type);
101}
102
103static inline void udp_tunnel_encap_enable(struct socket *sock)
104{
105#if IS_ENABLED(CONFIG_IPV6)
106 if (sock->sk->sk_family == PF_INET6)
107 ipv6_stub->udpv6_encap_enable();
108 else
109#endif
110 udp_encap_enable();
111}
31 112
32#endif 113#endif
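
The new udp_tunnel helpers split socket creation (udp_sock_create4/6 behind udp_sock_create()), encap configuration (setup_udp_tunnel_sock()) and transmit (udp_tunnel_xmit_skb()). A hedged sketch of how a tunnel driver might open an IPv4 tunnel socket with them; the receive callback, port number and encap_type value are illustrative:

#include <linux/err.h>
#include <net/udp_tunnel.h>

static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* Decapsulation would happen here; returning 0 tells the UDP layer
	 * that the skb was consumed.
	 */
	kfree_skb(skb);
	return 0;
}

static struct socket *example_tunnel_open(struct net *net)
{
	struct udp_port_cfg port_cfg = {
		.family		= AF_INET,
		.local_udp_port	= htons(4789),	/* illustrative port */
	};
	struct udp_tunnel_sock_cfg tnl_cfg = {
		.encap_type	= 1,		/* tunnel-specific UDP_ENCAP value */
		.encap_rcv	= example_encap_rcv,
	};
	struct socket *sock;
	int err;

	err = udp_sock_create(net, &port_cfg, &sock);
	if (err < 0)
		return ERR_PTR(err);

	setup_udp_tunnel_sock(net, sock, &tnl_cfg);
	return sock;
}

Teardown would go through udp_tunnel_sock_release(), declared in the same hunk.
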
diff --git a/include/net/wimax.h b/include/net/wimax.h
index e52ef5357e08..c52b68577cb0 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -290,7 +290,7 @@ struct wimax_dev;
290 * This operation has to be synchronous, and return only when the 290 * This operation has to be synchronous, and return only when the
291 * reset is complete. In case of having had to resort to bus/cold 291 * reset is complete. In case of having had to resort to bus/cold
292 * reset implying a device disconnection, the call is allowed to 292 * reset implying a device disconnection, the call is allowed to
293 * return inmediately. 293 * return immediately.
294 * NOTE: wimax_dev->mutex is NOT locked when this op is being 294 * NOTE: wimax_dev->mutex is NOT locked when this op is being
295 * called; however, wimax_dev->mutex_reset IS locked to ensure 295 * called; however, wimax_dev->mutex_reset IS locked to ensure
296 * serialization of calls to wimax_reset(). 296 * serialization of calls to wimax_reset().
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 721e9c3b11bd..dc4865e90fe4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1591,6 +1591,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
1591struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, 1591struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
1592 u32 id, int delete, int *err); 1592 u32 id, int delete, int *err);
1593int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); 1593int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1594void xfrm_policy_hash_rebuild(struct net *net);
1594u32 xfrm_get_acqseq(void); 1595u32 xfrm_get_acqseq(void);
1595int verify_spi_info(u8 proto, u32 min, u32 max); 1596int verify_spi_info(u8 proto, u32 min, u32 max);
1596int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); 1597int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 47da53c27ffa..79abb9c71772 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -8,6 +8,7 @@
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9#include <linux/edac.h> 9#include <linux/edac.h>
10#include <linux/ktime.h> 10#include <linux/ktime.h>
11#include <linux/pci.h>
11#include <linux/aer.h> 12#include <linux/aer.h>
12#include <linux/cper.h> 13#include <linux/cper.h>
13 14
@@ -173,25 +174,34 @@ TRACE_EVENT(mc_event,
173 * u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED 174 * u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED
174 */ 175 */
175 176
176#define aer_correctable_errors \ 177#define aer_correctable_errors \
177 {BIT(0), "Receiver Error"}, \ 178 {PCI_ERR_COR_RCVR, "Receiver Error"}, \
178 {BIT(6), "Bad TLP"}, \ 179 {PCI_ERR_COR_BAD_TLP, "Bad TLP"}, \
179 {BIT(7), "Bad DLLP"}, \ 180 {PCI_ERR_COR_BAD_DLLP, "Bad DLLP"}, \
180 {BIT(8), "RELAY_NUM Rollover"}, \ 181 {PCI_ERR_COR_REP_ROLL, "RELAY_NUM Rollover"}, \
181 {BIT(12), "Replay Timer Timeout"}, \ 182 {PCI_ERR_COR_REP_TIMER, "Replay Timer Timeout"}, \
182 {BIT(13), "Advisory Non-Fatal"} 183 {PCI_ERR_COR_ADV_NFAT, "Advisory Non-Fatal Error"}, \
183 184 {PCI_ERR_COR_INTERNAL, "Corrected Internal Error"}, \
184#define aer_uncorrectable_errors \ 185 {PCI_ERR_COR_LOG_OVER, "Header Log Overflow"}
185 {BIT(4), "Data Link Protocol"}, \ 186
186 {BIT(12), "Poisoned TLP"}, \ 187#define aer_uncorrectable_errors \
187 {BIT(13), "Flow Control Protocol"}, \ 188 {PCI_ERR_UNC_UND, "Undefined"}, \
188 {BIT(14), "Completion Timeout"}, \ 189 {PCI_ERR_UNC_DLP, "Data Link Protocol Error"}, \
189 {BIT(15), "Completer Abort"}, \ 190 {PCI_ERR_UNC_SURPDN, "Surprise Down Error"}, \
190 {BIT(16), "Unexpected Completion"}, \ 191 {PCI_ERR_UNC_POISON_TLP,"Poisoned TLP"}, \
191 {BIT(17), "Receiver Overflow"}, \ 192 {PCI_ERR_UNC_FCP, "Flow Control Protocol Error"}, \
192 {BIT(18), "Malformed TLP"}, \ 193 {PCI_ERR_UNC_COMP_TIME, "Completion Timeout"}, \
193 {BIT(19), "ECRC"}, \ 194 {PCI_ERR_UNC_COMP_ABORT,"Completer Abort"}, \
194 {BIT(20), "Unsupported Request"} 195 {PCI_ERR_UNC_UNX_COMP, "Unexpected Completion"}, \
196 {PCI_ERR_UNC_RX_OVER, "Receiver Overflow"}, \
197 {PCI_ERR_UNC_MALF_TLP, "Malformed TLP"}, \
198 {PCI_ERR_UNC_ECRC, "ECRC Error"}, \
199 {PCI_ERR_UNC_UNSUP, "Unsupported Request Error"}, \
200 {PCI_ERR_UNC_ACSV, "ACS Violation"}, \
201 {PCI_ERR_UNC_INTN, "Uncorrectable Internal Error"},\
202 {PCI_ERR_UNC_MCBTLP, "MC Blocked TLP"}, \
203 {PCI_ERR_UNC_ATOMEG, "AtomicOp Egress Blocked"}, \
204 {PCI_ERR_UNC_TLPPRE, "TLP Prefix Blocked Error"}
195 205
196TRACE_EVENT(aer_event, 206TRACE_EVENT(aer_event,
197 TP_PROTO(const char *dev_name, 207 TP_PROTO(const char *dev_name,
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 1ea0b65c4cfb..a2bf41e0bde9 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -47,6 +47,7 @@ struct ib_umem {
47 int writable; 47 int writable;
48 int hugetlb; 48 int hugetlb;
49 struct work_struct work; 49 struct work_struct work;
50 struct pid *pid;
50 struct mm_struct *mm; 51 struct mm_struct *mm;
51 unsigned long diff; 52 unsigned long diff;
52 struct sg_table sg_head; 53 struct sg_table sg_head;
diff --git a/include/rxrpc/types.h b/include/rxrpc/types.h
deleted file mode 100644
index 30d48f6da228..000000000000
--- a/include/rxrpc/types.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/* types.h: Rx types
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_RXRPC_TYPES_H
13#define _LINUX_RXRPC_TYPES_H
14
15#include <linux/types.h>
16#include <linux/list.h>
17#include <linux/socket.h>
18#include <linux/in.h>
19#include <linux/spinlock.h>
20#include <linux/atomic.h>
21
22typedef uint32_t rxrpc_seq_t; /* Rx message sequence number */
23typedef uint32_t rxrpc_serial_t; /* Rx message serial number */
24typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */
25typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
26
27struct rxrpc_call;
28struct rxrpc_connection;
29struct rxrpc_header;
30struct rxrpc_message;
31struct rxrpc_operation;
32struct rxrpc_peer;
33struct rxrpc_service;
34typedef struct rxrpc_timer rxrpc_timer_t;
35struct rxrpc_transport;
36
37typedef void (*rxrpc_call_attn_func_t)(struct rxrpc_call *call);
38typedef void (*rxrpc_call_error_func_t)(struct rxrpc_call *call);
39typedef void (*rxrpc_call_aemap_func_t)(struct rxrpc_call *call);
40
41#endif /* _LINUX_RXRPC_TYPES_H */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 261e708010da..d17178e6fcdd 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -333,6 +333,7 @@ static inline int scsi_status_is_good(int status)
333#define TYPE_RBC 0x0e 333#define TYPE_RBC 0x0e
334#define TYPE_OSD 0x11 334#define TYPE_OSD 0x11
335#define TYPE_ZBC 0x14 335#define TYPE_ZBC 0x14
336#define TYPE_WLUN 0x1e /* well-known logical unit */
336#define TYPE_NO_LUN 0x7f 337#define TYPE_NO_LUN 0x7f
337 338
338/* SCSI protocols; these are taken from SPC-3 section 7.5 */ 339/* SCSI protocols; these are taken from SPC-3 section 7.5 */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 1a0d1842962e..27ecee73bd72 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -174,6 +174,7 @@ struct scsi_device {
174 unsigned wce_default_on:1; /* Cache is ON by default */ 174 unsigned wce_default_on:1; /* Cache is ON by default */
175 unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ 175 unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
176 unsigned broken_fua:1; /* Don't set FUA bit */ 176 unsigned broken_fua:1; /* Don't set FUA bit */
177 unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
177 178
178 atomic_t disk_events_disable_depth; /* disable depth for disk events */ 179 atomic_t disk_events_disable_depth; /* disable depth for disk events */
179 180
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ba2034779961..5e362489ee88 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -606,7 +606,7 @@ struct Scsi_Host {
606 /* 606 /*
607 * These three parameters can be used to allow for wide scsi, 607 * These three parameters can be used to allow for wide scsi,
608 * and for host adapters that support multiple busses 608 * and for host adapters that support multiple busses
609 * The first two should be set to 1 more than the actual max id 609 * The last two should be set to 1 more than the actual max id
610 * or lun (e.g. 8 for SCSI parallel systems). 610 * or lun (e.g. 8 for SCSI parallel systems).
611 */ 611 */
612 unsigned int max_channel; 612 unsigned int max_channel;
@@ -680,6 +680,7 @@ struct Scsi_Host {
680 unsigned no_write_same:1; 680 unsigned no_write_same:1;
681 681
682 unsigned use_blk_mq:1; 682 unsigned use_blk_mq:1;
683 unsigned use_cmd_list:1;
683 684
684 /* 685 /*
685 * Optional work queue to be utilized by the transport 686 * Optional work queue to be utilized by the transport
@@ -692,6 +693,9 @@ struct Scsi_Host {
692 */ 693 */
693 struct workqueue_struct *tmf_work_q; 694 struct workqueue_struct *tmf_work_q;
694 695
696 /* The transport requires the LUN bits NOT to be stored in CDB[1] */
697 unsigned no_scsi2_lun_in_cdb:1;
698
695 /* 699 /*
696 * Value host_blocked counts down from 700 * Value host_blocked counts down from
697 */ 701 */
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index cdcc90b07ecb..e64583560701 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -68,7 +68,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
68 return; 68 return;
69 69
70 if (!shost_use_blk_mq(sdev->host) && 70 if (!shost_use_blk_mq(sdev->host) &&
71 blk_queue_tagged(sdev->request_queue)) 71 !blk_queue_tagged(sdev->request_queue))
72 blk_queue_init_tags(sdev->request_queue, depth, 72 blk_queue_init_tags(sdev->request_queue, depth,
73 sdev->host->bqt); 73 sdev->host->bqt);
74 74
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 6f3e10ca0e32..e862497f7556 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -183,6 +183,7 @@ struct snd_pcm_ops {
183#define SNDRV_PCM_FMTBIT_G723_40_1B _SNDRV_PCM_FMTBIT(G723_40_1B) 183#define SNDRV_PCM_FMTBIT_G723_40_1B _SNDRV_PCM_FMTBIT(G723_40_1B)
184#define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8) 184#define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8)
185#define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE) 185#define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE)
186#define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE)
186 187
187#ifdef SNDRV_LITTLE_ENDIAN 188#ifdef SNDRV_LITTLE_ENDIAN
188#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE 189#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE
@@ -365,6 +366,7 @@ struct snd_pcm_runtime {
365 366
366struct snd_pcm_group { /* keep linked substreams */ 367struct snd_pcm_group { /* keep linked substreams */
367 spinlock_t lock; 368 spinlock_t lock;
369 struct mutex mutex;
368 struct list_head substreams; 370 struct list_head substreams;
369 int count; 371 int count;
370}; 372};
@@ -460,6 +462,7 @@ struct snd_pcm {
460 void (*private_free) (struct snd_pcm *pcm); 462 void (*private_free) (struct snd_pcm *pcm);
461 struct device *dev; /* actual hw device this belongs to */ 463 struct device *dev; /* actual hw device this belongs to */
462 bool internal; /* pcm is for internal use only */ 464 bool internal; /* pcm is for internal use only */
465 bool nonatomic; /* whole PCM operations are in non-atomic context */
463#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE) 466#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
464 struct snd_pcm_oss oss; 467 struct snd_pcm_oss oss;
465#endif 468#endif
@@ -492,8 +495,6 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree);
492 * Native I/O 495 * Native I/O
493 */ 496 */
494 497
495extern rwlock_t snd_pcm_link_rwlock;
496
497int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info); 498int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info);
498int snd_pcm_info_user(struct snd_pcm_substream *substream, 499int snd_pcm_info_user(struct snd_pcm_substream *substream,
499 struct snd_pcm_info __user *info); 500 struct snd_pcm_info __user *info);
@@ -537,41 +538,18 @@ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
537 return substream->group != &substream->self_group; 538 return substream->group != &substream->self_group;
538} 539}
539 540
540static inline void snd_pcm_stream_lock(struct snd_pcm_substream *substream) 541void snd_pcm_stream_lock(struct snd_pcm_substream *substream);
541{ 542void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
542 read_lock(&snd_pcm_link_rwlock); 543void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
543 spin_lock(&substream->self_group.lock); 544void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
544} 545unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
545 546#define snd_pcm_stream_lock_irqsave(substream, flags) \
546static inline void snd_pcm_stream_unlock(struct snd_pcm_substream *substream) 547 do { \
547{ 548 typecheck(unsigned long, flags); \
548 spin_unlock(&substream->self_group.lock); 549 flags = _snd_pcm_stream_lock_irqsave(substream); \
549 read_unlock(&snd_pcm_link_rwlock); 550 } while (0)
550} 551void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
551 552 unsigned long flags);
552static inline void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
553{
554 read_lock_irq(&snd_pcm_link_rwlock);
555 spin_lock(&substream->self_group.lock);
556}
557
558static inline void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
559{
560 spin_unlock(&substream->self_group.lock);
561 read_unlock_irq(&snd_pcm_link_rwlock);
562}
563
564#define snd_pcm_stream_lock_irqsave(substream, flags) \
565do { \
566 read_lock_irqsave(&snd_pcm_link_rwlock, (flags)); \
567 spin_lock(&substream->self_group.lock); \
568} while (0)
569
570#define snd_pcm_stream_unlock_irqrestore(substream, flags) \
571do { \
572 spin_unlock(&substream->self_group.lock); \
573 read_unlock_irqrestore(&snd_pcm_link_rwlock, (flags)); \
574} while (0)
575 553
576#define snd_pcm_group_for_each_entry(s, substream) \ 554#define snd_pcm_group_for_each_entry(s, substream) \
577 list_for_each_entry(s, &substream->group->substreams, link_list) 555 list_for_each_entry(s, &substream->group->substreams, link_list)
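
With the pcm.h change, snd_pcm_stream_lock() and friends move out of line and snd_pcm_stream_lock_irqsave() becomes a typechecked macro around _snd_pcm_stream_lock_irqsave(); existing callers keep the same pattern. A minimal usage sketch, with the surrounding function invented for illustration:

#include <sound/pcm.h>

static void example_recover_xrun(struct snd_pcm_substream *substream)
{
	unsigned long flags;

	snd_pcm_stream_lock_irqsave(substream, flags);
	/* stream state may only be inspected or changed under the stream lock */
	if (substream->runtime &&
	    substream->runtime->status->state == SNDRV_PCM_STATE_XRUN)
		snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
}
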
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index 1de744c242f6..a5352712194b 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -20,6 +20,9 @@ struct rt5645_platform_data {
20 /* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */ 20 /* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */
21 unsigned int dmic2_data_pin; 21 unsigned int dmic2_data_pin;
22 /* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */ 22 /* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */
23
24 unsigned int hp_det_gpio;
25 bool gpio_hp_det_active_high;
23}; 26};
24 27
25#endif 28#endif
diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h
index 3da14313bcfc..082670e3a353 100644
--- a/include/sound/rt5677.h
+++ b/include/sound/rt5677.h
@@ -12,10 +12,21 @@
12#ifndef __LINUX_SND_RT5677_H 12#ifndef __LINUX_SND_RT5677_H
13#define __LINUX_SND_RT5677_H 13#define __LINUX_SND_RT5677_H
14 14
15enum rt5677_dmic2_clk {
16 RT5677_DMIC_CLK1 = 0,
17 RT5677_DMIC_CLK2 = 1,
18};
19
20
15struct rt5677_platform_data { 21struct rt5677_platform_data {
16 /* IN1 IN2 can optionally be differential */ 22 /* IN1/IN2/LOUT1/LOUT2/LOUT3 can optionally be differential */
17 bool in1_diff; 23 bool in1_diff;
18 bool in2_diff; 24 bool in2_diff;
25 bool lout1_diff;
26 bool lout2_diff;
27 bool lout3_diff;
28 /* DMIC2 clock source selection */
29 enum rt5677_dmic2_clk dmic2_clk_pin;
19}; 30};
20 31
21#endif 32#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index aac04ff84eea..3a4d7da67b8d 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -432,6 +432,7 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
432int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, 432int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
433 const char *pin); 433 const char *pin);
434void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card); 434void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card);
435unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
435 436
436/* Mostly internal - should not normally be used */ 437/* Mostly internal - should not normally be used */
437void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm); 438void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm);
@@ -587,13 +588,13 @@ struct snd_soc_dapm_context {
587 enum snd_soc_bias_level suspend_bias_level; 588 enum snd_soc_bias_level suspend_bias_level;
588 struct delayed_work delayed_work; 589 struct delayed_work delayed_work;
589 unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */ 590 unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
590 591 /* Go to BIAS_OFF in suspend if the DAPM context is idle */
592 unsigned int suspend_bias_off:1;
591 void (*seq_notifier)(struct snd_soc_dapm_context *, 593 void (*seq_notifier)(struct snd_soc_dapm_context *,
592 enum snd_soc_dapm_type, int); 594 enum snd_soc_dapm_type, int);
593 595
594 struct device *dev; /* from parent - for debug */ 596 struct device *dev; /* from parent - for debug */
595 struct snd_soc_component *component; /* parent component */ 597 struct snd_soc_component *component; /* parent component */
596 struct snd_soc_codec *codec; /* parent codec */
597 struct snd_soc_card *card; /* parent card */ 598 struct snd_soc_card *card; /* parent card */
598 599
599 /* used during DAPM updates */ 600 /* used during DAPM updates */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index be6ecae247b0..7ba7130037a0 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -277,7 +277,7 @@
277 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE | \ 277 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE | \
278 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \ 278 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \
279 .tlv.c = (snd_soc_bytes_tlv_callback), \ 279 .tlv.c = (snd_soc_bytes_tlv_callback), \
280 .info = snd_soc_info_bytes_ext, \ 280 .info = snd_soc_bytes_info_ext, \
281 .private_value = (unsigned long)&(struct soc_bytes_ext) \ 281 .private_value = (unsigned long)&(struct soc_bytes_ext) \
282 {.max = xcount, .get = xhandler_get, .put = xhandler_put, } } 282 {.max = xcount, .get = xhandler_get, .put = xhandler_put, } }
283#define SOC_SINGLE_XR_SX(xname, xregbase, xregcount, xnbits, \ 283#define SOC_SINGLE_XR_SX(xname, xregbase, xregcount, xnbits, \
@@ -690,6 +690,17 @@ struct snd_soc_compr_ops {
690struct snd_soc_component_driver { 690struct snd_soc_component_driver {
691 const char *name; 691 const char *name;
692 692
693 /* Default control and setup, added after probe() is run */
694 const struct snd_kcontrol_new *controls;
695 unsigned int num_controls;
696 const struct snd_soc_dapm_widget *dapm_widgets;
697 unsigned int num_dapm_widgets;
698 const struct snd_soc_dapm_route *dapm_routes;
699 unsigned int num_dapm_routes;
700
701 int (*probe)(struct snd_soc_component *);
702 void (*remove)(struct snd_soc_component *);
703
693 /* DT */ 704 /* DT */
694 int (*of_xlate_dai_name)(struct snd_soc_component *component, 705 int (*of_xlate_dai_name)(struct snd_soc_component *component,
695 struct of_phandle_args *args, 706 struct of_phandle_args *args,
@@ -697,6 +708,10 @@ struct snd_soc_component_driver {
697 void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type, 708 void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type,
698 int subseq); 709 int subseq);
699 int (*stream_event)(struct snd_soc_component *, int event); 710 int (*stream_event)(struct snd_soc_component *, int event);
711
712 /* probe ordering - for components with runtime dependencies */
713 int probe_order;
714 int remove_order;
700}; 715};
701 716
702struct snd_soc_component { 717struct snd_soc_component {
@@ -710,6 +725,7 @@ struct snd_soc_component {
710 725
711 unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */ 726 unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
712 unsigned int registered_as_component:1; 727 unsigned int registered_as_component:1;
728 unsigned int probed:1;
713 729
714 struct list_head list; 730 struct list_head list;
715 731
@@ -728,9 +744,35 @@ struct snd_soc_component {
728 744
729 struct mutex io_mutex; 745 struct mutex io_mutex;
730 746
747#ifdef CONFIG_DEBUG_FS
748 struct dentry *debugfs_root;
749#endif
750
751 /*
752 * DO NOT use any of the fields below in drivers, they are temporary and
753 * are going to be removed again soon. If you use them in driver code the
754 * driver will be marked as BROKEN when these fields are removed.
755 */
756
731 /* Don't use these, use snd_soc_component_get_dapm() */ 757 /* Don't use these, use snd_soc_component_get_dapm() */
732 struct snd_soc_dapm_context dapm; 758 struct snd_soc_dapm_context dapm;
733 struct snd_soc_dapm_context *dapm_ptr; 759 struct snd_soc_dapm_context *dapm_ptr;
760
761 const struct snd_kcontrol_new *controls;
762 unsigned int num_controls;
763 const struct snd_soc_dapm_widget *dapm_widgets;
764 unsigned int num_dapm_widgets;
765 const struct snd_soc_dapm_route *dapm_routes;
766 unsigned int num_dapm_routes;
767 struct snd_soc_codec *codec;
768
769 int (*probe)(struct snd_soc_component *);
770 void (*remove)(struct snd_soc_component *);
771
772#ifdef CONFIG_DEBUG_FS
773 void (*init_debugfs)(struct snd_soc_component *component);
774 const char *debugfs_prefix;
775#endif
734}; 776};
735 777
736/* SoC Audio Codec device */ 778/* SoC Audio Codec device */
@@ -746,11 +788,9 @@ struct snd_soc_codec {
746 struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */ 788 struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */
747 unsigned int cache_bypass:1; /* Suppress access to the cache */ 789 unsigned int cache_bypass:1; /* Suppress access to the cache */
748 unsigned int suspended:1; /* Codec is in suspend PM state */ 790 unsigned int suspended:1; /* Codec is in suspend PM state */
749 unsigned int probed:1; /* Codec has been probed */
750 unsigned int ac97_registered:1; /* Codec has been AC97 registered */ 791 unsigned int ac97_registered:1; /* Codec has been AC97 registered */
751 unsigned int ac97_created:1; /* Codec has been created by SoC */ 792 unsigned int ac97_created:1; /* Codec has been created by SoC */
752 unsigned int cache_init:1; /* codec cache has been initialized */ 793 unsigned int cache_init:1; /* codec cache has been initialized */
753 u32 cache_only; /* Suppress writes to hardware */
754 u32 cache_sync; /* Cache needs to be synced to hardware */ 794 u32 cache_sync; /* Cache needs to be synced to hardware */
755 795
756 /* codec IO */ 796 /* codec IO */
@@ -766,7 +806,6 @@ struct snd_soc_codec {
766 struct snd_soc_dapm_context dapm; 806 struct snd_soc_dapm_context dapm;
767 807
768#ifdef CONFIG_DEBUG_FS 808#ifdef CONFIG_DEBUG_FS
769 struct dentry *debugfs_codec_root;
770 struct dentry *debugfs_reg; 809 struct dentry *debugfs_reg;
771#endif 810#endif
772}; 811};
@@ -808,15 +847,12 @@ struct snd_soc_codec_driver {
808 int (*set_bias_level)(struct snd_soc_codec *, 847 int (*set_bias_level)(struct snd_soc_codec *,
809 enum snd_soc_bias_level level); 848 enum snd_soc_bias_level level);
810 bool idle_bias_off; 849 bool idle_bias_off;
850 bool suspend_bias_off;
811 851
812 void (*seq_notifier)(struct snd_soc_dapm_context *, 852 void (*seq_notifier)(struct snd_soc_dapm_context *,
813 enum snd_soc_dapm_type, int); 853 enum snd_soc_dapm_type, int);
814 854
815 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */ 855 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
816
817 /* probe ordering - for components with runtime dependencies */
818 int probe_order;
819 int remove_order;
820}; 856};
821 857
822/* SoC platform interface */ 858/* SoC platform interface */
@@ -832,14 +868,6 @@ struct snd_soc_platform_driver {
832 int (*pcm_new)(struct snd_soc_pcm_runtime *); 868 int (*pcm_new)(struct snd_soc_pcm_runtime *);
833 void (*pcm_free)(struct snd_pcm *); 869 void (*pcm_free)(struct snd_pcm *);
834 870
835 /* Default control and setup, added after probe() is run */
836 const struct snd_kcontrol_new *controls;
837 int num_controls;
838 const struct snd_soc_dapm_widget *dapm_widgets;
839 int num_dapm_widgets;
840 const struct snd_soc_dapm_route *dapm_routes;
841 int num_dapm_routes;
842
843 /* 871 /*
844 * For platform caused delay reporting. 872 * For platform caused delay reporting.
845 * Optional. 873 * Optional.
@@ -853,13 +881,6 @@ struct snd_soc_platform_driver {
853 /* platform stream compress ops */ 881 /* platform stream compress ops */
854 const struct snd_compr_ops *compr_ops; 882 const struct snd_compr_ops *compr_ops;
855 883
856 /* probe ordering - for components with runtime dependencies */
857 int probe_order;
858 int remove_order;
859
860 /* platform IO - used for platform DAPM */
861 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
862 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
863 int (*bespoke_trigger)(struct snd_pcm_substream *, int); 884 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
864}; 885};
865 886
@@ -874,15 +895,10 @@ struct snd_soc_platform {
874 const struct snd_soc_platform_driver *driver; 895 const struct snd_soc_platform_driver *driver;
875 896
876 unsigned int suspended:1; /* platform is suspended */ 897 unsigned int suspended:1; /* platform is suspended */
877 unsigned int probed:1;
878 898
879 struct list_head list; 899 struct list_head list;
880 900
881 struct snd_soc_component component; 901 struct snd_soc_component component;
882
883#ifdef CONFIG_DEBUG_FS
884 struct dentry *debugfs_platform_root;
885#endif
886}; 902};
887 903
888struct snd_soc_dai_link { 904struct snd_soc_dai_link {
@@ -897,7 +913,7 @@ struct snd_soc_dai_link {
897 * only for codec to codec links, or systems using device tree. 913 * only for codec to codec links, or systems using device tree.
898 */ 914 */
899 const char *cpu_name; 915 const char *cpu_name;
900 const struct device_node *cpu_of_node; 916 struct device_node *cpu_of_node;
901 /* 917 /*
902 * You MAY specify the DAI name of the CPU DAI. If this information is 918 * You MAY specify the DAI name of the CPU DAI. If this information is
903 * omitted, the CPU-side DAI is matched using .cpu_name/.cpu_of_node 919 * omitted, the CPU-side DAI is matched using .cpu_name/.cpu_of_node
@@ -909,7 +925,7 @@ struct snd_soc_dai_link {
909 * DT/OF node, but not both. 925 * DT/OF node, but not both.
910 */ 926 */
911 const char *codec_name; 927 const char *codec_name;
912 const struct device_node *codec_of_node; 928 struct device_node *codec_of_node;
913 /* You MUST specify the DAI name within the codec */ 929 /* You MUST specify the DAI name within the codec */
914 const char *codec_dai_name; 930 const char *codec_dai_name;
915 931
@@ -922,7 +938,7 @@ struct snd_soc_dai_link {
922 * do not need a platform. 938 * do not need a platform.
923 */ 939 */
924 const char *platform_name; 940 const char *platform_name;
925 const struct device_node *platform_of_node; 941 struct device_node *platform_of_node;
926 int be_id; /* optional ID for machine driver BE identification */ 942 int be_id; /* optional ID for machine driver BE identification */
927 943
928 const struct snd_soc_pcm_stream *params; 944 const struct snd_soc_pcm_stream *params;
@@ -994,7 +1010,7 @@ struct snd_soc_aux_dev {
994 const struct device_node *codec_of_node; 1010 const struct device_node *codec_of_node;
995 1011
996 /* codec/machine specific init - e.g. add machine controls */ 1012 /* codec/machine specific init - e.g. add machine controls */
997 int (*init)(struct snd_soc_dapm_context *dapm); 1013 int (*init)(struct snd_soc_component *component);
998}; 1014};
999 1015
1000/* SoC card */ 1016/* SoC card */
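The aux_dev init callback in the hunk above now receives the snd_soc_component itself rather than a DAPM context. A minimal hedged sketch of a machine-driver callback under the new prototype; the driver, device and codec names are made up for illustration and are not part of this patch:

#include <linux/device.h>
#include <sound/soc.h>

/* Hypothetical aux amplifier init; illustrative only. */
static int my_amp_init(struct snd_soc_component *component)
{
	/* machine-specific setup would go here */
	dev_info(component->dev, "aux component %s ready\n", component->name);
	return 0;
}

static struct snd_soc_aux_dev my_amp_aux_dev = {
	.name       = "my-amp",
	.codec_name = "my-amp.1-004b",	/* illustrative I2C device name */
	.init       = my_amp_init,
};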
@@ -1112,6 +1128,7 @@ struct snd_soc_pcm_runtime {
1112 struct snd_soc_platform *platform; 1128 struct snd_soc_platform *platform;
1113 struct snd_soc_dai *codec_dai; 1129 struct snd_soc_dai *codec_dai;
1114 struct snd_soc_dai *cpu_dai; 1130 struct snd_soc_dai *cpu_dai;
1131 struct snd_soc_component *component; /* Only valid for AUX dev rtds */
1115 1132
1116 struct snd_soc_dai **codec_dais; 1133 struct snd_soc_dai **codec_dais;
1117 unsigned int num_codecs; 1134 unsigned int num_codecs;
@@ -1260,9 +1277,6 @@ void snd_soc_component_async_complete(struct snd_soc_component *component);
1260int snd_soc_component_test_bits(struct snd_soc_component *component, 1277int snd_soc_component_test_bits(struct snd_soc_component *component,
1261 unsigned int reg, unsigned int mask, unsigned int value); 1278 unsigned int reg, unsigned int mask, unsigned int value);
1262 1279
1263int snd_soc_component_init_io(struct snd_soc_component *component,
1264 struct regmap *regmap);
1265
1266/* device driver data */ 1280/* device driver data */
1267 1281
1268static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card, 1282static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
@@ -1276,26 +1290,37 @@ static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card)
1276 return card->drvdata; 1290 return card->drvdata;
1277} 1291}
1278 1292
1293static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c,
1294 void *data)
1295{
1296 dev_set_drvdata(c->dev, data);
1297}
1298
1299static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c)
1300{
1301 return dev_get_drvdata(c->dev);
1302}
1303
1279static inline void snd_soc_codec_set_drvdata(struct snd_soc_codec *codec, 1304static inline void snd_soc_codec_set_drvdata(struct snd_soc_codec *codec,
1280 void *data) 1305 void *data)
1281{ 1306{
1282 dev_set_drvdata(codec->dev, data); 1307 snd_soc_component_set_drvdata(&codec->component, data);
1283} 1308}
1284 1309
1285static inline void *snd_soc_codec_get_drvdata(struct snd_soc_codec *codec) 1310static inline void *snd_soc_codec_get_drvdata(struct snd_soc_codec *codec)
1286{ 1311{
1287 return dev_get_drvdata(codec->dev); 1312 return snd_soc_component_get_drvdata(&codec->component);
1288} 1313}
1289 1314
1290static inline void snd_soc_platform_set_drvdata(struct snd_soc_platform *platform, 1315static inline void snd_soc_platform_set_drvdata(struct snd_soc_platform *platform,
1291 void *data) 1316 void *data)
1292{ 1317{
1293 dev_set_drvdata(platform->dev, data); 1318 snd_soc_component_set_drvdata(&platform->component, data);
1294} 1319}
1295 1320
1296static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platform) 1321static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platform)
1297{ 1322{
1298 return dev_get_drvdata(platform->dev); 1323 return snd_soc_component_get_drvdata(&platform->component);
1299} 1324}
1300 1325
1301static inline void snd_soc_pcm_set_drvdata(struct snd_soc_pcm_runtime *rtd, 1326static inline void snd_soc_pcm_set_drvdata(struct snd_soc_pcm_runtime *rtd,
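The codec and platform drvdata helpers above become thin wrappers around the new component-level snd_soc_component_set_drvdata()/snd_soc_component_get_drvdata(), which in turn use dev_set_drvdata()/dev_get_drvdata() on the component's device. A hedged sketch of how a driver might use them; "my_codec_priv" and the functions below are illustrative, not part of the header:

#include <linux/device.h>
#include <linux/slab.h>
#include <sound/soc.h>

struct my_codec_priv {
	unsigned int sysclk_hz;
};

static int my_codec_probe(struct snd_soc_codec *codec)
{
	struct my_codec_priv *priv;

	priv = devm_kzalloc(codec->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Lands in dev_get_drvdata() of the component's device via the
	 * new snd_soc_component_set_drvdata() wrapper. */
	snd_soc_codec_set_drvdata(codec, priv);
	return 0;
}

static void my_codec_note_sysclk(struct snd_soc_codec *codec, unsigned int hz)
{
	struct my_codec_priv *priv = snd_soc_codec_get_drvdata(codec);

	priv->sysclk_hz = hz;
}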
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index f634f8f85db5..cae9c9d4ef22 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -80,8 +80,6 @@ struct vx_pipe {
80 80
81 unsigned int references; /* an output pipe may be used for monitoring and/or playback */ 81 unsigned int references; /* an output pipe may be used for monitoring and/or playback */
82 struct vx_pipe *monitoring_pipe; /* pointer to the monitoring pipe (capture pipe only)*/ 82 struct vx_pipe *monitoring_pipe; /* pointer to the monitoring pipe (capture pipe only)*/
83
84 struct tasklet_struct start_tq;
85}; 83};
86 84
87struct vx_core; 85struct vx_core;
@@ -165,9 +163,7 @@ struct vx_core {
165 struct snd_vx_hardware *hw; 163 struct snd_vx_hardware *hw;
166 struct snd_vx_ops *ops; 164 struct snd_vx_ops *ops;
167 165
168 spinlock_t lock; 166 struct mutex lock;
169 spinlock_t irq_lock;
170 struct tasklet_struct tq;
171 167
172 unsigned int chip_status; 168 unsigned int chip_status;
173 unsigned int pcm_running; 169 unsigned int pcm_running;
@@ -223,6 +219,7 @@ void snd_vx_free_firmware(struct vx_core *chip);
223 * interrupt handler; exported for pcmcia 219 * interrupt handler; exported for pcmcia
224 */ 220 */
225irqreturn_t snd_vx_irq_handler(int irq, void *dev); 221irqreturn_t snd_vx_irq_handler(int irq, void *dev);
222irqreturn_t snd_vx_threaded_irq_handler(int irq, void *dev);
226 223
227/* 224/*
228 * lowlevel functions 225 * lowlevel functions
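With the tasklets and spinlocks gone, the VX core relies on a mutex plus a split hard-IRQ/threaded handler pair. A hedged sketch of how a card driver might request the interrupt under the new scheme, assuming the vx_core pointer is passed as dev_id and that the hard handler returns IRQ_WAKE_THREAD when the threaded half has work to do:

#include <linux/interrupt.h>
#include <sound/vx_core.h>

/* Illustrative only; the surrounding probe code and error handling of a
 * real VX PCI/PCMCIA driver are omitted. */
static int my_vx_request_irq(struct vx_core *chip, int irq)
{
	return request_threaded_irq(irq, snd_vx_irq_handler,
				    snd_vx_threaded_irq_handler,
				    IRQF_SHARED, "snd_vx", chip);
}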
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 0194a641e4e2..b04ee7e5a466 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -175,7 +175,7 @@ TRACE_EVENT(snd_soc_dapm_output_path,
175 __entry->path_sink = (long)path->sink; 175 __entry->path_sink = (long)path->sink;
176 ), 176 ),
177 177
178 TP_printk("%c%s -> %s -> %s\n", 178 TP_printk("%c%s -> %s -> %s",
179 (int) __entry->path_sink && 179 (int) __entry->path_sink &&
180 (int) __entry->path_connect ? '*' : ' ', 180 (int) __entry->path_connect ? '*' : ' ',
181 __get_str(wname), __get_str(pname), __get_str(psname)) 181 __get_str(wname), __get_str(pname), __get_str(psname))
@@ -204,7 +204,7 @@ TRACE_EVENT(snd_soc_dapm_input_path,
204 __entry->path_source = (long)path->source; 204 __entry->path_source = (long)path->source;
205 ), 205 ),
206 206
207 TP_printk("%c%s <- %s <- %s\n", 207 TP_printk("%c%s <- %s <- %s",
208 (int) __entry->path_source && 208 (int) __entry->path_source &&
209 (int) __entry->path_connect ? '*' : ' ', 209 (int) __entry->path_connect ? '*' : ' ',
210 __get_str(wname), __get_str(pname), __get_str(psname)) 210 __get_str(wname), __get_str(pname), __get_str(psname))
@@ -226,7 +226,7 @@ TRACE_EVENT(snd_soc_dapm_connected,
226 __entry->stream = stream; 226 __entry->stream = stream;
227 ), 227 ),
228 228
229 TP_printk("%s: found %d paths\n", 229 TP_printk("%s: found %d paths",
230 __entry->stream ? "capture" : "playback", __entry->paths) 230 __entry->stream ? "capture" : "playback", __entry->paths)
231); 231);
232 232
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4ee4e30d26d9..1faecea101f3 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -23,6 +23,7 @@ struct map_lookup;
23struct extent_buffer; 23struct extent_buffer;
24struct btrfs_work; 24struct btrfs_work;
25struct __btrfs_workqueue; 25struct __btrfs_workqueue;
26struct btrfs_qgroup_operation;
26 27
27#define show_ref_type(type) \ 28#define show_ref_type(type) \
28 __print_symbolic(type, \ 29 __print_symbolic(type, \
@@ -157,12 +158,13 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
157 158
158#define show_map_flags(flag) \ 159#define show_map_flags(flag) \
159 __print_flags(flag, "|", \ 160 __print_flags(flag, "|", \
160 { EXTENT_FLAG_PINNED, "PINNED" }, \ 161 { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
161 { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \ 162 { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
162 { EXTENT_FLAG_VACANCY, "VACANCY" }, \ 163 { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
163 { EXTENT_FLAG_PREALLOC, "PREALLOC" }, \ 164 { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
164 { EXTENT_FLAG_LOGGING, "LOGGING" }, \ 165 { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
165 { EXTENT_FLAG_FILLING, "FILLING" }) 166 { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
167 { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
166 168
167TRACE_EVENT_CONDITION(btrfs_get_extent, 169TRACE_EVENT_CONDITION(btrfs_get_extent,
168 170
@@ -996,6 +998,7 @@ DECLARE_EVENT_CLASS(btrfs__work,
996 __field( void *, func ) 998 __field( void *, func )
997 __field( void *, ordered_func ) 999 __field( void *, ordered_func )
998 __field( void *, ordered_free ) 1000 __field( void *, ordered_free )
1001 __field( void *, normal_work )
999 ), 1002 ),
1000 1003
1001 TP_fast_assign( 1004 TP_fast_assign(
@@ -1004,11 +1007,13 @@ DECLARE_EVENT_CLASS(btrfs__work,
1004 __entry->func = work->func; 1007 __entry->func = work->func;
1005 __entry->ordered_func = work->ordered_func; 1008 __entry->ordered_func = work->ordered_func;
1006 __entry->ordered_free = work->ordered_free; 1009 __entry->ordered_free = work->ordered_free;
1010 __entry->normal_work = &work->normal_work;
1007 ), 1011 ),
1008 1012
1009 TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p", 1013 TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
1010 __entry->work, __entry->wq, __entry->func, 1014 " ordered_free=%p",
1011 __entry->ordered_func, __entry->ordered_free) 1015 __entry->work, __entry->normal_work, __entry->wq,
1016 __entry->func, __entry->ordered_func, __entry->ordered_free)
1012); 1017);
1013 1018
1014/* For situations that the work is freed */ 1019/* For situations that the work is freed */

@@ -1043,13 +1048,6 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
1043 TP_ARGS(work) 1048 TP_ARGS(work)
1044); 1049);
1045 1050
1046DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,
1047
1048 TP_PROTO(struct btrfs_work *work),
1049
1050 TP_ARGS(work)
1051);
1052
1053DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done, 1051DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
1054 1052
1055 TP_PROTO(struct btrfs_work *work), 1053 TP_PROTO(struct btrfs_work *work),
@@ -1119,6 +1117,61 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
1119 TP_ARGS(wq) 1117 TP_ARGS(wq)
1120); 1118);
1121 1119
1120#define show_oper_type(type) \
1121 __print_symbolic(type, \
1122 { BTRFS_QGROUP_OPER_ADD_EXCL, "OPER_ADD_EXCL" }, \
1123 { BTRFS_QGROUP_OPER_ADD_SHARED, "OPER_ADD_SHARED" }, \
1124 { BTRFS_QGROUP_OPER_SUB_EXCL, "OPER_SUB_EXCL" }, \
1125 { BTRFS_QGROUP_OPER_SUB_SHARED, "OPER_SUB_SHARED" })
1126
1127DECLARE_EVENT_CLASS(btrfs_qgroup_oper,
1128
1129 TP_PROTO(struct btrfs_qgroup_operation *oper),
1130
1131 TP_ARGS(oper),
1132
1133 TP_STRUCT__entry(
1134 __field( u64, ref_root )
1135 __field( u64, bytenr )
1136 __field( u64, num_bytes )
1137 __field( u64, seq )
1138 __field( int, type )
1139 __field( u64, elem_seq )
1140 ),
1141
1142 TP_fast_assign(
1143 __entry->ref_root = oper->ref_root;
1144 	__entry->bytenr		= oper->bytenr;
1145 __entry->num_bytes = oper->num_bytes;
1146 __entry->seq = oper->seq;
1147 __entry->type = oper->type;
1148 __entry->elem_seq = oper->elem.seq;
1149 ),
1150
1151 TP_printk("ref_root = %llu, bytenr = %llu, num_bytes = %llu, "
1152 "seq = %llu, elem.seq = %llu, type = %s",
1153 (unsigned long long)__entry->ref_root,
1154 (unsigned long long)__entry->bytenr,
1155 (unsigned long long)__entry->num_bytes,
1156 (unsigned long long)__entry->seq,
1157 (unsigned long long)__entry->elem_seq,
1158 show_oper_type(__entry->type))
1159);
1160
1161DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_account,
1162
1163 TP_PROTO(struct btrfs_qgroup_operation *oper),
1164
1165 TP_ARGS(oper)
1166);
1167
1168DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_record_ref,
1169
1170 TP_PROTO(struct btrfs_qgroup_operation *oper),
1171
1172 TP_ARGS(oper)
1173);
1174
1122#endif /* _TRACE_BTRFS_H */ 1175#endif /* _TRACE_BTRFS_H */
1123 1176
1124/* This part must be outside protection */ 1177/* This part must be outside protection */
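The new qgroup tracepoints are ordinary trace events in the btrfs system, so they can be toggled from tracefs like any other. A small hedged sketch, assuming tracing is mounted at the usual debugfs path and the process has the required privileges:

#include <stdio.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/events/btrfs/btrfs_qgroup_account/enable";
	FILE *f = fopen(enable, "w");

	if (!f)
		return 1;	/* not root, or tracefs not mounted here */
	fputs("1", f);
	fclose(f);
	return 0;
}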
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index d06d44363fea..bbc4de9baef7 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -69,6 +69,12 @@
69 { GC_GREEDY, "Greedy" }, \ 69 { GC_GREEDY, "Greedy" }, \
70 { GC_CB, "Cost-Benefit" }) 70 { GC_CB, "Cost-Benefit" })
71 71
72#define show_cpreason(type) \
73 __print_symbolic(type, \
74 { CP_UMOUNT, "Umount" }, \
75 { CP_SYNC, "Sync" }, \
76 { CP_DISCARD, "Discard" })
77
72struct victim_sel_policy; 78struct victim_sel_policy;
73 79
74DECLARE_EVENT_CLASS(f2fs__inode, 80DECLARE_EVENT_CLASS(f2fs__inode,
@@ -944,25 +950,25 @@ TRACE_EVENT(f2fs_submit_page_mbio,
944 950
945TRACE_EVENT(f2fs_write_checkpoint, 951TRACE_EVENT(f2fs_write_checkpoint,
946 952
947 TP_PROTO(struct super_block *sb, bool is_umount, char *msg), 953 TP_PROTO(struct super_block *sb, int reason, char *msg),
948 954
949 TP_ARGS(sb, is_umount, msg), 955 TP_ARGS(sb, reason, msg),
950 956
951 TP_STRUCT__entry( 957 TP_STRUCT__entry(
952 __field(dev_t, dev) 958 __field(dev_t, dev)
953 __field(bool, is_umount) 959 __field(int, reason)
954 __field(char *, msg) 960 __field(char *, msg)
955 ), 961 ),
956 962
957 TP_fast_assign( 963 TP_fast_assign(
958 __entry->dev = sb->s_dev; 964 __entry->dev = sb->s_dev;
959 __entry->is_umount = is_umount; 965 __entry->reason = reason;
960 __entry->msg = msg; 966 __entry->msg = msg;
961 ), 967 ),
962 968
963 TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", 969 TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
964 show_dev(__entry), 970 show_dev(__entry),
965 __entry->is_umount ? "clean umount" : "consistency", 971 show_cpreason(__entry->reason),
966 __entry->msg) 972 __entry->msg)
967); 973);
968 974
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index 59d11c22f076..a0d008070962 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -53,15 +53,15 @@ DECLARE_EVENT_CLASS(filelock_lease,
53 ), 53 ),
54 54
55 TP_fast_assign( 55 TP_fast_assign(
56 __entry->fl = fl; 56 __entry->fl = fl ? fl : NULL;
57 __entry->s_dev = inode->i_sb->s_dev; 57 __entry->s_dev = inode->i_sb->s_dev;
58 __entry->i_ino = inode->i_ino; 58 __entry->i_ino = inode->i_ino;
59 __entry->fl_next = fl->fl_next; 59 __entry->fl_next = fl ? fl->fl_next : NULL;
60 __entry->fl_owner = fl->fl_owner; 60 __entry->fl_owner = fl ? fl->fl_owner : NULL;
61 __entry->fl_flags = fl->fl_flags; 61 __entry->fl_flags = fl ? fl->fl_flags : 0;
62 __entry->fl_type = fl->fl_type; 62 __entry->fl_type = fl ? fl->fl_type : 0;
63 __entry->fl_break_time = fl->fl_break_time; 63 __entry->fl_break_time = fl ? fl->fl_break_time : 0;
64 __entry->fl_downgrade_time = fl->fl_downgrade_time; 64 __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
65 ), 65 ),
66 66
67 TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu", 67 TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 1c09820df585..3608bebd3d9c 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(softirq,
107 * @vec_nr: softirq vector number 107 * @vec_nr: softirq vector number
108 * 108 *
109 * When used in combination with the softirq_exit tracepoint 109 * When used in combination with the softirq_exit tracepoint
110 * we can determine the softirq handler runtine. 110 * we can determine the softirq handler routine.
111 */ 111 */
112DEFINE_EVENT(softirq, softirq_entry, 112DEFINE_EVENT(softirq, softirq_entry,
113 113
@@ -121,7 +121,7 @@ DEFINE_EVENT(softirq, softirq_entry,
121 * @vec_nr: softirq vector number 121 * @vec_nr: softirq vector number
122 * 122 *
123 * When used in combination with the softirq_entry tracepoint 123 * When used in combination with the softirq_entry tracepoint
124 * we can determine the softirq handler runtine. 124 * we can determine the softirq handler routine.
125 */ 125 */
126DEFINE_EVENT(softirq, softirq_exit, 126DEFINE_EVENT(softirq, softirq_exit,
127 127
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 908925ace776..6edf1f2028cd 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -95,6 +95,26 @@ TRACE_EVENT(kvm_ioapic_set_irq,
95 __entry->coalesced ? " (coalesced)" : "") 95 __entry->coalesced ? " (coalesced)" : "")
96); 96);
97 97
98TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
99 TP_PROTO(__u64 e),
100 TP_ARGS(e),
101
102 TP_STRUCT__entry(
103 __field( __u64, e )
104 ),
105
106 TP_fast_assign(
107 __entry->e = e;
108 ),
109
110 TP_printk("dst %x vec=%u (%s|%s|%s%s)",
111 (u8)(__entry->e >> 56), (u8)__entry->e,
112 __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
113 (__entry->e & (1<<11)) ? "logical" : "physical",
114 (__entry->e & (1<<15)) ? "level" : "edge",
115 (__entry->e & (1<<16)) ? "|masked" : "")
116);
117
98TRACE_EVENT(kvm_msi_set_irq, 118TRACE_EVENT(kvm_msi_set_irq,
99 TP_PROTO(__u64 address, __u64 data), 119 TP_PROTO(__u64 address, __u64 data),
100 TP_ARGS(address, data), 120 TP_ARGS(address, data),
@@ -205,24 +225,26 @@ TRACE_EVENT(kvm_fpu,
205); 225);
206 226
207TRACE_EVENT(kvm_age_page, 227TRACE_EVENT(kvm_age_page,
208 TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref), 228 TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
209 TP_ARGS(hva, slot, ref), 229 TP_ARGS(gfn, level, slot, ref),
210 230
211 TP_STRUCT__entry( 231 TP_STRUCT__entry(
212 __field( u64, hva ) 232 __field( u64, hva )
213 __field( u64, gfn ) 233 __field( u64, gfn )
234 __field( u8, level )
214 __field( u8, referenced ) 235 __field( u8, referenced )
215 ), 236 ),
216 237
217 TP_fast_assign( 238 TP_fast_assign(
218 __entry->hva = hva; 239 __entry->gfn = gfn;
219 __entry->gfn = 240 __entry->level = level;
220 slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT); 241 __entry->hva = ((gfn - slot->base_gfn) <<
242 PAGE_SHIFT) + slot->userspace_addr;
221 __entry->referenced = ref; 243 __entry->referenced = ref;
222 ), 244 ),
223 245
224 TP_printk("hva %llx gfn %llx %s", 246 TP_printk("hva %llx gfn %llx level %u %s",
225 __entry->hva, __entry->gfn, 247 __entry->hva, __entry->gfn, __entry->level,
226 __entry->referenced ? "YOUNG" : "OLD") 248 __entry->referenced ? "YOUNG" : "OLD")
227); 249);
228 250
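The kvm_age_page event now takes the gfn and level directly and derives the hva from the memslot, inverting the old hva-to-gfn computation. A standalone sketch of the two directions of that mapping; PAGE_SHIFT is assumed to be 12 (4 KiB pages) purely for the example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed page size for illustration */

/* old direction: hva -> gfn, as the tracepoint used to do */
static uint64_t hva_to_gfn(uint64_t hva, uint64_t base_gfn, uint64_t uaddr)
{
	return base_gfn + ((hva - uaddr) >> PAGE_SHIFT);
}

/* new direction: gfn -> hva, mirroring the TP_fast_assign above */
static uint64_t gfn_to_hva(uint64_t gfn, uint64_t base_gfn, uint64_t uaddr)
{
	return ((gfn - base_gfn) << PAGE_SHIFT) + uaddr;
}

int main(void)
{
	uint64_t base_gfn = 0x100, uaddr = 0x7f0000000000ULL;
	uint64_t hva = uaddr + (5ULL << PAGE_SHIFT);
	uint64_t gfn = hva_to_gfn(hva, base_gfn, uaddr);

	/* round-trips for page-aligned addresses */
	printf("hva %llx -> gfn %llx -> hva %llx\n",
	       (unsigned long long)hva,
	       (unsigned long long)gfn,
	       (unsigned long long)gfn_to_hva(gfn, base_gfn, uaddr));
	return 0;
}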
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index aca382266411..9b56f37148cf 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -180,9 +180,12 @@ TRACE_EVENT(rcu_grace_period_init,
180 * argument is a string as follows: 180 * argument is a string as follows:
181 * 181 *
182 * "WakeEmpty": Wake rcuo kthread, first CB to empty list. 182 * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
183 * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
183 * "WakeOvf": Wake rcuo kthread, CB list is huge. 184 * "WakeOvf": Wake rcuo kthread, CB list is huge.
185 * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
184 * "WakeNot": Don't wake rcuo kthread. 186 * "WakeNot": Don't wake rcuo kthread.
185 * "WakeNotPoll": Don't wake rcuo kthread because it is polling. 187 * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
188 * "DeferredWake": Carried out the "IsDeferred" wakeup.
186 * "Poll": Start of new polling cycle for rcu_nocb_poll. 189 * "Poll": Start of new polling cycle for rcu_nocb_poll.
187 * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll. 190 * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
188 * "WokeEmpty": rcuo kthread woke to find empty list. 191 * "WokeEmpty": rcuo kthread woke to find empty list.
diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild
index 81d2106287fe..245aa6e05e6a 100644
--- a/include/uapi/Kbuild
+++ b/include/uapi/Kbuild
@@ -12,3 +12,4 @@ header-y += video/
12header-y += drm/ 12header-y += drm/
13header-y += xen/ 13header-y += xen/
14header-y += scsi/ 14header-y += scsi/
15header-y += misc/
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 11d11bc5c78f..22749c134117 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -705,9 +705,11 @@ __SYSCALL(__NR_seccomp, sys_seccomp)
705__SYSCALL(__NR_getrandom, sys_getrandom) 705__SYSCALL(__NR_getrandom, sys_getrandom)
706#define __NR_memfd_create 279 706#define __NR_memfd_create 279
707__SYSCALL(__NR_memfd_create, sys_memfd_create) 707__SYSCALL(__NR_memfd_create, sys_memfd_create)
708#define __NR_bpf 280
709__SYSCALL(__NR_bpf, sys_bpf)
708 710
709#undef __NR_syscalls 711#undef __NR_syscalls
710#define __NR_syscalls 280 712#define __NR_syscalls 281
711 713
712/* 714/*
713 * All syscalls below here should go away really, 715 * All syscalls below here should go away really,
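The bpf() syscall gets number 280 in the asm-generic table and the syscall count moves to 281. Since no libc wrapper existed at the time, userspace invokes it through syscall(2); a hedged sketch of such a wrapper, with a fallback define that is only valid on asm-generic architectures:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

#ifndef __NR_bpf
#define __NR_bpf 280	/* asm-generic value from the hunk above */
#endif

/* Thin wrapper; returns whatever syscall(2) returns (fd on success,
 * -1 with errno set on failure). */
static long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}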
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 24e9033f8b3f..3cc8e1c2b996 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -67,6 +67,7 @@ header-y += bfs_fs.h
67header-y += binfmts.h 67header-y += binfmts.h
68header-y += blkpg.h 68header-y += blkpg.h
69header-y += blktrace_api.h 69header-y += blktrace_api.h
70header-y += bpf.h
70header-y += bpqether.h 71header-y += bpqether.h
71header-y += bsg.h 72header-y += bsg.h
72header-y += btrfs.h 73header-y += btrfs.h
@@ -240,6 +241,7 @@ header-y += matroxfb.h
240header-y += mdio.h 241header-y += mdio.h
241header-y += media.h 242header-y += media.h
242header-y += mei.h 243header-y += mei.h
244header-y += memfd.h
243header-y += mempolicy.h 245header-y += mempolicy.h
244header-y += meye.h 246header-y += meye.h
245header-y += mic_common.h 247header-y += mic_common.h
@@ -353,6 +355,7 @@ header-y += serio.h
353header-y += shm.h 355header-y += shm.h
354header-y += signal.h 356header-y += signal.h
355header-y += signalfd.h 357header-y += signalfd.h
358header-y += smiapp.h
356header-y += snmp.h 359header-y += snmp.h
357header-y += sock_diag.h 360header-y += sock_diag.h
358header-y += socket.h 361header-y += socket.h
@@ -395,6 +398,7 @@ header-y += un.h
395header-y += unistd.h 398header-y += unistd.h
396header-y += unix_diag.h 399header-y += unix_diag.h
397header-y += usbdevice_fs.h 400header-y += usbdevice_fs.h
401header-y += usbip.h
398header-y += utime.h 402header-y += utime.h
399header-y += utsname.h 403header-y += utsname.h
400header-y += uuid.h 404header-y += uuid.h
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
new file mode 100644
index 000000000000..31b0ac208a52
--- /dev/null
+++ b/include/uapi/linux/bpf.h
@@ -0,0 +1,155 @@
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#ifndef _UAPI__LINUX_BPF_H__
8#define _UAPI__LINUX_BPF_H__
9
10#include <linux/types.h>
11
12/* Extended instruction set based on top of classic BPF */
13
14/* instruction classes */
15#define BPF_ALU64 0x07 /* alu mode in double word width */
16
17/* ld/ldx fields */
18#define BPF_DW 0x18 /* double word */
19#define BPF_XADD 0xc0 /* exclusive add */
20
21/* alu/jmp fields */
22#define BPF_MOV 0xb0 /* mov reg to reg */
23#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
24
25/* change endianness of a register */
26#define BPF_END 0xd0 /* flags for endianness conversion: */
27#define BPF_TO_LE 0x00 /* convert to little-endian */
28#define BPF_TO_BE 0x08 /* convert to big-endian */
29#define BPF_FROM_LE BPF_TO_LE
30#define BPF_FROM_BE BPF_TO_BE
31
32#define BPF_JNE 0x50 /* jump != */
33#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
34#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
35#define BPF_CALL 0x80 /* function call */
36#define BPF_EXIT 0x90 /* function return */
37
38/* Register numbers */
39enum {
40 BPF_REG_0 = 0,
41 BPF_REG_1,
42 BPF_REG_2,
43 BPF_REG_3,
44 BPF_REG_4,
45 BPF_REG_5,
46 BPF_REG_6,
47 BPF_REG_7,
48 BPF_REG_8,
49 BPF_REG_9,
50 BPF_REG_10,
51 __MAX_BPF_REG,
52};
53
54/* BPF has 10 general purpose 64-bit registers and stack frame. */
55#define MAX_BPF_REG __MAX_BPF_REG
56
57struct bpf_insn {
58 __u8 code; /* opcode */
59 __u8 dst_reg:4; /* dest register */
60 __u8 src_reg:4; /* source register */
61 __s16 off; /* signed offset */
62 __s32 imm; /* signed immediate constant */
63};
64
65/* BPF syscall commands */
66enum bpf_cmd {
67 /* create a map with given type and attributes
68 * fd = bpf(BPF_MAP_CREATE, union bpf_attr *, u32 size)
69 * returns fd or negative error
70 * map is deleted when fd is closed
71 */
72 BPF_MAP_CREATE,
73
74 /* lookup key in a given map
75 * err = bpf(BPF_MAP_LOOKUP_ELEM, union bpf_attr *attr, u32 size)
76 * Using attr->map_fd, attr->key, attr->value
77 * returns zero and stores found elem into value
78 * or negative error
79 */
80 BPF_MAP_LOOKUP_ELEM,
81
82 /* create or update key/value pair in a given map
83 * err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size)
84 * Using attr->map_fd, attr->key, attr->value
85 * returns zero or negative error
86 */
87 BPF_MAP_UPDATE_ELEM,
88
89 /* find and delete elem by key in a given map
90 * err = bpf(BPF_MAP_DELETE_ELEM, union bpf_attr *attr, u32 size)
91 * Using attr->map_fd, attr->key
92 * returns zero or negative error
93 */
94 BPF_MAP_DELETE_ELEM,
95
96 /* lookup key in a given map and return next key
97 * err = bpf(BPF_MAP_GET_NEXT_KEY, union bpf_attr *attr, u32 size)
98 * Using attr->map_fd, attr->key, attr->next_key
99 * returns zero and stores next key or negative error
100 */
101 BPF_MAP_GET_NEXT_KEY,
102
103 /* verify and load eBPF program
104 * prog_fd = bpf(BPF_PROG_LOAD, union bpf_attr *attr, u32 size)
105 * Using attr->prog_type, attr->insns, attr->license
106 * returns fd or negative error
107 */
108 BPF_PROG_LOAD,
109};
110
111enum bpf_map_type {
112 BPF_MAP_TYPE_UNSPEC,
113};
114
115enum bpf_prog_type {
116 BPF_PROG_TYPE_UNSPEC,
117};
118
119union bpf_attr {
120 struct { /* anonymous struct used by BPF_MAP_CREATE command */
121 __u32 map_type; /* one of enum bpf_map_type */
122 __u32 key_size; /* size of key in bytes */
123 __u32 value_size; /* size of value in bytes */
124 __u32 max_entries; /* max number of entries in a map */
125 };
126
127 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
128 __u32 map_fd;
129 __aligned_u64 key;
130 union {
131 __aligned_u64 value;
132 __aligned_u64 next_key;
133 };
134 };
135
136 struct { /* anonymous struct used by BPF_PROG_LOAD command */
137 __u32 prog_type; /* one of enum bpf_prog_type */
138 __u32 insn_cnt;
139 __aligned_u64 insns;
140 __aligned_u64 license;
141 __u32 log_level; /* verbosity level of verifier */
142 __u32 log_size; /* size of user buffer */
143 __aligned_u64 log_buf; /* user supplied buffer */
144 };
145} __attribute__((aligned(8)));
146
147/* integer value in 'imm' field of BPF_CALL instruction selects which helper
148 * function eBPF program intends to call
149 */
150enum bpf_func_id {
151 BPF_FUNC_unspec,
152 __BPF_FUNC_MAX_ID,
153};
154
155#endif /* _UAPI__LINUX_BPF_H__ */
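All commands funnel through the single bpf() syscall with a zeroed union bpf_attr, and each command reads only its own anonymous struct. A hedged userspace sketch of the map commands; note that only BPF_MAP_TYPE_UNSPEC exists at this point, so the create call is expected to fail until a concrete map type is added by a later patch:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

#ifndef __NR_bpf
#define __NR_bpf 280		/* asm-generic number; other arches differ */
#endif

static long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static int map_example(void)
{
	union bpf_attr attr;
	unsigned long long key = 1, value = 42;
	long map_fd;

	/* BPF_MAP_CREATE: uses map_type/key_size/value_size/max_entries */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_UNSPEC;	/* placeholder type */
	attr.key_size = sizeof(key);
	attr.value_size = sizeof(value);
	attr.max_entries = 16;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0)
		return -1;

	/* BPF_MAP_UPDATE_ELEM / BPF_MAP_LOOKUP_ELEM: use map_fd/key/value */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)&key;
	attr.value = (__u64)(unsigned long)&value;
	if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
		return -1;
	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}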
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index e3c7a719c76b..99b43056a6fe 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -209,6 +209,33 @@ struct ethtool_value {
209 __u32 data; 209 __u32 data;
210}; 210};
211 211
212enum tunable_id {
213 ETHTOOL_ID_UNSPEC,
214 ETHTOOL_RX_COPYBREAK,
215 ETHTOOL_TX_COPYBREAK,
216};
217
218enum tunable_type_id {
219 ETHTOOL_TUNABLE_UNSPEC,
220 ETHTOOL_TUNABLE_U8,
221 ETHTOOL_TUNABLE_U16,
222 ETHTOOL_TUNABLE_U32,
223 ETHTOOL_TUNABLE_U64,
224 ETHTOOL_TUNABLE_STRING,
225 ETHTOOL_TUNABLE_S8,
226 ETHTOOL_TUNABLE_S16,
227 ETHTOOL_TUNABLE_S32,
228 ETHTOOL_TUNABLE_S64,
229};
230
231struct ethtool_tunable {
232 __u32 cmd;
233 __u32 id;
234 __u32 type_id;
235 __u32 len;
236 void *data[0];
237};
238
212/** 239/**
213 * struct ethtool_regs - hardware register dump 240 * struct ethtool_regs - hardware register dump
214 * @cmd: Command number = %ETHTOOL_GREGS 241 * @cmd: Command number = %ETHTOOL_GREGS
@@ -1152,6 +1179,8 @@ enum ethtool_sfeatures_retval_bits {
1152 1179
1153#define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */ 1180#define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */
1154#define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */ 1181#define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */
1182#define ETHTOOL_GTUNABLE 0x00000048 /* Get tunable configuration */
1183#define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */
1155 1184
1156/* compatibility with older code */ 1185/* compatibility with older code */
1157#define SPARC_ETH_GSET ETHTOOL_GSET 1186#define SPARC_ETH_GSET ETHTOOL_GSET
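A tunable request is a struct ethtool_tunable header followed by len bytes of value, passed through the usual SIOCETHTOOL ioctl. A hedged sketch that reads ETHTOOL_RX_COPYBREAK for an interface; the interface name is an example and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct {
		struct ethtool_tunable hdr;
		__u32 value;		/* storage behind hdr.data[] */
	} req;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&req, 0, sizeof(req));
	req.hdr.cmd = ETHTOOL_GTUNABLE;
	req.hdr.id = ETHTOOL_RX_COPYBREAK;
	req.hdr.type_id = ETHTOOL_TUNABLE_U32;
	req.hdr.len = sizeof(__u32);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */
	ifr.ifr_data = (void *)&req;

	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-copybreak: %u\n", req.value);
	if (fd >= 0)
		close(fd);
	return 0;
}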
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
new file mode 100644
index 000000000000..8df06894da23
--- /dev/null
+++ b/include/uapi/linux/fou.h
@@ -0,0 +1,39 @@
1/* fou.h - FOU Interface */
2
3#ifndef _UAPI_LINUX_FOU_H
4#define _UAPI_LINUX_FOU_H
5
6/* NETLINK_GENERIC related info
7 */
8#define FOU_GENL_NAME "fou"
9#define FOU_GENL_VERSION 0x1
10
11enum {
12 FOU_ATTR_UNSPEC,
13 FOU_ATTR_PORT, /* u16 */
14 FOU_ATTR_AF, /* u8 */
15 FOU_ATTR_IPPROTO, /* u8 */
16 FOU_ATTR_TYPE, /* u8 */
17
18 __FOU_ATTR_MAX,
19};
20
21#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1)
22
23enum {
24 FOU_CMD_UNSPEC,
25 FOU_CMD_ADD,
26 FOU_CMD_DEL,
27
28 __FOU_CMD_MAX,
29};
30
31enum {
32 FOU_ENCAP_UNSPEC,
33 FOU_ENCAP_DIRECT,
34 FOU_ENCAP_GUE,
35};
36
37#define FOU_CMD_MAX (__FOU_CMD_MAX - 1)
38
39#endif /* _UAPI_LINUX_FOU_H */
diff --git a/include/uapi/linux/genwqe/genwqe_card.h b/include/uapi/linux/genwqe/genwqe_card.h
index 4fc065f29255..baa93fb4cd4f 100644
--- a/include/uapi/linux/genwqe/genwqe_card.h
+++ b/include/uapi/linux/genwqe/genwqe_card.h
@@ -8,7 +8,7 @@
8 * 8 *
9 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> 9 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
10 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> 10 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
11 * Author: Michael Jung <mijung@de.ibm.com> 11 * Author: Michael Jung <mijung@gmx.net>
12 * Author: Michael Ruettger <michael@ibmra.de> 12 * Author: Michael Ruettger <michael@ibmra.de>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index 78e4a86030dd..0a8e6badb29b 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -137,7 +137,7 @@ struct hv_do_fcopy {
137 __u64 offset; 137 __u64 offset;
138 __u32 size; 138 __u32 size;
139 __u8 data[DATA_FRAGMENT]; 139 __u8 data[DATA_FRAGMENT];
140}; 140} __attribute__((packed));
141 141
142/* 142/*
143 * An implementation of HyperV key value pair (KVP) functionality for Linux. 143 * An implementation of HyperV key value pair (KVP) functionality for Linux.
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 0f8210b8e0bc..aa63ed023c2b 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -128,6 +128,7 @@
128#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ 128#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */
129#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */ 129#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */
130#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */ 130#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */
131#define ETH_P_XDSA 0x00F8 /* Multiplexed DSA protocol */
131 132
132/* 133/*
133 * This is an Ethernet frame header. 134 * This is an Ethernet frame header.
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index ff957604a721..0bdb77e16875 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -215,6 +215,18 @@ enum in6_addr_gen_mode {
215 IN6_ADDR_GEN_MODE_NONE, 215 IN6_ADDR_GEN_MODE_NONE,
216}; 216};
217 217
218/* Bridge section */
219
220enum {
221 IFLA_BR_UNSPEC,
222 IFLA_BR_FORWARD_DELAY,
223 IFLA_BR_HELLO_TIME,
224 IFLA_BR_MAX_AGE,
225 __IFLA_BR_MAX,
226};
227
228#define IFLA_BR_MAX (__IFLA_BR_MAX - 1)
229
218enum { 230enum {
219 BRIDGE_MODE_UNSPEC, 231 BRIDGE_MODE_UNSPEC,
220 BRIDGE_MODE_HAIRPIN, 232 BRIDGE_MODE_HAIRPIN,
@@ -291,6 +303,10 @@ enum {
291 IFLA_MACVLAN_UNSPEC, 303 IFLA_MACVLAN_UNSPEC,
292 IFLA_MACVLAN_MODE, 304 IFLA_MACVLAN_MODE,
293 IFLA_MACVLAN_FLAGS, 305 IFLA_MACVLAN_FLAGS,
306 IFLA_MACVLAN_MACADDR_MODE,
307 IFLA_MACVLAN_MACADDR,
308 IFLA_MACVLAN_MACADDR_DATA,
309 IFLA_MACVLAN_MACADDR_COUNT,
294 __IFLA_MACVLAN_MAX, 310 __IFLA_MACVLAN_MAX,
295}; 311};
296 312
@@ -301,6 +317,14 @@ enum macvlan_mode {
301 MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ 317 MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */
302 MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ 318 MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
303 MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ 319 MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
320 MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */
321};
322
323enum macvlan_macaddr_mode {
324 MACVLAN_MACADDR_ADD,
325 MACVLAN_MACADDR_DEL,
326 MACVLAN_MACADDR_FLUSH,
327 MACVLAN_MACADDR_SET,
304}; 328};
305 329
306#define MACVLAN_FLAG_NOPROMISC 1 330#define MACVLAN_FLAG_NOPROMISC 1
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 3bce9e9d9f7c..280d9e092283 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -53,10 +53,23 @@ enum {
53 IFLA_IPTUN_6RD_RELAY_PREFIX, 53 IFLA_IPTUN_6RD_RELAY_PREFIX,
54 IFLA_IPTUN_6RD_PREFIXLEN, 54 IFLA_IPTUN_6RD_PREFIXLEN,
55 IFLA_IPTUN_6RD_RELAY_PREFIXLEN, 55 IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
56 IFLA_IPTUN_ENCAP_TYPE,
57 IFLA_IPTUN_ENCAP_FLAGS,
58 IFLA_IPTUN_ENCAP_SPORT,
59 IFLA_IPTUN_ENCAP_DPORT,
56 __IFLA_IPTUN_MAX, 60 __IFLA_IPTUN_MAX,
57}; 61};
58#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1) 62#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
59 63
64enum tunnel_encap_types {
65 TUNNEL_ENCAP_NONE,
66 TUNNEL_ENCAP_FOU,
67 TUNNEL_ENCAP_GUE,
68};
69
70#define TUNNEL_ENCAP_FLAG_CSUM (1<<0)
71#define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1)
72
60/* SIT-mode i_flags */ 73/* SIT-mode i_flags */
61#define SIT_ISATAP 0x0001 74#define SIT_ISATAP 0x0001
62 75
@@ -94,6 +107,10 @@ enum {
94 IFLA_GRE_ENCAP_LIMIT, 107 IFLA_GRE_ENCAP_LIMIT,
95 IFLA_GRE_FLOWINFO, 108 IFLA_GRE_FLOWINFO,
96 IFLA_GRE_FLAGS, 109 IFLA_GRE_FLAGS,
110 IFLA_GRE_ENCAP_TYPE,
111 IFLA_GRE_ENCAP_FLAGS,
112 IFLA_GRE_ENCAP_SPORT,
113 IFLA_GRE_ENCAP_DPORT,
97 __IFLA_GRE_MAX, 114 __IFLA_GRE_MAX,
98}; 115};
99 116
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index bbde90fa5838..d65c0a09efd3 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -110,10 +110,10 @@ enum {
110 INET_DIAG_TCLASS, 110 INET_DIAG_TCLASS,
111 INET_DIAG_SKMEMINFO, 111 INET_DIAG_SKMEMINFO,
112 INET_DIAG_SHUTDOWN, 112 INET_DIAG_SHUTDOWN,
113 INET_DIAG_DCTCPINFO,
113}; 114};
114 115
115#define INET_DIAG_MAX INET_DIAG_SHUTDOWN 116#define INET_DIAG_MAX INET_DIAG_DCTCPINFO
116
117 117
118/* INET_DIAG_MEM */ 118/* INET_DIAG_MEM */
119 119
@@ -133,5 +133,14 @@ struct tcpvegas_info {
133 __u32 tcpv_minrtt; 133 __u32 tcpv_minrtt;
134}; 134};
135 135
136/* INET_DIAG_DCTCPINFO */
137
138struct tcp_dctcp_info {
139 __u16 dctcp_enabled;
140 __u16 dctcp_ce_state;
141 __u32 dctcp_alpha;
142 __u32 dctcp_ab_ecn;
143 __u32 dctcp_ab_tot;
144};
136 145
137#endif /* _UAPI_INET_DIAG_H_ */ 146#endif /* _UAPI_INET_DIAG_H_ */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 19df18c9b8be..1874ebe9ac1e 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -165,6 +165,7 @@ struct input_keymap_entry {
165#define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */ 165#define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */
166#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */ 166#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */
167#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */ 167#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */
168#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */
168 169
169#define INPUT_PROP_MAX 0x1f 170#define INPUT_PROP_MAX 0x1f
170#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1) 171#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index fbcffe8041f7..cabe95d5b461 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -384,6 +384,9 @@ enum {
384 IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */ 384 IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */
385 385
386 IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */ 386 IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */
387
388 IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */
389
387 __IPVS_DEST_ATTR_MAX, 390 __IPVS_DEST_ATTR_MAX,
388}; 391};
389 392
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 5116a0e48172..2f96d233c980 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -31,6 +31,7 @@
31 31
32#define KPF_KSM 21 32#define KPF_KSM 21
33#define KPF_THP 22 33#define KPF_THP 22
34#define KPF_BALLOON 23
34 35
35 36
36#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */ 37#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
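KPF_BALLOON is reported, like the other bits, through /proc/kpageflags, which exposes one 64-bit flags word per page frame. A hedged sketch that tests the new bit for an arbitrary example PFN (requires root; the PFN is made up):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/kernel-page-flags.h>

int main(void)
{
	uint64_t pfn = 0x10000;		/* example page frame number */
	uint64_t flags;
	int fd = open("/proc/kpageflags", O_RDONLY);

	if (fd < 0 ||
	    pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags))
		return 1;
	printf("pfn %#llx balloon: %s\n", (unsigned long long)pfn,
	       (flags & (1ULL << KPF_BALLOON)) ? "yes" : "no");
	close(fd);
	return 0;
}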
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index cf3a2ff440e4..60768822b140 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -654,9 +654,7 @@ struct kvm_ppc_smmu_info {
654#endif 654#endif
655/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ 655/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
656#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 656#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
657#ifdef __KVM_HAVE_USER_NMI
658#define KVM_CAP_USER_NMI 22 657#define KVM_CAP_USER_NMI 22
659#endif
660#ifdef __KVM_HAVE_GUEST_DEBUG 658#ifdef __KVM_HAVE_GUEST_DEBUG
661#define KVM_CAP_SET_GUEST_DEBUG 23 659#define KVM_CAP_SET_GUEST_DEBUG 23
662#endif 660#endif
@@ -738,9 +736,7 @@ struct kvm_ppc_smmu_info {
738#define KVM_CAP_PPC_GET_SMMU_INFO 78 736#define KVM_CAP_PPC_GET_SMMU_INFO 78
739#define KVM_CAP_S390_COW 79 737#define KVM_CAP_S390_COW 79
740#define KVM_CAP_PPC_ALLOC_HTAB 80 738#define KVM_CAP_PPC_ALLOC_HTAB 80
741#ifdef __KVM_HAVE_READONLY_MEM
742#define KVM_CAP_READONLY_MEM 81 739#define KVM_CAP_READONLY_MEM 81
743#endif
744#define KVM_CAP_IRQFD_RESAMPLE 82 740#define KVM_CAP_IRQFD_RESAMPLE 82
745#define KVM_CAP_PPC_BOOKE_WATCHDOG 83 741#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
746#define KVM_CAP_PPC_HTAB_FD 84 742#define KVM_CAP_PPC_HTAB_FD 84
@@ -947,15 +943,25 @@ struct kvm_device_attr {
947 __u64 addr; /* userspace address of attr data */ 943 __u64 addr; /* userspace address of attr data */
948}; 944};
949 945
950#define KVM_DEV_TYPE_FSL_MPIC_20 1
951#define KVM_DEV_TYPE_FSL_MPIC_42 2
952#define KVM_DEV_TYPE_XICS 3
953#define KVM_DEV_TYPE_VFIO 4
954#define KVM_DEV_VFIO_GROUP 1 946#define KVM_DEV_VFIO_GROUP 1
955#define KVM_DEV_VFIO_GROUP_ADD 1 947#define KVM_DEV_VFIO_GROUP_ADD 1
956#define KVM_DEV_VFIO_GROUP_DEL 2 948#define KVM_DEV_VFIO_GROUP_DEL 2
957#define KVM_DEV_TYPE_ARM_VGIC_V2 5 949
958#define KVM_DEV_TYPE_FLIC 6 950enum kvm_device_type {
951 KVM_DEV_TYPE_FSL_MPIC_20 = 1,
952#define KVM_DEV_TYPE_FSL_MPIC_20 KVM_DEV_TYPE_FSL_MPIC_20
953 KVM_DEV_TYPE_FSL_MPIC_42,
954#define KVM_DEV_TYPE_FSL_MPIC_42 KVM_DEV_TYPE_FSL_MPIC_42
955 KVM_DEV_TYPE_XICS,
956#define KVM_DEV_TYPE_XICS KVM_DEV_TYPE_XICS
957 KVM_DEV_TYPE_VFIO,
958#define KVM_DEV_TYPE_VFIO KVM_DEV_TYPE_VFIO
959 KVM_DEV_TYPE_ARM_VGIC_V2,
960#define KVM_DEV_TYPE_ARM_VGIC_V2 KVM_DEV_TYPE_ARM_VGIC_V2
961 KVM_DEV_TYPE_FLIC,
962#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC
963 KVM_DEV_TYPE_MAX,
964};
959 965
960/* 966/*
961 * ioctls for VM fds 967 * ioctls for VM fds
@@ -1093,7 +1099,7 @@ struct kvm_s390_ucas_mapping {
1093#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) 1099#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
1094#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) 1100#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
1095#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) 1101#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
1096/* Available with KVM_CAP_NMI */ 1102/* Available with KVM_CAP_USER_NMI */
1097#define KVM_NMI _IO(KVMIO, 0x9a) 1103#define KVM_NMI _IO(KVMIO, 0x9a)
1098/* Available with KVM_CAP_SET_GUEST_DEBUG */ 1104/* Available with KVM_CAP_SET_GUEST_DEBUG */
1099#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug) 1105#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug)
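Turning the device types into an enum while keeping the self-referential #defines means both compile-time probes (#ifdef) and the KVM_CREATE_DEVICE ioctl keep working unchanged. A hedged sketch of the usual creation pattern; vm_fd is assumed to come from KVM_CREATE_VM and error handling is omitted:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vgic_v2(int vm_fd)
{
#ifdef KVM_DEV_TYPE_ARM_VGIC_V2	/* still defined, thanks to the #define alias */
	struct kvm_create_device dev = {
		.type = KVM_DEV_TYPE_ARM_VGIC_V2,
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &dev) < 0)
		return -1;
	return dev.fd;		/* device fd filled in by the kernel */
#else
	return -1;		/* header too old for this device type */
#endif
}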
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 78c2f2e79920..ca03119111a2 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -115,6 +115,9 @@ enum {
115 IPSET_ATTR_BYTES, 115 IPSET_ATTR_BYTES,
116 IPSET_ATTR_PACKETS, 116 IPSET_ATTR_PACKETS,
117 IPSET_ATTR_COMMENT, 117 IPSET_ATTR_COMMENT,
118 IPSET_ATTR_SKBMARK,
119 IPSET_ATTR_SKBPRIO,
120 IPSET_ATTR_SKBQUEUE,
118 __IPSET_ATTR_ADT_MAX, 121 __IPSET_ATTR_ADT_MAX,
119}; 122};
120#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) 123#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
@@ -147,6 +150,7 @@ enum ipset_errno {
147 IPSET_ERR_COUNTER, 150 IPSET_ERR_COUNTER,
148 IPSET_ERR_COMMENT, 151 IPSET_ERR_COMMENT,
149 IPSET_ERR_INVALID_MARKMASK, 152 IPSET_ERR_INVALID_MARKMASK,
153 IPSET_ERR_SKBINFO,
150 154
151 /* Type specific error codes */ 155 /* Type specific error codes */
152 IPSET_ERR_TYPE_SPECIFIC = 4352, 156 IPSET_ERR_TYPE_SPECIFIC = 4352,
@@ -170,6 +174,12 @@ enum ipset_cmd_flags {
170 IPSET_FLAG_MATCH_COUNTERS = (1 << IPSET_FLAG_BIT_MATCH_COUNTERS), 174 IPSET_FLAG_MATCH_COUNTERS = (1 << IPSET_FLAG_BIT_MATCH_COUNTERS),
171 IPSET_FLAG_BIT_RETURN_NOMATCH = 7, 175 IPSET_FLAG_BIT_RETURN_NOMATCH = 7,
172 IPSET_FLAG_RETURN_NOMATCH = (1 << IPSET_FLAG_BIT_RETURN_NOMATCH), 176 IPSET_FLAG_RETURN_NOMATCH = (1 << IPSET_FLAG_BIT_RETURN_NOMATCH),
177 IPSET_FLAG_BIT_MAP_SKBMARK = 8,
178 IPSET_FLAG_MAP_SKBMARK = (1 << IPSET_FLAG_BIT_MAP_SKBMARK),
179 IPSET_FLAG_BIT_MAP_SKBPRIO = 9,
180 IPSET_FLAG_MAP_SKBPRIO = (1 << IPSET_FLAG_BIT_MAP_SKBPRIO),
181 IPSET_FLAG_BIT_MAP_SKBQUEUE = 10,
182 IPSET_FLAG_MAP_SKBQUEUE = (1 << IPSET_FLAG_BIT_MAP_SKBQUEUE),
173 IPSET_FLAG_CMD_MAX = 15, 183 IPSET_FLAG_CMD_MAX = 15,
174}; 184};
175 185
@@ -187,6 +197,8 @@ enum ipset_cadt_flags {
187 IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT), 197 IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT),
188 IPSET_FLAG_BIT_WITH_FORCEADD = 5, 198 IPSET_FLAG_BIT_WITH_FORCEADD = 5,
189 IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD), 199 IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD),
200 IPSET_FLAG_BIT_WITH_SKBINFO = 6,
201 IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO),
190 IPSET_FLAG_CADT_MAX = 15, 202 IPSET_FLAG_CADT_MAX = 15,
191}; 203};
192 204
diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h
index 1ad3659102b6..0880781ad7b6 100644
--- a/include/uapi/linux/netfilter/nf_nat.h
+++ b/include/uapi/linux/netfilter/nf_nat.h
@@ -13,6 +13,11 @@
13#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ 13#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
14 (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) 14 (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
15 15
16#define NF_NAT_RANGE_MASK \
17 (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
18 NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
19 NF_NAT_RANGE_PROTO_RANDOM_FULLY)
20
16struct nf_nat_ipv4_range { 21struct nf_nat_ipv4_range {
17 unsigned int flags; 22 unsigned int flags;
18 __be32 min_ip; 23 __be32 min_ip;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 801bdd1e56e3..f31fe7b660a5 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -51,6 +51,8 @@ enum nft_verdicts {
51 * @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes) 51 * @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes)
52 * @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes) 52 * @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes)
53 * @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes) 53 * @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes)
54 * @NFT_MSG_NEWGEN: announce a new generation, only for events (enum nft_gen_attributes)
55 * @NFT_MSG_GETGEN: get the rule-set generation (enum nft_gen_attributes)
54 */ 56 */
55enum nf_tables_msg_types { 57enum nf_tables_msg_types {
56 NFT_MSG_NEWTABLE, 58 NFT_MSG_NEWTABLE,
@@ -68,6 +70,8 @@ enum nf_tables_msg_types {
68 NFT_MSG_NEWSETELEM, 70 NFT_MSG_NEWSETELEM,
69 NFT_MSG_GETSETELEM, 71 NFT_MSG_GETSETELEM,
70 NFT_MSG_DELSETELEM, 72 NFT_MSG_DELSETELEM,
73 NFT_MSG_NEWGEN,
74 NFT_MSG_GETGEN,
71 NFT_MSG_MAX, 75 NFT_MSG_MAX,
72}; 76};
73 77
@@ -571,6 +575,10 @@ enum nft_exthdr_attributes {
571 * @NFT_META_L4PROTO: layer 4 protocol number 575 * @NFT_META_L4PROTO: layer 4 protocol number
572 * @NFT_META_BRI_IIFNAME: packet input bridge interface name 576 * @NFT_META_BRI_IIFNAME: packet input bridge interface name
573 * @NFT_META_BRI_OIFNAME: packet output bridge interface name 577 * @NFT_META_BRI_OIFNAME: packet output bridge interface name
578 * @NFT_META_PKTTYPE: packet type (skb->pkt_type), special handling for loopback
579 * @NFT_META_CPU: cpu id through smp_processor_id()
580 * @NFT_META_IIFGROUP: packet input interface group
581 * @NFT_META_OIFGROUP: packet output interface group
574 */ 582 */
575enum nft_meta_keys { 583enum nft_meta_keys {
576 NFT_META_LEN, 584 NFT_META_LEN,
@@ -592,6 +600,10 @@ enum nft_meta_keys {
592 NFT_META_L4PROTO, 600 NFT_META_L4PROTO,
593 NFT_META_BRI_IIFNAME, 601 NFT_META_BRI_IIFNAME,
594 NFT_META_BRI_OIFNAME, 602 NFT_META_BRI_OIFNAME,
603 NFT_META_PKTTYPE,
604 NFT_META_CPU,
605 NFT_META_IIFGROUP,
606 NFT_META_OIFGROUP,
595}; 607};
596 608
597/** 609/**
@@ -737,13 +749,34 @@ enum nft_queue_attributes {
737 * 749 *
738 * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable 750 * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
739 * @NFT_REJECT_TCP_RST: reject using TCP RST 751 * @NFT_REJECT_TCP_RST: reject using TCP RST
752 * @NFT_REJECT_ICMPX_UNREACH: abstracted ICMP unreachable for bridge and inet
740 */ 753 */
741enum nft_reject_types { 754enum nft_reject_types {
742 NFT_REJECT_ICMP_UNREACH, 755 NFT_REJECT_ICMP_UNREACH,
743 NFT_REJECT_TCP_RST, 756 NFT_REJECT_TCP_RST,
757 NFT_REJECT_ICMPX_UNREACH,
744}; 758};
745 759
746/** 760/**
761 * enum nft_reject_code - Generic reject codes for IPv4/IPv6
762 *
763 * @NFT_REJECT_ICMPX_NO_ROUTE: no route to host / network unreachable
764 * @NFT_REJECT_ICMPX_PORT_UNREACH: port unreachable
765 * @NFT_REJECT_ICMPX_HOST_UNREACH: host unreachable
766 * @NFT_REJECT_ICMPX_ADMIN_PROHIBITED: administratively prohibited
767 *
768 * These codes are mapped to real ICMP and ICMPv6 codes.
769 */
770enum nft_reject_inet_code {
771 NFT_REJECT_ICMPX_NO_ROUTE = 0,
772 NFT_REJECT_ICMPX_PORT_UNREACH,
773 NFT_REJECT_ICMPX_HOST_UNREACH,
774 NFT_REJECT_ICMPX_ADMIN_PROHIBITED,
775 __NFT_REJECT_ICMPX_MAX
776};
777#define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX - 1)
778
779/**
747 * enum nft_reject_attributes - nf_tables reject expression netlink attributes 780 * enum nft_reject_attributes - nf_tables reject expression netlink attributes
748 * 781 *
749 * @NFTA_REJECT_TYPE: packet type to use (NLA_U32: nft_reject_types) 782 * @NFTA_REJECT_TYPE: packet type to use (NLA_U32: nft_reject_types)
@@ -777,6 +810,7 @@ enum nft_nat_types {
777 * @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers) 810 * @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers)
778 * @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers) 811 * @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
779 * @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers) 812 * @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
813 * @NFTA_NAT_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
780 */ 814 */
781enum nft_nat_attributes { 815enum nft_nat_attributes {
782 NFTA_NAT_UNSPEC, 816 NFTA_NAT_UNSPEC,
@@ -786,8 +820,33 @@ enum nft_nat_attributes {
786 NFTA_NAT_REG_ADDR_MAX, 820 NFTA_NAT_REG_ADDR_MAX,
787 NFTA_NAT_REG_PROTO_MIN, 821 NFTA_NAT_REG_PROTO_MIN,
788 NFTA_NAT_REG_PROTO_MAX, 822 NFTA_NAT_REG_PROTO_MAX,
823 NFTA_NAT_FLAGS,
789 __NFTA_NAT_MAX 824 __NFTA_NAT_MAX
790}; 825};
791#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1) 826#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1)
792 827
828/**
829 * enum nft_masq_attributes - nf_tables masquerade expression attributes
830 *
831 * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
832 */
833enum nft_masq_attributes {
834 NFTA_MASQ_UNSPEC,
835 NFTA_MASQ_FLAGS,
836 __NFTA_MASQ_MAX
837};
838#define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1)
839
840/**
841 * enum nft_gen_attributes - nf_tables ruleset generation attributes
842 *
843 * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
844 */
845enum nft_gen_attributes {
846 NFTA_GEN_UNSPEC,
847 NFTA_GEN_ID,
848 __NFTA_GEN_MAX
849};
850#define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1)
851
793#endif /* _LINUX_NF_TABLES_H */ 852#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h
index 51404ec19022..f3e34dbbf966 100644
--- a/include/uapi/linux/netfilter/nfnetlink_acct.h
+++ b/include/uapi/linux/netfilter/nfnetlink_acct.h
@@ -28,9 +28,17 @@ enum nfnl_acct_type {
28 NFACCT_USE, 28 NFACCT_USE,
29 NFACCT_FLAGS, 29 NFACCT_FLAGS,
30 NFACCT_QUOTA, 30 NFACCT_QUOTA,
31 NFACCT_FILTER,
31 __NFACCT_MAX 32 __NFACCT_MAX
32}; 33};
33#define NFACCT_MAX (__NFACCT_MAX - 1) 34#define NFACCT_MAX (__NFACCT_MAX - 1)
34 35
36enum nfnl_attr_filter_type {
37 NFACCT_FILTER_UNSPEC,
38 NFACCT_FILTER_MASK,
39 NFACCT_FILTER_VALUE,
40 __NFACCT_FILTER_MAX
41};
42#define NFACCT_FILTER_MAX (__NFACCT_FILTER_MAX - 1)
35 43
36#endif /* _UAPI_NFNL_ACCT_H_ */ 44#endif /* _UAPI_NFNL_ACCT_H_ */
diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h
index 964d3d42f874..d6a1df1f2947 100644
--- a/include/uapi/linux/netfilter/xt_set.h
+++ b/include/uapi/linux/netfilter/xt_set.h
@@ -71,4 +71,14 @@ struct xt_set_info_match_v3 {
71 __u32 flags; 71 __u32 flags;
72}; 72};
73 73
74/* Revision 3 target */
75
76struct xt_set_info_target_v3 {
77 struct xt_set_info add_set;
78 struct xt_set_info del_set;
79 struct xt_set_info map_set;
80 __u32 flags;
81 __u32 timeout;
82};
83
74#endif /*_XT_SET_H*/ 84#endif /*_XT_SET_H*/
diff --git a/include/uapi/linux/netfilter_arp/arpt_mangle.h b/include/uapi/linux/netfilter_arp/arpt_mangle.h
index 250f502902bb..8c2b16a1f5a0 100644
--- a/include/uapi/linux/netfilter_arp/arpt_mangle.h
+++ b/include/uapi/linux/netfilter_arp/arpt_mangle.h
@@ -13,7 +13,7 @@ struct arpt_mangle
13 union { 13 union {
14 struct in_addr tgt_ip; 14 struct in_addr tgt_ip;
15 } u_t; 15 } u_t;
16 u_int8_t flags; 16 __u8 flags;
17 int target; 17 int target;
18}; 18};
19 19
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h
index cf47c313794e..584b6ef3a5e8 100644
--- a/include/uapi/linux/nfsd/export.h
+++ b/include/uapi/linux/nfsd/export.h
@@ -28,7 +28,8 @@
28#define NFSEXP_ALLSQUASH 0x0008 28#define NFSEXP_ALLSQUASH 0x0008
29#define NFSEXP_ASYNC 0x0010 29#define NFSEXP_ASYNC 0x0010
30#define NFSEXP_GATHERED_WRITES 0x0020 30#define NFSEXP_GATHERED_WRITES 0x0020
31/* 40 80 100 currently unused */ 31#define NFSEXP_NOREADDIRPLUS 0x0040
32/* 80 100 currently unused */
32#define NFSEXP_NOHIDE 0x0200 33#define NFSEXP_NOHIDE 0x0200
33#define NFSEXP_NOSUBTREECHECK 0x0400 34#define NFSEXP_NOSUBTREECHECK 0x0400
34#define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */ 35#define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */
@@ -47,7 +48,7 @@
47 */ 48 */
48#define NFSEXP_V4ROOT 0x10000 49#define NFSEXP_V4ROOT 0x10000
49/* All flags that we claim to support. (Note we don't support NOACL.) */ 50/* All flags that we claim to support. (Note we don't support NOACL.) */
50#define NFSEXP_ALLFLAGS 0x17E3F 51#define NFSEXP_ALLFLAGS 0x1FE7F
51 52
52/* The flags that may vary depending on security flavor: */ 53/* The flags that may vary depending on security flavor: */
53#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ 54#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f1db15b9c041..4b28dc07bcb1 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -722,6 +722,22 @@
722 * QoS mapping is relevant for IP packets, it is only valid during an 722 * QoS mapping is relevant for IP packets, it is only valid during an
723 * association. This is cleared on disassociation and AP restart. 723 * association. This is cleared on disassociation and AP restart.
724 * 724 *
725 * @NL80211_CMD_ADD_TX_TS: Ask the kernel to add a traffic stream for the given
726 * %NL80211_ATTR_TSID and %NL80211_ATTR_MAC with %NL80211_ATTR_USER_PRIO
727 * and %NL80211_ATTR_ADMITTED_TIME parameters.
728 * Note that the action frame handshake with the AP shall be handled by
729 * userspace via the normal management RX/TX framework, this only sets
730 * up the TX TS in the driver/device.
731 * If the admitted time attribute is not added then the request just checks
 732 * if a subsequent setup could be successful; the intent is to use this to
733 * avoid setting up a session with the AP when local restrictions would
734 * make that impossible. However, the subsequent "real" setup may still
735 * fail even if the check was successful.
736 * @NL80211_CMD_DEL_TX_TS: Remove an existing TS with the %NL80211_ATTR_TSID
737 * and %NL80211_ATTR_MAC parameters. It isn't necessary to call this
738 * before removing a station entry entirely, or before disassociating
739 * or similar, cleanup will happen in the driver/device in this case.
740 *
725 * @NL80211_CMD_MAX: highest used command number 741 * @NL80211_CMD_MAX: highest used command number
726 * @__NL80211_CMD_AFTER_LAST: internal use 742 * @__NL80211_CMD_AFTER_LAST: internal use
727 */ 743 */
@@ -893,6 +909,9 @@ enum nl80211_commands {
893 909
894 NL80211_CMD_SET_QOS_MAP, 910 NL80211_CMD_SET_QOS_MAP,
895 911
912 NL80211_CMD_ADD_TX_TS,
913 NL80211_CMD_DEL_TX_TS,
914
896 /* add new commands above here */ 915 /* add new commands above here */
897 916
898 /* used to define NL80211_CMD_MAX below */ 917 /* used to define NL80211_CMD_MAX below */
@@ -1594,6 +1613,31 @@ enum nl80211_commands {
1594 * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is 1613 * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
1595 * the TDLS link initiator. 1614 * the TDLS link initiator.
1596 * 1615 *
1616 * @NL80211_ATTR_USE_RRM: flag for indicating whether the current connection
1617 * shall support Radio Resource Measurements (11k). This attribute can be
1618 * used with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests.
1619 * User space applications are expected to use this flag only if the
1620 * underlying device supports these minimal RRM features:
1621 * %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES,
1622 * %NL80211_FEATURE_QUIET.
1623 * If this flag is used, the driver must add the Power Capabilities IE to the
1624 * association request. In addition, it must also set the RRM capability
1625 * flag in the association request's Capability Info field.
1626 *
1627 * @NL80211_ATTR_WIPHY_DYN_ACK: flag attribute used to enable ACK timeout
1628 * estimation algorithm (dynack). In order to activate dynack
1629 * %NL80211_FEATURE_ACKTO_ESTIMATION feature flag must be set by lower
1630 * drivers to indicate dynack capability. Dynack is automatically disabled
1631 * when a valid value is set for the coverage class.
1632 *
1633 * @NL80211_ATTR_TSID: a TSID value (u8 attribute)
1634 * @NL80211_ATTR_USER_PRIO: user priority value (u8 attribute)
1635 * @NL80211_ATTR_ADMITTED_TIME: admitted time in units of 32 microseconds
1636 * (per second) (u16 attribute)
1637 *
1638 * @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). see
1639 * &enum nl80211_smps_mode.
1640 *
1597 * @NL80211_ATTR_MAX: highest attribute number currently defined 1641 * @NL80211_ATTR_MAX: highest attribute number currently defined
1598 * @__NL80211_ATTR_AFTER_LAST: internal use 1642 * @__NL80211_ATTR_AFTER_LAST: internal use
1599 */ 1643 */
@@ -1936,6 +1980,16 @@ enum nl80211_attrs {
1936 1980
1937 NL80211_ATTR_TDLS_INITIATOR, 1981 NL80211_ATTR_TDLS_INITIATOR,
1938 1982
1983 NL80211_ATTR_USE_RRM,
1984
1985 NL80211_ATTR_WIPHY_DYN_ACK,
1986
1987 NL80211_ATTR_TSID,
1988 NL80211_ATTR_USER_PRIO,
1989 NL80211_ATTR_ADMITTED_TIME,
1990
1991 NL80211_ATTR_SMPS_MODE,
1992
1939 /* add attributes here, update the policy in nl80211.c */ 1993 /* add attributes here, update the policy in nl80211.c */
1940 1994
1941 __NL80211_ATTR_AFTER_LAST, 1995 __NL80211_ATTR_AFTER_LAST,
@@ -3055,14 +3109,20 @@ enum nl80211_bss_scan_width {
3055 * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets) 3109 * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets)
3056 * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) 3110 * @NL80211_BSS_FREQUENCY: frequency in MHz (u32)
3057 * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) 3111 * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64)
3112 * (if @NL80211_BSS_PRESP_DATA is present then this is known to be
3113 * from a probe response, otherwise it may be from the same beacon
3114 * that the NL80211_BSS_BEACON_TSF will be from)
3058 * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) 3115 * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16)
3059 * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16) 3116 * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16)
3060 * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the 3117 * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the
3061 * raw information elements from the probe response/beacon (bin); 3118 * raw information elements from the probe response/beacon (bin);
3062 * if the %NL80211_BSS_BEACON_IES attribute is present, the IEs here are 3119 * if the %NL80211_BSS_BEACON_IES attribute is present and the data is
3063 * from a Probe Response frame; otherwise they are from a Beacon frame. 3120 * different then the IEs here are from a Probe Response frame; otherwise
3121 * they are from a Beacon frame.
3064 * However, if the driver does not indicate the source of the IEs, these 3122 * However, if the driver does not indicate the source of the IEs, these
3065 * IEs may be from either frame subtype. 3123 * IEs may be from either frame subtype.
3124 * If present, the @NL80211_BSS_PRESP_DATA attribute indicates that the
3125 * data here is known to be from a probe response, without any heuristics.
3066 * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon 3126 * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon
3067 * in mBm (100 * dBm) (s32) 3127 * in mBm (100 * dBm) (s32)
3068 * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon 3128 * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon
@@ -3074,6 +3134,10 @@ enum nl80211_bss_scan_width {
3074 * yet been received 3134 * yet been received
3075 * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel 3135 * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel
3076 * (u32, enum nl80211_bss_scan_width) 3136 * (u32, enum nl80211_bss_scan_width)
3137 * @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64)
3138 * (not present if no beacon frame has been received yet)
3139 * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and
3140 * @NL80211_BSS_TSF is known to be from a probe response (flag attribute)
3077 * @__NL80211_BSS_AFTER_LAST: internal 3141 * @__NL80211_BSS_AFTER_LAST: internal
3078 * @NL80211_BSS_MAX: highest BSS attribute 3142 * @NL80211_BSS_MAX: highest BSS attribute
3079 */ 3143 */
@@ -3091,6 +3155,8 @@ enum nl80211_bss {
3091 NL80211_BSS_SEEN_MS_AGO, 3155 NL80211_BSS_SEEN_MS_AGO,
3092 NL80211_BSS_BEACON_IES, 3156 NL80211_BSS_BEACON_IES,
3093 NL80211_BSS_CHAN_WIDTH, 3157 NL80211_BSS_CHAN_WIDTH,
3158 NL80211_BSS_BEACON_TSF,
3159 NL80211_BSS_PRESP_DATA,
3094 3160
3095 /* keep last */ 3161 /* keep last */
3096 __NL80211_BSS_AFTER_LAST, 3162 __NL80211_BSS_AFTER_LAST,
@@ -3956,6 +4022,26 @@ enum nl80211_ap_sme_features {
3956 * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic 4022 * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic
3957 * channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the 4023 * channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the
3958 * lifetime of a BSS. 4024 * lifetime of a BSS.
4025 * @NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES: This device adds a DS Parameter
4026 * Set IE to probe requests.
4027 * @NL80211_FEATURE_WFA_TPC_IE_IN_PROBES: This device adds a WFA TPC Report IE
4028 * to probe requests.
4029 * @NL80211_FEATURE_QUIET: This device, in client mode, supports Quiet Period
4030 * requests sent to it by an AP.
4031 * @NL80211_FEATURE_TX_POWER_INSERTION: This device is capable of inserting the
4032 * current tx power value into the TPC Report IE in the spectrum
4033 * management TPC Report action frame, and in the Radio Measurement Link
4034 * Measurement Report action frame.
4035 * @NL80211_FEATURE_ACKTO_ESTIMATION: This driver supports dynamic ACK timeout
4036 * estimation (dynack). %NL80211_ATTR_WIPHY_DYN_ACK flag attribute is used
4037 * to enable dynack.
4038 * @NL80211_FEATURE_STATIC_SMPS: Device supports static spatial
4039 * multiplexing powersave, ie. can turn off all but one chain
4040 * even on HT connections that should be using more chains.
4041 * @NL80211_FEATURE_DYNAMIC_SMPS: Device supports dynamic spatial
4042 * multiplexing powersave, ie. can turn off all but one chain
4043 * and then wake the rest up as required after, for example,
4044 * rts/cts handshake.
3959 */ 4045 */
3960enum nl80211_feature_flags { 4046enum nl80211_feature_flags {
3961 NL80211_FEATURE_SK_TX_STATUS = 1 << 0, 4047 NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
@@ -3977,6 +4063,13 @@ enum nl80211_feature_flags {
3977 NL80211_FEATURE_USERSPACE_MPM = 1 << 16, 4063 NL80211_FEATURE_USERSPACE_MPM = 1 << 16,
3978 NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17, 4064 NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17,
3979 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 1 << 18, 4065 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 1 << 18,
4066 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 1 << 19,
4067 NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 1 << 20,
4068 NL80211_FEATURE_QUIET = 1 << 21,
4069 NL80211_FEATURE_TX_POWER_INSERTION = 1 << 22,
4070 NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23,
4071 NL80211_FEATURE_STATIC_SMPS = 1 << 24,
4072 NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25,
3980}; 4073};
3981 4074
3982/** 4075/**
@@ -4051,6 +4144,25 @@ enum nl80211_acl_policy {
4051}; 4144};
4052 4145
4053/** 4146/**
4147 * enum nl80211_smps_mode - SMPS mode
4148 *
4149 * Requested SMPS mode (for AP mode)
4150 *
4151 * @NL80211_SMPS_OFF: SMPS off (use all antennas).
4152 * @NL80211_SMPS_STATIC: static SMPS (use a single antenna)
4153 * @NL80211_SMPS_DYNAMIC: dynamic smps (start with a single antenna and
4154 * turn on other antennas after CTS/RTS).
4155 */
4156enum nl80211_smps_mode {
4157 NL80211_SMPS_OFF,
4158 NL80211_SMPS_STATIC,
4159 NL80211_SMPS_DYNAMIC,
4160
4161 __NL80211_SMPS_AFTER_LAST,
4162 NL80211_SMPS_MAX = __NL80211_SMPS_AFTER_LAST - 1
4163};
4164
4165/**
4054 * enum nl80211_radar_event - type of radar event for DFS operation 4166 * enum nl80211_radar_event - type of radar event for DFS operation
4055 * 4167 *
4056 * Type of event to be used with NL80211_ATTR_RADAR_EVENT to inform userspace 4168 * Type of event to be used with NL80211_ATTR_RADAR_EVENT to inform userspace
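
The new NL80211_CMD_ADD_TX_TS command documented above takes NL80211_ATTR_TSID, NL80211_ATTR_MAC and NL80211_ATTR_USER_PRIO, plus an optional NL80211_ATTR_ADMITTED_TIME. A minimal userspace sketch using libnl-3 follows; the add_tx_ts() helper and its parameters are illustrative and not part of this patch, and the nl80211 family id is assumed to have been resolved elsewhere (e.g. with genl_ctrl_resolve()).

#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Illustrative helper: request a TX traffic stream towards "peer". */
static int add_tx_ts(struct nl_sock *sk, int family, int ifindex,
		     const unsigned char peer[6], unsigned char tsid,
		     unsigned char user_prio, unsigned short admitted_time)
{
	struct nl_msg *msg = nlmsg_alloc();
	int err;

	if (!msg)
		return -ENOMEM;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_ADD_TX_TS, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_MAC, 6, peer);
	nla_put_u8(msg, NL80211_ATTR_TSID, tsid);
	nla_put_u8(msg, NL80211_ATTR_USER_PRIO, user_prio);
	/* Omitting NL80211_ATTR_ADMITTED_TIME only checks whether a later
	 * "real" setup could succeed, as documented above. */
	if (admitted_time)
		nla_put_u16(msg, NL80211_ATTR_ADMITTED_TIME, admitted_time);
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return err < 0 ? err : 0;
}

Tearing a stream down is symmetric: NL80211_CMD_DEL_TX_TS with only NL80211_ATTR_TSID and NL80211_ATTR_MAC.
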
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index a794d1dd7b40..435eabc5ffaa 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -192,6 +192,7 @@ enum ovs_vport_type {
192 OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ 192 OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
193 OVS_VPORT_TYPE_GRE, /* GRE tunnel. */ 193 OVS_VPORT_TYPE_GRE, /* GRE tunnel. */
194 OVS_VPORT_TYPE_VXLAN, /* VXLAN tunnel. */ 194 OVS_VPORT_TYPE_VXLAN, /* VXLAN tunnel. */
195 OVS_VPORT_TYPE_GENEVE, /* Geneve tunnel. */
195 __OVS_VPORT_TYPE_MAX 196 __OVS_VPORT_TYPE_MAX
196}; 197};
197 198
@@ -289,9 +290,12 @@ enum ovs_key_attr {
289 OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */ 290 OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */
290 OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */ 291 OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */
291 OVS_KEY_ATTR_TCP_FLAGS, /* be16 TCP flags. */ 292 OVS_KEY_ATTR_TCP_FLAGS, /* be16 TCP flags. */
293 OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash
294 is not computed by the datapath. */
295 OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */
292 296
293#ifdef __KERNEL__ 297#ifdef __KERNEL__
294 OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */ 298 OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */
295#endif 299#endif
296 __OVS_KEY_ATTR_MAX 300 __OVS_KEY_ATTR_MAX
297}; 301};
@@ -306,6 +310,8 @@ enum ovs_tunnel_key_attr {
306 OVS_TUNNEL_KEY_ATTR_TTL, /* u8 Tunnel IP TTL. */ 310 OVS_TUNNEL_KEY_ATTR_TTL, /* u8 Tunnel IP TTL. */
307 OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT, /* No argument, set DF. */ 311 OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT, /* No argument, set DF. */
308 OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */ 312 OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */
313 OVS_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */
314 OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */
309 __OVS_TUNNEL_KEY_ATTR_MAX 315 __OVS_TUNNEL_KEY_ATTR_MAX
310}; 316};
311 317
@@ -493,6 +499,27 @@ struct ovs_action_push_vlan {
493 __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ 499 __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
494}; 500};
495 501
502/* Data path hash algorithm for computing Datapath hash.
503 *
504 * The algorithm type only specifies the fields in a flow
505 * will be used as part of the hash. Each datapath is free
506 * to use its own hash algorithm. The hash value will be
507 * opaque to the user space daemon.
508 */
509enum ovs_hash_alg {
510 OVS_HASH_ALG_L4,
511};
512
513/*
514 * struct ovs_action_hash - %OVS_ACTION_ATTR_HASH action argument.
515 * @hash_alg: Algorithm used to compute hash prior to recirculation.
516 * @hash_basis: basis used for computing hash.
517 */
518struct ovs_action_hash {
519 uint32_t hash_alg; /* One of ovs_hash_alg. */
520 uint32_t hash_basis;
521};
522
496/** 523/**
497 * enum ovs_action_attr - Action types. 524 * enum ovs_action_attr - Action types.
498 * 525 *
@@ -521,6 +548,8 @@ enum ovs_action_attr {
521 OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ 548 OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */
522 OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ 549 OVS_ACTION_ATTR_POP_VLAN, /* No argument. */
523 OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ 550 OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */
551 OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */
552 OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */
524 __OVS_ACTION_ATTR_MAX 553 __OVS_ACTION_ATTR_MAX
525}; 554};
526 555
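
For the recirculation pieces above, OVS_ACTION_ATTR_HASH carries a struct ovs_action_hash and is typically paired with an OVS_ACTION_ATTR_RECIRC action in the same nested action list. A small sketch of the action argument (the basis value is an arbitrary example):

#include <linux/openvswitch.h>

/* Argument for an OVS_ACTION_ATTR_HASH action: hash the L4 flow fields
 * with a caller-chosen basis; the resulting datapath hash is reported
 * back through OVS_KEY_ATTR_DP_HASH. */
static const struct ovs_action_hash hash_arg = {
	.hash_alg   = OVS_HASH_ALG_L4,
	.hash_basis = 0x12345678,
};

In a flow setup request this struct would be nla_put() into the nested action list, followed by an OVS_ACTION_ATTR_RECIRC attribute carrying the u32 recirc_id to continue processing with.
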
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 30db069bce62..4a1d0cc38ff2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -552,6 +552,7 @@
552#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */ 552#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
553#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */ 553#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
554#define PCI_EXP_RTCAP 30 /* Root Capabilities */ 554#define PCI_EXP_RTCAP 30 /* Root Capabilities */
555#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
555#define PCI_EXP_RTSTA 32 /* Root Status */ 556#define PCI_EXP_RTSTA 32 /* Root Status */
556#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ 557#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
557#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ 558#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
@@ -630,7 +631,7 @@
630 631
631/* Advanced Error Reporting */ 632/* Advanced Error Reporting */
632#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ 633#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
633#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */ 634#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
634#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */ 635#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
635#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */ 636#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
636#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */ 637#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 58afc04c107e..513df75d0fc9 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -1,6 +1,8 @@
1#ifndef _LINUX_PRCTL_H 1#ifndef _LINUX_PRCTL_H
2#define _LINUX_PRCTL_H 2#define _LINUX_PRCTL_H
3 3
4#include <linux/types.h>
5
4/* Values to pass as first argument to prctl() */ 6/* Values to pass as first argument to prctl() */
5 7
6#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */ 8#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */
@@ -119,6 +121,31 @@
119# define PR_SET_MM_ENV_END 11 121# define PR_SET_MM_ENV_END 11
120# define PR_SET_MM_AUXV 12 122# define PR_SET_MM_AUXV 12
121# define PR_SET_MM_EXE_FILE 13 123# define PR_SET_MM_EXE_FILE 13
124# define PR_SET_MM_MAP 14
125# define PR_SET_MM_MAP_SIZE 15
126
127/*
128 * This structure provides a new memory descriptor
129 * map which mostly modifies /proc/pid/stat[m]
130 * output for a task. This is mostly done for the
131 * sake of checkpoint/restore functionality.
132 */
133struct prctl_mm_map {
134 __u64 start_code; /* code section bounds */
135 __u64 end_code;
136 __u64 start_data; /* data section bounds */
137 __u64 end_data;
138 __u64 start_brk; /* heap for brk() syscall */
139 __u64 brk;
140 __u64 start_stack; /* stack starts at */
141 __u64 arg_start; /* command line arguments bounds */
142 __u64 arg_end;
143 __u64 env_start; /* environment variables bounds */
144 __u64 env_end;
145 __u64 *auxv; /* auxiliary vector */
146 __u32 auxv_size; /* vector size */
147 __u32 exe_fd; /* /proc/$pid/exe link file */
148};
122 149
123/* 150/*
124 * Set specific pid that is allowed to ptrace the current task. 151 * Set specific pid that is allowed to ptrace the current task.
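
PR_SET_MM_MAP and PR_SET_MM_MAP_SIZE above are sub-options of the existing PR_SET_MM prctl. A hedged userspace sketch (field values are placeholders; the call normally requires CAP_SYS_RESOURCE and a kernel with checkpoint/restore support):

#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	struct prctl_mm_map map;
	unsigned int size = 0;

	/* Ask the kernel which prctl_mm_map size it expects. */
	if (prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, (unsigned long)&size, 0, 0))
		perror("PR_SET_MM_MAP_SIZE");

	memset(&map, 0, sizeof(map));
	/* Fill start_code/end_code, start_data/end_data, start_brk/brk,
	 * start_stack, the arg/env bounds, auxv/auxv_size and exe_fd with
	 * the values being restored before issuing the call. */

	/* Installs the whole descriptor map in a single call. */
	if (prctl(PR_SET_MM, PR_SET_MM_MAP, (unsigned long)&map, sizeof(map), 0))
		perror("PR_SET_MM_MAP");
	return 0;
}
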
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 5820269aa132..16ad8521af6a 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -244,4 +244,7 @@
244/* SC16IS74xx */ 244/* SC16IS74xx */
245#define PORT_SC16IS7XX 108 245#define PORT_SC16IS7XX 108
246 246
247/* MESON */
248#define PORT_MESON 109
249
247#endif /* _UAPILINUX_SERIAL_CORE_H */ 250#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/smiapp.h b/include/uapi/linux/smiapp.h
new file mode 100644
index 000000000000..53938f4412ee
--- /dev/null
+++ b/include/uapi/linux/smiapp.h
@@ -0,0 +1,29 @@
1/*
2 * include/uapi/linux/smiapp.h
3 *
4 * Generic driver for SMIA/SMIA++ compliant camera modules
5 *
6 * Copyright (C) 2014 Intel Corporation
7 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 */
19
20#ifndef __UAPI_LINUX_SMIAPP_H_
21#define __UAPI_LINUX_SMIAPP_H_
22
23#define V4L2_SMIAPP_TEST_PATTERN_MODE_DISABLED 0
24#define V4L2_SMIAPP_TEST_PATTERN_MODE_SOLID_COLOUR 1
25#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS 2
26#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS_GREY 3
27#define V4L2_SMIAPP_TEST_PATTERN_MODE_PN9 4
28
29#endif /* __UAPI_LINUX_SMIAPP_H_ */
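
These constants are the menu indices for the sensor's test-pattern control. A hedged sketch, assuming the smiapp driver exposes them through the generic V4L2_CID_TEST_PATTERN menu control on its sub-device node (the device path is illustrative):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/smiapp.h>

int main(void)
{
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* illustrative node */
	struct v4l2_control ctrl = {
		.id    = V4L2_CID_TEST_PATTERN,
		.value = V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS,
	};

	if (fd < 0)
		return 1;
	return ioctl(fd, VIDIOC_S_CTRL, &ctrl) ? 1 : 0;
}
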
diff --git a/include/uapi/linux/uhid.h b/include/uapi/linux/uhid.h
index 1e3b09c191cd..aaa86d6bd1dd 100644
--- a/include/uapi/linux/uhid.h
+++ b/include/uapi/linux/uhid.h
@@ -24,35 +24,23 @@
24#include <linux/hid.h> 24#include <linux/hid.h>
25 25
26enum uhid_event_type { 26enum uhid_event_type {
27 UHID_CREATE, 27 __UHID_LEGACY_CREATE,
28 UHID_DESTROY, 28 UHID_DESTROY,
29 UHID_START, 29 UHID_START,
30 UHID_STOP, 30 UHID_STOP,
31 UHID_OPEN, 31 UHID_OPEN,
32 UHID_CLOSE, 32 UHID_CLOSE,
33 UHID_OUTPUT, 33 UHID_OUTPUT,
34 UHID_OUTPUT_EV, /* obsolete! */ 34 __UHID_LEGACY_OUTPUT_EV,
35 UHID_INPUT, 35 __UHID_LEGACY_INPUT,
36 UHID_FEATURE, 36 UHID_GET_REPORT,
37 UHID_FEATURE_ANSWER, 37 UHID_GET_REPORT_REPLY,
38 UHID_CREATE2, 38 UHID_CREATE2,
39 UHID_INPUT2, 39 UHID_INPUT2,
40 UHID_SET_REPORT,
41 UHID_SET_REPORT_REPLY,
40}; 42};
41 43
42struct uhid_create_req {
43 __u8 name[128];
44 __u8 phys[64];
45 __u8 uniq[64];
46 __u8 __user *rd_data;
47 __u16 rd_size;
48
49 __u16 bus;
50 __u32 vendor;
51 __u32 product;
52 __u32 version;
53 __u32 country;
54} __attribute__((__packed__));
55
56struct uhid_create2_req { 44struct uhid_create2_req {
57 __u8 name[128]; 45 __u8 name[128];
58 __u8 phys[64]; 46 __u8 phys[64];
@@ -66,6 +54,16 @@ struct uhid_create2_req {
66 __u8 rd_data[HID_MAX_DESCRIPTOR_SIZE]; 54 __u8 rd_data[HID_MAX_DESCRIPTOR_SIZE];
67} __attribute__((__packed__)); 55} __attribute__((__packed__));
68 56
57enum uhid_dev_flag {
58 UHID_DEV_NUMBERED_FEATURE_REPORTS = (1ULL << 0),
59 UHID_DEV_NUMBERED_OUTPUT_REPORTS = (1ULL << 1),
60 UHID_DEV_NUMBERED_INPUT_REPORTS = (1ULL << 2),
61};
62
63struct uhid_start_req {
64 __u64 dev_flags;
65};
66
69#define UHID_DATA_MAX 4096 67#define UHID_DATA_MAX 4096
70 68
71enum uhid_report_type { 69enum uhid_report_type {
@@ -74,36 +72,94 @@ enum uhid_report_type {
74 UHID_INPUT_REPORT, 72 UHID_INPUT_REPORT,
75}; 73};
76 74
77struct uhid_input_req { 75struct uhid_input2_req {
76 __u16 size;
77 __u8 data[UHID_DATA_MAX];
78} __attribute__((__packed__));
79
80struct uhid_output_req {
78 __u8 data[UHID_DATA_MAX]; 81 __u8 data[UHID_DATA_MAX];
79 __u16 size; 82 __u16 size;
83 __u8 rtype;
80} __attribute__((__packed__)); 84} __attribute__((__packed__));
81 85
82struct uhid_input2_req { 86struct uhid_get_report_req {
87 __u32 id;
88 __u8 rnum;
89 __u8 rtype;
90} __attribute__((__packed__));
91
92struct uhid_get_report_reply_req {
93 __u32 id;
94 __u16 err;
83 __u16 size; 95 __u16 size;
84 __u8 data[UHID_DATA_MAX]; 96 __u8 data[UHID_DATA_MAX];
85} __attribute__((__packed__)); 97} __attribute__((__packed__));
86 98
87struct uhid_output_req { 99struct uhid_set_report_req {
100 __u32 id;
101 __u8 rnum;
102 __u8 rtype;
103 __u16 size;
104 __u8 data[UHID_DATA_MAX];
105} __attribute__((__packed__));
106
107struct uhid_set_report_reply_req {
108 __u32 id;
109 __u16 err;
110} __attribute__((__packed__));
111
112/*
113 * Compat Layer
114 * All these commands and requests are obsolete. You should avoid using them in
115 * new code. We support them for backwards-compatibility, but you might not get
116 * access to new features if you use them.
117 */
118
119enum uhid_legacy_event_type {
120 UHID_CREATE = __UHID_LEGACY_CREATE,
121 UHID_OUTPUT_EV = __UHID_LEGACY_OUTPUT_EV,
122 UHID_INPUT = __UHID_LEGACY_INPUT,
123 UHID_FEATURE = UHID_GET_REPORT,
124 UHID_FEATURE_ANSWER = UHID_GET_REPORT_REPLY,
125};
126
127/* Obsolete! Use UHID_CREATE2. */
128struct uhid_create_req {
129 __u8 name[128];
130 __u8 phys[64];
131 __u8 uniq[64];
132 __u8 __user *rd_data;
133 __u16 rd_size;
134
135 __u16 bus;
136 __u32 vendor;
137 __u32 product;
138 __u32 version;
139 __u32 country;
140} __attribute__((__packed__));
141
142/* Obsolete! Use UHID_INPUT2. */
143struct uhid_input_req {
88 __u8 data[UHID_DATA_MAX]; 144 __u8 data[UHID_DATA_MAX];
89 __u16 size; 145 __u16 size;
90 __u8 rtype;
91} __attribute__((__packed__)); 146} __attribute__((__packed__));
92 147
93/* Obsolete! Newer kernels will no longer send these events but instead convert 148/* Obsolete! Kernel uses UHID_OUTPUT exclusively now. */
94 * it into raw output reports via UHID_OUTPUT. */
95struct uhid_output_ev_req { 149struct uhid_output_ev_req {
96 __u16 type; 150 __u16 type;
97 __u16 code; 151 __u16 code;
98 __s32 value; 152 __s32 value;
99} __attribute__((__packed__)); 153} __attribute__((__packed__));
100 154
155/* Obsolete! Kernel uses ABI compatible UHID_GET_REPORT. */
101struct uhid_feature_req { 156struct uhid_feature_req {
102 __u32 id; 157 __u32 id;
103 __u8 rnum; 158 __u8 rnum;
104 __u8 rtype; 159 __u8 rtype;
105} __attribute__((__packed__)); 160} __attribute__((__packed__));
106 161
162/* Obsolete! Use ABI compatible UHID_GET_REPORT_REPLY. */
107struct uhid_feature_answer_req { 163struct uhid_feature_answer_req {
108 __u32 id; 164 __u32 id;
109 __u16 err; 165 __u16 err;
@@ -111,6 +167,15 @@ struct uhid_feature_answer_req {
111 __u8 data[UHID_DATA_MAX]; 167 __u8 data[UHID_DATA_MAX];
112} __attribute__((__packed__)); 168} __attribute__((__packed__));
113 169
170/*
171 * UHID Events
172 * All UHID events from and to the kernel are encoded as "struct uhid_event".
173 * The "type" field contains a UHID_* type identifier. All payload depends on
174 * that type and can be accessed via ev->u.XYZ accordingly.
175 * If user-space writes short events, they're extended with 0s by the kernel. If
176 * the kernel writes short events, user-space shall extend them with 0s.
177 */
178
114struct uhid_event { 179struct uhid_event {
115 __u32 type; 180 __u32 type;
116 181
@@ -120,9 +185,14 @@ struct uhid_event {
120 struct uhid_output_req output; 185 struct uhid_output_req output;
121 struct uhid_output_ev_req output_ev; 186 struct uhid_output_ev_req output_ev;
122 struct uhid_feature_req feature; 187 struct uhid_feature_req feature;
188 struct uhid_get_report_req get_report;
123 struct uhid_feature_answer_req feature_answer; 189 struct uhid_feature_answer_req feature_answer;
190 struct uhid_get_report_reply_req get_report_reply;
124 struct uhid_create2_req create2; 191 struct uhid_create2_req create2;
125 struct uhid_input2_req input2; 192 struct uhid_input2_req input2;
193 struct uhid_set_report_req set_report;
194 struct uhid_set_report_reply_req set_report_reply;
195 struct uhid_start_req start;
126 } u; 196 } u;
127} __attribute__((__packed__)); 197} __attribute__((__packed__));
128 198
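
Putting the reworked uhid interface together: userspace creates a device with UHID_CREATE2, then reads struct uhid_event records back from the same fd, answering UHID_GET_REPORT with UHID_GET_REPORT_REPLY and handling UHID_SET_REPORT/UHID_OUTPUT as they arrive. A minimal sketch (the report descriptor, names and IDs are made-up placeholders):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/uhid.h>

/* Placeholder report descriptor; a real one comes from the emulated device. */
static const unsigned char rdesc[] = { 0x05, 0x01, 0x09, 0x02, 0xa1, 0x01, 0xc0 };

int main(void)
{
	struct uhid_event ev;
	int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;

	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_CREATE2;
	strcpy((char *)ev.u.create2.name, "example-uhid-device");
	memcpy(ev.u.create2.rd_data, rdesc, sizeof(rdesc));
	ev.u.create2.rd_size = sizeof(rdesc);
	ev.u.create2.bus = 0x03;	/* BUS_USB */
	ev.u.create2.vendor = 0x1234;	/* placeholder IDs */
	ev.u.create2.product = 0x5678;
	if (write(fd, &ev, sizeof(ev)) < 0)
		return 1;

	/* The kernel answers with UHID_START (carrying uhid_start_req
	 * dev_flags), then UHID_OPEN/UHID_CLOSE and, while open,
	 * UHID_OUTPUT, UHID_GET_REPORT and UHID_SET_REPORT events. */
	while (read(fd, &ev, sizeof(ev)) > 0) {
		if (ev.type == UHID_GET_REPORT) {
			/* Reply with a UHID_GET_REPORT_REPLY whose id
			 * matches ev.u.get_report.id. */
		}
	}
	return 0;
}
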
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 0154b2859fd7..295ba299e7bd 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -19,6 +19,7 @@ enum functionfs_flags {
19 FUNCTIONFS_HAS_HS_DESC = 2, 19 FUNCTIONFS_HAS_HS_DESC = 2,
20 FUNCTIONFS_HAS_SS_DESC = 4, 20 FUNCTIONFS_HAS_SS_DESC = 4,
21 FUNCTIONFS_HAS_MS_OS_DESC = 8, 21 FUNCTIONFS_HAS_MS_OS_DESC = 8,
22 FUNCTIONFS_VIRTUAL_ADDR = 16,
22}; 23};
23 24
24/* Descriptor of an non-audio endpoint */ 25/* Descriptor of an non-audio endpoint */
@@ -32,6 +33,16 @@ struct usb_endpoint_descriptor_no_audio {
32 __u8 bInterval; 33 __u8 bInterval;
33} __attribute__((packed)); 34} __attribute__((packed));
34 35
36struct usb_functionfs_descs_head_v2 {
37 __le32 magic;
38 __le32 length;
39 __le32 flags;
40 /*
41 * __le32 fs_count, hs_count, ss_count; must be included manually in
42 * the structure taking flags into consideration.
43 */
44} __attribute__((packed));
45
35/* Legacy format, deprecated as of 3.14. */ 46/* Legacy format, deprecated as of 3.14. */
36struct usb_functionfs_descs_head { 47struct usb_functionfs_descs_head {
37 __le32 magic; 48 __le32 magic;
@@ -92,7 +103,7 @@ struct usb_ext_prop_desc {
92 * structure. Any flags that are not recognised cause the whole block to be 103 * structure. Any flags that are not recognised cause the whole block to be
93 * rejected with -ENOSYS. 104 * rejected with -ENOSYS.
94 * 105 *
95 * Legacy descriptors format: 106 * Legacy descriptors format (deprecated as of 3.14):
96 * 107 *
97 * | off | name | type | description | 108 * | off | name | type | description |
98 * |-----+-----------+--------------+--------------------------------------| 109 * |-----+-----------+--------------+--------------------------------------|
@@ -265,6 +276,12 @@ struct usb_functionfs_event {
265 */ 276 */
266#define FUNCTIONFS_ENDPOINT_REVMAP _IO('g', 129) 277#define FUNCTIONFS_ENDPOINT_REVMAP _IO('g', 129)
267 278
279/*
280 * Returns endpoint descriptor. If function is not active returns -ENODEV.
281 */
282#define FUNCTIONFS_ENDPOINT_DESC _IOR('g', 130, \
283 struct usb_endpoint_descriptor)
284
268 285
269 286
270#endif /* _UAPI__LINUX_FUNCTIONFS_H__ */ 287#endif /* _UAPI__LINUX_FUNCTIONFS_H__ */
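
A sketch of how the new usb_functionfs_descs_head_v2 is laid out when writing descriptors to ep0. It assumes the FUNCTIONFS_DESCRIPTORS_MAGIC_V2 and FUNCTIONFS_HAS_FS_DESC values defined elsewhere in this header; per the comment in the struct, only the counts whose flag bits are set follow the fixed header, and the descriptor sets themselves come after the counts.

#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <linux/usb/functionfs.h>

/* Illustrative helper: write the v2 header plus the fs/hs counts into
 * "buf" and return the offset at which the descriptors themselves start. */
static size_t build_v2_header(uint8_t *buf, uint32_t total_len,
			      uint32_t fs_count, uint32_t hs_count)
{
	struct usb_functionfs_descs_head_v2 hdr = {
		.magic  = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2),
		.length = htole32(total_len),
		.flags  = htole32(FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC),
	};
	uint32_t counts[2] = { htole32(fs_count), htole32(hs_count) };
	size_t off = 0;

	memcpy(buf + off, &hdr, sizeof(hdr));
	off += sizeof(hdr);
	memcpy(buf + off, counts, sizeof(counts));	/* one count per flag set */
	off += sizeof(counts);
	return off;	/* full-speed, then high-speed descriptors follow here */
}
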
diff --git a/include/uapi/linux/usbip.h b/include/uapi/linux/usbip.h
new file mode 100644
index 000000000000..fa5db30ede36
--- /dev/null
+++ b/include/uapi/linux/usbip.h
@@ -0,0 +1,26 @@
1/*
2 * usbip.h
3 *
4 * USBIP uapi defines and function prototypes etc.
5*/
6
7#ifndef _UAPI_LINUX_USBIP_H
8#define _UAPI_LINUX_USBIP_H
9
10/* usbip device status - exported in usbip device sysfs status */
11enum usbip_device_status {
12 /* sdev is available. */
13 SDEV_ST_AVAILABLE = 0x01,
14 /* sdev is now used. */
15 SDEV_ST_USED,
16 /* sdev is unusable because of a fatal error. */
17 SDEV_ST_ERROR,
18
19 /* vdev does not connect a remote device. */
20 VDEV_ST_NULL,
21 /* vdev is used, but the USB address is not assigned yet */
22 VDEV_ST_NOTASSIGNED,
23 VDEV_ST_USED,
24 VDEV_ST_ERROR
25};
26#endif /* _UAPI_LINUX_USBIP_H */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e946e43fb8d5..661f119a51b8 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -746,6 +746,8 @@ enum v4l2_auto_focus_range {
746 V4L2_AUTO_FOCUS_RANGE_INFINITY = 3, 746 V4L2_AUTO_FOCUS_RANGE_INFINITY = 3,
747}; 747};
748 748
749#define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32)
750#define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33)
749 751
750/* FM Modulator class control IDs */ 752/* FM Modulator class control IDs */
751 753
@@ -865,6 +867,10 @@ enum v4l2_jpeg_chroma_subsampling {
865#define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1) 867#define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1)
866#define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2) 868#define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2)
867#define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3) 869#define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3)
870#define V4L2_CID_TEST_PATTERN_RED (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4)
871#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
872#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
873#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
868 874
869 875
870/* Image processing controls */ 876/* Image processing controls */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 6c8f159e416e..6a0764c89fcb 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -21,17 +21,8 @@
21#ifndef _V4L2_DV_TIMINGS_H 21#ifndef _V4L2_DV_TIMINGS_H
22#define _V4L2_DV_TIMINGS_H 22#define _V4L2_DV_TIMINGS_H
23 23
24#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6))
25/* Sadly gcc versions older than 4.6 have a bug in how they initialize
26 anonymous unions where they require additional curly brackets.
27 This violates the C1x standard. This workaround adds the curly brackets
28 if needed. */
29#define V4L2_INIT_BT_TIMINGS(_width, args...) \ 24#define V4L2_INIT_BT_TIMINGS(_width, args...) \
30 { .bt = { _width , ## args } } 25 { .bt = { _width , ## args } }
31#else
32#define V4L2_INIT_BT_TIMINGS(_width, args...) \
33 .bt = { _width , ## args }
34#endif
35 26
36/* CEA-861-E timings (i.e. standard HDTV timings) */ 27/* CEA-861-E timings (i.e. standard HDTV timings) */
37 28
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 6612974c64bf..29715d27548f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -33,6 +33,9 @@
33/* Check if EEH is supported */ 33/* Check if EEH is supported */
34#define VFIO_EEH 5 34#define VFIO_EEH 5
35 35
36/* Two-stage IOMMU */
37#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
38
36/* 39/*
37 * The IOCTL interface is designed for extensibility by embedding the 40 * The IOCTL interface is designed for extensibility by embedding the
38 * structure length (argsz) and flags into structures passed between 41 * structure length (argsz) and flags into structures passed between
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 778a3298fb34..1c2f84fd4d99 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -79,6 +79,7 @@
79/* Four-character-code (FOURCC) */ 79/* Four-character-code (FOURCC) */
80#define v4l2_fourcc(a, b, c, d)\ 80#define v4l2_fourcc(a, b, c, d)\
81 ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24)) 81 ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
82#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1 << 31))
82 83
83/* 84/*
84 * E N U M S 85 * E N U M S
@@ -307,6 +308,8 @@ struct v4l2_pix_format {
307#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */ 308#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */
308#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ 309#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
309#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ 310#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
311#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
312#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */
310#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ 313#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
311#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */ 314#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
312#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ 315#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
@@ -1285,11 +1288,11 @@ struct v4l2_ext_control {
1285 union { 1288 union {
1286 __s32 value; 1289 __s32 value;
1287 __s64 value64; 1290 __s64 value64;
1288 char *string; 1291 char __user *string;
1289 __u8 *p_u8; 1292 __u8 __user *p_u8;
1290 __u16 *p_u16; 1293 __u16 __user *p_u16;
1291 __u32 *p_u32; 1294 __u32 __user *p_u32;
1292 void *ptr; 1295 void __user *ptr;
1293 }; 1296 };
1294} __attribute__ ((packed)); 1297} __attribute__ ((packed));
1295 1298
diff --git a/include/uapi/linux/wil6210_uapi.h b/include/uapi/linux/wil6210_uapi.h
new file mode 100644
index 000000000000..6a3cddd156c4
--- /dev/null
+++ b/include/uapi/linux/wil6210_uapi.h
@@ -0,0 +1,87 @@
1/*
2 * Copyright (c) 2014 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef __WIL6210_UAPI_H__
18#define __WIL6210_UAPI_H__
19
20#if !defined(__KERNEL__)
21#define __user
22#endif
23
24#include <linux/sockios.h>
25
26/* Numbers SIOCDEVPRIVATE and SIOCDEVPRIVATE + 1
27 * are used by Android devices to implement PNO (preferred network offload).
28 * Although this is a temporary solution, use different numbers to avoid conflicts.
29 */
30
31/**
32 * Perform 32-bit I/O operation to the card memory
33 *
34 * User code should arrange data in memory like this:
35 *
36 * struct wil_memio io;
37 * struct ifreq ifr = {
38 * .ifr_data = &io,
39 * };
40 */
41#define WIL_IOCTL_MEMIO (SIOCDEVPRIVATE + 2)
42
43/**
44 * Perform block I/O operation to the card memory
45 *
46 * User code should arrange data in memory like this:
47 *
48 * void *buf;
49 * struct wil_memio_block io = {
50 * .block = buf,
51 * };
52 * struct ifreq ifr = {
53 * .ifr_data = &io,
54 * };
55 */
56#define WIL_IOCTL_MEMIO_BLOCK (SIOCDEVPRIVATE + 3)
57
58/**
59 * operation to perform
60 *
61 * @wil_mmio_op_mask - bits defining operation,
62 * @wil_mmio_addr_mask - bits defining addressing mode
63 */
64enum wil_memio_op {
65 wil_mmio_read = 0,
66 wil_mmio_write = 1,
67 wil_mmio_op_mask = 0xff,
68 wil_mmio_addr_linker = 0 << 8,
69 wil_mmio_addr_ahb = 1 << 8,
70 wil_mmio_addr_bar = 2 << 8,
71 wil_mmio_addr_mask = 0xff00,
72};
73
74struct wil_memio {
75 uint32_t op; /* enum wil_memio_op */
76 uint32_t addr; /* should be 32-bit aligned */
77 uint32_t val;
78};
79
80struct wil_memio_block {
81 uint32_t op; /* enum wil_memio_op */
82 uint32_t addr; /* should be 32-bit aligned */
83 uint32_t size; /* should be multiple of 4 */
84 void __user *block; /* block address */
85};
86
87#endif /* __WIL6210_UAPI_H__ */
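
Following the arrangement shown in the WIL_IOCTL_MEMIO comment, a sketch of a single 32-bit read from card memory; the wil_read_word() helper is illustrative, and "sock" is any ordinary datagram socket used only to carry the per-interface ioctl.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/wil6210_uapi.h>

static int wil_read_word(int sock, const char *ifname, uint32_t addr,
			 uint32_t *val)
{
	struct wil_memio io = {
		.op   = wil_mmio_read | wil_mmio_addr_linker,
		.addr = addr,			/* must be 32-bit aligned */
	};
	struct ifreq ifr;
	int ret;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&io;

	ret = ioctl(sock, WIL_IOCTL_MEMIO, &ifr);
	if (!ret)
		*val = io.val;
	return ret;
}

WIL_IOCTL_MEMIO_BLOCK works the same way, with struct wil_memio_block pointing at a caller-supplied buffer of "size" bytes.
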
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index c38355c1f3c9..1590c49cae57 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -13,7 +13,7 @@
13#ifndef _UAPI_LINUX_XATTR_H 13#ifndef _UAPI_LINUX_XATTR_H
14#define _UAPI_LINUX_XATTR_H 14#define _UAPI_LINUX_XATTR_H
15 15
16#ifdef __UAPI_DEF_XATTR 16#if __UAPI_DEF_XATTR
17#define __USE_KERNEL_XATTR_DEFS 17#define __USE_KERNEL_XATTR_DEFS
18 18
19#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ 19#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 25e5dd916ba4..02d5125a5ee8 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -328,6 +328,8 @@ enum xfrm_spdattr_type_t {
328 XFRMA_SPD_UNSPEC, 328 XFRMA_SPD_UNSPEC,
329 XFRMA_SPD_INFO, 329 XFRMA_SPD_INFO,
330 XFRMA_SPD_HINFO, 330 XFRMA_SPD_HINFO,
331 XFRMA_SPD_IPV4_HTHRESH,
332 XFRMA_SPD_IPV6_HTHRESH,
331 __XFRMA_SPD_MAX 333 __XFRMA_SPD_MAX
332 334
333#define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1) 335#define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1)
@@ -347,6 +349,11 @@ struct xfrmu_spdhinfo {
347 __u32 spdhmcnt; 349 __u32 spdhmcnt;
348}; 350};
349 351
352struct xfrmu_spdhthresh {
353 __u8 lbits;
354 __u8 rbits;
355};
356
350struct xfrm_usersa_info { 357struct xfrm_usersa_info {
351 struct xfrm_selector sel; 358 struct xfrm_selector sel;
352 struct xfrm_id id; 359 struct xfrm_id id;
diff --git a/include/uapi/misc/Kbuild b/include/uapi/misc/Kbuild
new file mode 100644
index 000000000000..e96cae7d58c9
--- /dev/null
+++ b/include/uapi/misc/Kbuild
@@ -0,0 +1,2 @@
1# misc Header export list
2header-y += cxl.h
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
new file mode 100644
index 000000000000..cd6d789b73ec
--- /dev/null
+++ b/include/uapi/misc/cxl.h
@@ -0,0 +1,88 @@
1/*
2 * Copyright 2014 IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef _UAPI_MISC_CXL_H
11#define _UAPI_MISC_CXL_H
12
13#include <linux/types.h>
14#include <linux/ioctl.h>
15
16
17struct cxl_ioctl_start_work {
18 __u64 flags;
19 __u64 work_element_descriptor;
20 __u64 amr;
21 __s16 num_interrupts;
22 __s16 reserved1;
23 __s32 reserved2;
24 __u64 reserved3;
25 __u64 reserved4;
26 __u64 reserved5;
27 __u64 reserved6;
28};
29
30#define CXL_START_WORK_AMR 0x0000000000000001ULL
31#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
32#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
33 CXL_START_WORK_NUM_IRQS)
34
35/* ioctl numbers */
36#define CXL_MAGIC 0xCA
37#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
38#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
39
40#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
41
42/* Events from read() */
43enum cxl_event_type {
44 CXL_EVENT_RESERVED = 0,
45 CXL_EVENT_AFU_INTERRUPT = 1,
46 CXL_EVENT_DATA_STORAGE = 2,
47 CXL_EVENT_AFU_ERROR = 3,
48};
49
50struct cxl_event_header {
51 __u16 type;
52 __u16 size;
53 __u16 process_element;
54 __u16 reserved1;
55};
56
57struct cxl_event_afu_interrupt {
58 __u16 flags;
59 __u16 irq; /* Raised AFU interrupt number */
60 __u32 reserved1;
61};
62
63struct cxl_event_data_storage {
64 __u16 flags;
65 __u16 reserved1;
66 __u32 reserved2;
67 __u64 addr;
68 __u64 dsisr;
69 __u64 reserved3;
70};
71
72struct cxl_event_afu_error {
73 __u16 flags;
74 __u16 reserved1;
75 __u32 reserved2;
76 __u64 error;
77};
78
79struct cxl_event {
80 struct cxl_event_header header;
81 union {
82 struct cxl_event_afu_interrupt irq;
83 struct cxl_event_data_storage fault;
84 struct cxl_event_afu_error afu_error;
85 };
86};
87
88#endif /* _UAPI_MISC_CXL_H */
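
A hedged userspace sketch of this interface: open an AFU device node (the path is illustrative; actual nodes are created by the cxl driver), attach a context with CXL_IOCTL_START_WORK, then read() events. The buffer is sized to CXL_READ_MIN_SIZE per the hint above, and every event begins with a cxl_event_header whose type selects the valid union member.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/cxl.h>

int main(void)
{
	union {
		char buf[CXL_READ_MIN_SIZE];
		struct cxl_event ev;
	} u;
	struct cxl_ioctl_start_work work;
	__u32 pe;
	int fd = open("/dev/cxl/afu0.0s", O_RDWR);	/* illustrative node */

	if (fd < 0)
		return 1;

	memset(&work, 0, sizeof(work));
	work.flags = CXL_START_WORK_NUM_IRQS;
	work.num_interrupts = 4;		/* example value */
	work.work_element_descriptor = 0;	/* AFU-specific WED */

	if (ioctl(fd, CXL_IOCTL_START_WORK, &work) ||
	    ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe))
		return 1;

	if (read(fd, &u, sizeof(u.buf)) >= (ssize_t)sizeof(u.ev.header) &&
	    u.ev.header.type == CXL_EVENT_AFU_INTERRUPT)
		return (int)u.ev.irq.irq;	/* which AFU interrupt fired */
	return 0;
}
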
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 32168f7ffce3..6ee586728df9 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -219,7 +219,8 @@ typedef int __bitwise snd_pcm_format_t;
219#define SNDRV_PCM_FORMAT_G723_40_1B ((__force snd_pcm_format_t) 47) /* 1 sample in 1 byte */ 219#define SNDRV_PCM_FORMAT_G723_40_1B ((__force snd_pcm_format_t) 47) /* 1 sample in 1 byte */
220#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */ 220#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */
221#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */ 221#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */
222#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U16_LE 222#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */
223#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_LE
223 224
224#ifdef SNDRV_LITTLE_ENDIAN 225#ifdef SNDRV_LITTLE_ENDIAN
225#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE 226#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 3e43e22cdff9..ef64b66b18df 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -108,6 +108,42 @@ int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel);
108void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num); 108void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
109 109
110/* 110/*
111 * IPU Channel Parameter Memory (cpmem) functions
112 */
113struct ipu_rgb {
114 struct fb_bitfield red;
115 struct fb_bitfield green;
116 struct fb_bitfield blue;
117 struct fb_bitfield transp;
118 int bits_per_pixel;
119};
120
121struct ipu_image {
122 struct v4l2_pix_format pix;
123 struct v4l2_rect rect;
124 dma_addr_t phys;
125};
126
127void ipu_cpmem_zero(struct ipuv3_channel *ch);
128void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres);
129void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride);
130void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch);
131void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf);
132void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride);
133void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize);
134int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
135 const struct ipu_rgb *rgb);
136int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
137void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
138void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
139 u32 pixel_format, int stride,
140 int u_offset, int v_offset);
141void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
142 u32 pixel_format, int stride, int height);
143int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
144int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image);
145
146/*
111 * IPU Display Controller (dc) functions 147 * IPU Display Controller (dc) functions
112 */ 148 */
113struct ipu_dc; 149struct ipu_dc;
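
The ipu_cpmem_* helpers declared above replace the open-coded parameter-memory accessors removed later in this diff. A hedged in-kernel usage sketch (resolution, format and burst size are arbitrary example values; "ch" and "phys" come from the caller):

#include <drm/drm_fourcc.h>
#include <video/imx-ipu-v3.h>

static void example_setup_channel(struct ipuv3_channel *ch, dma_addr_t phys)
{
	ipu_cpmem_zero(ch);
	ipu_cpmem_set_resolution(ch, 1920, 1080);
	ipu_cpmem_set_stride(ch, 1920 * 4);		/* bytes per line */
	ipu_cpmem_set_fmt(ch, DRM_FORMAT_XRGB8888);
	ipu_cpmem_set_buffer(ch, 0, phys);
	ipu_cpmem_set_burstsize(ch, 16);
	ipu_cpmem_set_high_priority(ch);
}
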
@@ -180,161 +216,9 @@ int ipu_smfc_disable(struct ipu_soc *ipu);
180int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id); 216int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id);
181int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize); 217int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize);
182 218
183#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size))
184
185#define IPU_FIELD_UBO IPU_CPMEM_WORD(0, 46, 22)
186#define IPU_FIELD_VBO IPU_CPMEM_WORD(0, 68, 22)
187#define IPU_FIELD_IOX IPU_CPMEM_WORD(0, 90, 4)
188#define IPU_FIELD_RDRW IPU_CPMEM_WORD(0, 94, 1)
189#define IPU_FIELD_SO IPU_CPMEM_WORD(0, 113, 1)
190#define IPU_FIELD_SLY IPU_CPMEM_WORD(1, 102, 14)
191#define IPU_FIELD_SLUV IPU_CPMEM_WORD(1, 128, 14)
192
193#define IPU_FIELD_XV IPU_CPMEM_WORD(0, 0, 10)
194#define IPU_FIELD_YV IPU_CPMEM_WORD(0, 10, 9)
195#define IPU_FIELD_XB IPU_CPMEM_WORD(0, 19, 13)
196#define IPU_FIELD_YB IPU_CPMEM_WORD(0, 32, 12)
197#define IPU_FIELD_NSB_B IPU_CPMEM_WORD(0, 44, 1)
198#define IPU_FIELD_CF IPU_CPMEM_WORD(0, 45, 1)
199#define IPU_FIELD_SX IPU_CPMEM_WORD(0, 46, 12)
200#define IPU_FIELD_SY IPU_CPMEM_WORD(0, 58, 11)
201#define IPU_FIELD_NS IPU_CPMEM_WORD(0, 69, 10)
202#define IPU_FIELD_SDX IPU_CPMEM_WORD(0, 79, 7)
203#define IPU_FIELD_SM IPU_CPMEM_WORD(0, 86, 10)
204#define IPU_FIELD_SCC IPU_CPMEM_WORD(0, 96, 1)
205#define IPU_FIELD_SCE IPU_CPMEM_WORD(0, 97, 1)
206#define IPU_FIELD_SDY IPU_CPMEM_WORD(0, 98, 7)
207#define IPU_FIELD_SDRX IPU_CPMEM_WORD(0, 105, 1)
208#define IPU_FIELD_SDRY IPU_CPMEM_WORD(0, 106, 1)
209#define IPU_FIELD_BPP IPU_CPMEM_WORD(0, 107, 3)
210#define IPU_FIELD_DEC_SEL IPU_CPMEM_WORD(0, 110, 2)
211#define IPU_FIELD_DIM IPU_CPMEM_WORD(0, 112, 1)
212#define IPU_FIELD_BNDM IPU_CPMEM_WORD(0, 114, 3)
213#define IPU_FIELD_BM IPU_CPMEM_WORD(0, 117, 2)
214#define IPU_FIELD_ROT IPU_CPMEM_WORD(0, 119, 1)
215#define IPU_FIELD_HF IPU_CPMEM_WORD(0, 120, 1)
216#define IPU_FIELD_VF IPU_CPMEM_WORD(0, 121, 1)
217#define IPU_FIELD_THE IPU_CPMEM_WORD(0, 122, 1)
218#define IPU_FIELD_CAP IPU_CPMEM_WORD(0, 123, 1)
219#define IPU_FIELD_CAE IPU_CPMEM_WORD(0, 124, 1)
220#define IPU_FIELD_FW IPU_CPMEM_WORD(0, 125, 13)
221#define IPU_FIELD_FH IPU_CPMEM_WORD(0, 138, 12)
222#define IPU_FIELD_EBA0 IPU_CPMEM_WORD(1, 0, 29)
223#define IPU_FIELD_EBA1 IPU_CPMEM_WORD(1, 29, 29)
224#define IPU_FIELD_ILO IPU_CPMEM_WORD(1, 58, 20)
225#define IPU_FIELD_NPB IPU_CPMEM_WORD(1, 78, 7)
226#define IPU_FIELD_PFS IPU_CPMEM_WORD(1, 85, 4)
227#define IPU_FIELD_ALU IPU_CPMEM_WORD(1, 89, 1)
228#define IPU_FIELD_ALBM IPU_CPMEM_WORD(1, 90, 3)
229#define IPU_FIELD_ID IPU_CPMEM_WORD(1, 93, 2)
230#define IPU_FIELD_TH IPU_CPMEM_WORD(1, 95, 7)
231#define IPU_FIELD_SL IPU_CPMEM_WORD(1, 102, 14)
232#define IPU_FIELD_WID0 IPU_CPMEM_WORD(1, 116, 3)
233#define IPU_FIELD_WID1 IPU_CPMEM_WORD(1, 119, 3)
234#define IPU_FIELD_WID2 IPU_CPMEM_WORD(1, 122, 3)
235#define IPU_FIELD_WID3 IPU_CPMEM_WORD(1, 125, 3)
236#define IPU_FIELD_OFS0 IPU_CPMEM_WORD(1, 128, 5)
237#define IPU_FIELD_OFS1 IPU_CPMEM_WORD(1, 133, 5)
238#define IPU_FIELD_OFS2 IPU_CPMEM_WORD(1, 138, 5)
239#define IPU_FIELD_OFS3 IPU_CPMEM_WORD(1, 143, 5)
240#define IPU_FIELD_SXYS IPU_CPMEM_WORD(1, 148, 1)
241#define IPU_FIELD_CRE IPU_CPMEM_WORD(1, 149, 1)
242#define IPU_FIELD_DEC_SEL2 IPU_CPMEM_WORD(1, 150, 1)
243
244struct ipu_cpmem_word {
245 u32 data[5];
246 u32 res[3];
247};
248
249struct ipu_ch_param {
250 struct ipu_cpmem_word word[2];
251};
252
253void ipu_ch_param_write_field(struct ipu_ch_param __iomem *base, u32 wbs, u32 v);
254u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs);
255struct ipu_ch_param __iomem *ipu_get_cpmem(struct ipuv3_channel *channel);
256void ipu_ch_param_dump(struct ipu_ch_param __iomem *p);
257
258static inline void ipu_ch_param_zero(struct ipu_ch_param __iomem *p)
259{
260 int i;
261 void __iomem *base = p;
262
263 for (i = 0; i < sizeof(*p) / sizeof(u32); i++)
264 writel(0, base + i * sizeof(u32));
265}
266
267static inline void ipu_cpmem_set_buffer(struct ipu_ch_param __iomem *p,
268 int bufnum, dma_addr_t buf)
269{
270 if (bufnum)
271 ipu_ch_param_write_field(p, IPU_FIELD_EBA1, buf >> 3);
272 else
273 ipu_ch_param_write_field(p, IPU_FIELD_EBA0, buf >> 3);
274}
275
276static inline void ipu_cpmem_set_resolution(struct ipu_ch_param __iomem *p,
277 int xres, int yres)
278{
279 ipu_ch_param_write_field(p, IPU_FIELD_FW, xres - 1);
280 ipu_ch_param_write_field(p, IPU_FIELD_FH, yres - 1);
281}
282
283static inline void ipu_cpmem_set_stride(struct ipu_ch_param __iomem *p,
284 int stride)
285{
286 ipu_ch_param_write_field(p, IPU_FIELD_SLY, stride - 1);
287}
288
289void ipu_cpmem_set_high_priority(struct ipuv3_channel *channel);
290
291struct ipu_rgb {
292 struct fb_bitfield red;
293 struct fb_bitfield green;
294 struct fb_bitfield blue;
295 struct fb_bitfield transp;
296 int bits_per_pixel;
297};
298
299struct ipu_image {
300 struct v4l2_pix_format pix;
301 struct v4l2_rect rect;
302 dma_addr_t phys;
303};
304
305int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p,
306 int width);
307
308int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *,
309 const struct ipu_rgb *rgb);
310
311static inline void ipu_cpmem_interlaced_scan(struct ipu_ch_param *p,
312 int stride)
313{
314 ipu_ch_param_write_field(p, IPU_FIELD_SO, 1);
315 ipu_ch_param_write_field(p, IPU_FIELD_ILO, stride / 8);
316 ipu_ch_param_write_field(p, IPU_FIELD_SLY, (stride * 2) - 1);
317};
318
319void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format,
320 int stride, int height);
321void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param __iomem *p,
322 u32 pixel_format);
323void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem *p,
324 u32 pixel_format, int stride, int u_offset, int v_offset);
325int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat);
326int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
327 struct ipu_image *image);
328
329enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); 219enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc);
330enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); 220enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat);
331 221
332static inline void ipu_cpmem_set_burstsize(struct ipu_ch_param __iomem *p,
333 int burstsize)
334{
335 ipu_ch_param_write_field(p, IPU_FIELD_NPB, burstsize - 1);
336};
337
338struct ipu_client_platformdata { 222struct ipu_client_platformdata {
339 int csi; 223 int csi;
340 int di; 224 int di;
diff --git a/include/xen/events.h b/include/xen/events.h
index 8bee7a75e850..5321cd9636e6 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -28,6 +28,8 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
28 unsigned long irqflags, 28 unsigned long irqflags,
29 const char *devname, 29 const char *devname,
30 void *dev_id); 30 void *dev_id);
31int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
32 unsigned int remote_port);
31int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, 33int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
32 unsigned int remote_port, 34 unsigned int remote_port,
33 irq_handler_t handler, 35 irq_handler_t handler,
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 6f4eae328ca7..f90b03454659 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -3,6 +3,24 @@
3 * 3 *
4 * Definitions used for the Xen ELF notes. 4 * Definitions used for the Xen ELF notes.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
6 * Copyright (c) 2006, Ian Campbell, XenSource Ltd. 24 * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
7 */ 25 */
8 26
@@ -18,12 +36,13 @@
18 * 36 *
19 * LEGACY indicated the fields in the legacy __xen_guest string which 37 * LEGACY indicated the fields in the legacy __xen_guest string which
20 * this a note type replaces. 38 * this a note type replaces.
39 *
40 * String values (for non-legacy) are NULL terminated ASCII, also known
41 * as ASCIZ type.
21 */ 42 */
22 43
23/* 44/*
24 * NAME=VALUE pair (string). 45 * NAME=VALUE pair (string).
25 *
26 * LEGACY: FEATURES and PAE
27 */ 46 */
28#define XEN_ELFNOTE_INFO 0 47#define XEN_ELFNOTE_INFO 0
29 48
@@ -137,10 +156,30 @@
137 156
138/* 157/*
139 * Whether or not the guest supports cooperative suspend cancellation. 158 * Whether or not the guest supports cooperative suspend cancellation.
159 * This is a numeric value.
160 *
161 * Default is 0
140 */ 162 */
141#define XEN_ELFNOTE_SUSPEND_CANCEL 14 163#define XEN_ELFNOTE_SUSPEND_CANCEL 14
142 164
143/* 165/*
166 * The (non-default) location the initial phys-to-machine map should be
167 * placed at by the hypervisor (Dom0) or the tools (DomU).
168 * The kernel must be prepared for this mapping to be established using
169 * large pages, despite such otherwise not being available to guests.
170 * The kernel must also be able to handle the page table pages used for
171 * this mapping not being accessible through the initial mapping.
172 * (Only x86-64 supports this at present.)
173 */
174#define XEN_ELFNOTE_INIT_P2M 15
175
176/*
177 * Whether or not the guest can deal with being passed an initrd not
178 * mapped through its initial page tables.
179 */
180#define XEN_ELFNOTE_MOD_START_PFN 16
181
182/*
144 * The features supported by this kernel (numeric). 183 * The features supported by this kernel (numeric).
145 * 184 *
146 * Other than XEN_ELFNOTE_FEATURES on pre-4.2 Xen, this note allows a 185 * Other than XEN_ELFNOTE_FEATURES on pre-4.2 Xen, this note allows a
@@ -153,6 +192,11 @@
153 */ 192 */
154#define XEN_ELFNOTE_SUPPORTED_FEATURES 17 193#define XEN_ELFNOTE_SUPPORTED_FEATURES 17
155 194
195/*
196 * The number of the highest elfnote defined.
197 */
198#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUPPORTED_FEATURES
199
156#endif /* __XEN_PUBLIC_ELFNOTE_H__ */ 200#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
157 201
158/* 202/*
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 131a6ccdba25..14334d0161d5 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -53,6 +53,9 @@
53/* operation as Dom0 is supported */ 53/* operation as Dom0 is supported */
54#define XENFEAT_dom0 11 54#define XENFEAT_dom0 11
55 55
56/* Xen also maps grant references at pfn = mfn */
57#define XENFEAT_grant_map_identity 12
58
56#define XENFEAT_NR_SUBMAPS 1 59#define XENFEAT_NR_SUBMAPS 1
57 60
58#endif /* __XEN_PUBLIC_FEATURES_H__ */ 61#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
new file mode 100644
index 000000000000..d07d7aca8d1c
--- /dev/null
+++ b/include/xen/interface/io/vscsiif.h
@@ -0,0 +1,229 @@
1/******************************************************************************
2 * vscsiif.h
3 *
4 * Based on the blkif.h code.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright(c) FUJITSU Limited 2008.
25 */
26
27#ifndef __XEN__PUBLIC_IO_SCSI_H__
28#define __XEN__PUBLIC_IO_SCSI_H__
29
30#include "ring.h"
31#include "../grant_table.h"
32
33/*
34 * Feature and Parameter Negotiation
35 * =================================
36 * The two halves of a Xen pvSCSI driver utilize nodes within the XenStore to
37 * communicate capabilities and to negotiate operating parameters. This
38 * section enumerates these nodes which reside in the respective front and
39 * backend portions of the XenStore, following the XenBus convention.
40 *
41 * Any specified default value is in effect if the corresponding XenBus node
42 * is not present in the XenStore.
43 *
44 * XenStore nodes in sections marked "PRIVATE" are solely for use by the
45 * driver side whose XenBus tree contains them.
46 *
47 *****************************************************************************
48 * Backend XenBus Nodes
49 *****************************************************************************
50 *
51 *------------------ Backend Device Identification (PRIVATE) ------------------
52 *
53 * p-devname
54 * Values: string
55 *
56 * A free string used to identify the physical device (e.g. a disk name).
57 *
58 * p-dev
59 * Values: string
60 *
61 * A string specifying the backend device: either a 4-tuple "h:c:t:l"
62 * (host, controller, target, lun, all integers), or a WWN (e.g.
63 * "naa.60014054ac780582").
64 *
65 * v-dev
66 * Values: string
67 *
68 * A string specifying the frontend device in the form of a 4-tuple "h:c:t:l"
69 * (host, controller, target, lun, all integers).
70 *
71 *--------------------------------- Features ---------------------------------
72 *
73 * feature-sg-grant
74 * Values: unsigned [VSCSIIF_SG_TABLESIZE...65535]
75 * Default Value: 0
76 *
77 * Specifies the maximum number of scatter/gather elements supported in
78 * grant pages. If not set, the backend supports up to VSCSIIF_SG_TABLESIZE
79 * SG elements specified directly in the request.
80 *
81 *****************************************************************************
82 * Frontend XenBus Nodes
83 *****************************************************************************
84 *
85 *----------------------- Request Transport Parameters -----------------------
86 *
87 * event-channel
88 * Values: unsigned
89 *
90 * The identifier of the Xen event channel used to signal activity
91 * in the ring buffer.
92 *
93 * ring-ref
94 * Values: unsigned
95 *
96 * The Xen grant reference granting permission for the backend to map
97 * the sole page in a single page sized ring buffer.
98 *
99 * protocol
100 * Values: string (XEN_IO_PROTO_ABI_*)
101 * Default Value: XEN_IO_PROTO_ABI_NATIVE
102 *
103 * The machine ABI rules governing the format of all ring request and
104 * response structures.
105 */
106
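
The frontend side of the negotiation above boils down to writing a handful of XenStore keys inside a transaction. A sketch using the generic xenbus helpers (publish_transport and its caller are hypothetical; xen-scsifront's real code differs):

#include <linux/errno.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>

/* Hypothetical frontend helper: publish the transport keys listed above
 * under the device's frontend directory, retrying on transaction races. */
static int publish_transport(struct xenbus_device *dev,
                             grant_ref_t ring_ref, unsigned int evtchn)
{
        struct xenbus_transaction xbt;
        int err;

again:
        err = xenbus_transaction_start(&xbt);
        if (err)
                return err;

        err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ring_ref);
        if (!err)
                err = xenbus_printf(xbt, dev->nodename, "event-channel",
                                    "%u", evtchn);
        if (err) {
                xenbus_transaction_end(xbt, 1);
                return err;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        return err;
}
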
107/* Requests from the frontend to the backend */
108
109/*
110 * Request a SCSI operation specified via a CDB in vscsiif_request.cmnd.
111 * The target is specified via channel, id and lun.
112 *
113 * The operation to be performed is specified via a CDB in cmnd[], the length
114 * of the CDB is in cmd_len. sc_data_direction specifies the direction of data
115 * (to the device, from the device, or none at all).
116 *
117 * If data is to be transferred to or from the device the buffer(s) in the
118 * guest memory is/are specified via one or multiple scsiif_request_segment
119 * descriptors each specifying a memory page via a grant_ref_t, an offset into
120 * the page and the length of the area in that page. All scsiif_request_segment
121 * areas concatenated form the resulting data buffer used by the operation.
122 * If the number of scsiif_request_segment areas is not too large (less than
123 * or equal to VSCSIIF_SG_TABLESIZE) the areas can be specified directly in the
124 * seg[] array and the number of valid scsiif_request_segment elements is to be
125 * set in nr_segments.
126 *
127 * If "feature-sg-grant" in the Xenstore is set it is possible to specify more
128 * than VSCSIIF_SG_TABLESIZE scsiif_request_segment elements via indirection.
129 * The maximum number of allowed scsiif_request_segment elements is the value
130 * of the "feature-sg-grant" entry from Xenstore. When using indirection the
131 * seg[] array doesn't contain specifications of the data buffers, but
132 * references to scsiif_request_segment arrays, which in turn reference the
133 * data buffers. While nr_segments holds the number of populated seg[] entries
134 * (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment
135 * elements referencing the target data buffers is calculated from the lengths
136 * of the seg[] elements (the sum of all valid seg[].length divided by the
137 * size of one scsiif_request_segment structure).
138 */
139#define VSCSIIF_ACT_SCSI_CDB 1
140
141/*
142 * Request abort of a running operation for the specified target given by
143 * channel, id, lun and the operation's rqid in ref_rqid.
144 */
145#define VSCSIIF_ACT_SCSI_ABORT 2
146
147/*
148 * Request a device reset of the specified target (channel and id).
149 */
150#define VSCSIIF_ACT_SCSI_RESET 3
151
152/*
153 * Preset scatter/gather elements for a following request. Deprecated.
154 * Keeping the define only to avoid usage of the value "4" for other actions.
155 */
156#define VSCSIIF_ACT_SCSI_SG_PRESET 4
157
158/*
159 * Maximum scatter/gather segments per request.
160 *
161 * Considering the balance between allocating at least 16 "vscsiif_request"
162 * structures on one page (4096 bytes) and the number of scatter/gather
163 * elements needed, we decided to use 26 as a magic number.
164 *
165 * If "feature-sg-grant" is set, more scatter/gather elements can be specified
166 * by placing them in one or more (up to VSCSIIF_SG_TABLESIZE) granted pages.
167 * In this case the vscsiif_request seg elements don't contain references to
168 * the user data, but to the SG elements referencing the user data.
169 */
170#define VSCSIIF_SG_TABLESIZE 26
171
172/*
173 * Based on Linux kernel 2.6.18; still valid.
174 * Changing these values requires support of multiple protocols via the rings
175 * as "old clients" will blindly use these values and the resulting structure
176 * sizes.
177 */
178#define VSCSIIF_MAX_COMMAND_SIZE 16
179#define VSCSIIF_SENSE_BUFFERSIZE 96
180
181struct scsiif_request_segment {
182 grant_ref_t gref;
183 uint16_t offset;
184 uint16_t length;
185};
186
187#define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment))
188
189/* Size of one request is 252 bytes */
190struct vscsiif_request {
191 uint16_t rqid; /* private guest value, echoed in resp */
192 uint8_t act; /* command between backend and frontend */
193 uint8_t cmd_len; /* valid CDB bytes */
194
195 uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; /* the CDB */
196 uint16_t timeout_per_command; /* deprecated */
197 uint16_t channel, id, lun; /* (virtual) device specification */
198 uint16_t ref_rqid; /* command abort reference */
199 uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
200 DMA_FROM_DEVICE(2)
201 DMA_NONE(3) requests */
202 uint8_t nr_segments; /* Number of pieces of scatter-gather */
203/*
204 * flag in nr_segments: SG elements via grant page
205 *
206 * If VSCSIIF_SG_GRANT is set, the low 7 bits of nr_segments specify the number
207 * of grant pages containing SG elements. Usable if "feature-sg-grant" set.
208 */
209#define VSCSIIF_SG_GRANT 0x80
210
211 struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
212 uint32_t reserved[3];
213};
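
The "Size of one request is 252 bytes" comment and the indirect SG rule can be checked mechanically. A hedged sketch (vscsiif_nr_sg_elements is a hypothetical helper; 16 requests must fit in a 4096-byte ring page, and the indirect element count is the summed seg[].length divided by the element size):

#include <linux/bug.h>
#include <xen/interface/io/vscsiif.h>

/* Hypothetical helper implementing the rule above: with VSCSIIF_SG_GRANT
 * set, seg[] references granted pages of SG elements and the element count
 * is the summed seg[].length divided by the element size. */
static unsigned int vscsiif_nr_sg_elements(const struct vscsiif_request *req)
{
        unsigned int pages = req->nr_segments & ~VSCSIIF_SG_GRANT;
        unsigned int total = 0, i;

        /* 16 requests per 4096-byte ring page => each request <= 256 bytes */
        BUILD_BUG_ON(sizeof(struct vscsiif_request) > 4096 / 16);

        if (!(req->nr_segments & VSCSIIF_SG_GRANT))
                return req->nr_segments;        /* direct SG list in seg[] */

        for (i = 0; i < pages; i++)
                total += req->seg[i].length;
        return total / sizeof(struct scsiif_request_segment);
}
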
214
215/* Size of one response is 252 bytes */
216struct vscsiif_response {
217 uint16_t rqid; /* identifies request */
218 uint8_t padding;
219 uint8_t sense_len;
220 uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
221 int32_t rslt;
222 uint32_t residual_len; /* request bufflen -
223 return the value from physical device */
224 uint32_t reserved[36];
225};
226
227DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
228
229#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
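
DEFINE_RING_TYPES() generates struct vscsiif_sring plus front and back ring types that are driven with the usual ring.h macros. A sketch of the frontend side (function names and the pre-shared, zeroed ring_page are assumptions):

#include <linux/string.h>
#include <asm/page.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/vscsiif.h>

/* Frontend ring bring-up; `ring_page` is assumed to be a zeroed page that
 * will be granted to the backend (see ring-ref above). */
static void vscsiif_ring_setup(struct vscsiif_front_ring *ring, void *ring_page)
{
        struct vscsiif_sring *sring = ring_page;

        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(ring, sring, PAGE_SIZE);
}

/* Claim the next free request slot and stamp it with a private rqid. */
static struct vscsiif_request *vscsiif_next_request(struct vscsiif_front_ring *ring,
                                                    uint16_t rqid)
{
        struct vscsiif_request *req;

        req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
        ring->req_prod_pvt++;
        memset(req, 0, sizeof(*req));
        req->rqid = rqid;
        return req;
}
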
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index de082130ba4b..f68719f405af 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -3,6 +3,24 @@
3 *
4 * Guest OS interface to Xen.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright (c) 2004, K A Fraser
25 */
26
@@ -73,13 +91,23 @@
91 * VIRTUAL INTERRUPTS
92 *
93 * Virtual interrupts that a guest OS may receive from Xen.
94 * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
95 * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
96 * The latter can be allocated only once per guest: they must initially be
97 * allocated to VCPU0 but can subsequently be re-bound.
98 */
77#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */
78#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */
79#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
80#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
81#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
82#define VIRQ_PCPU_STATE 9 /* (DOM0) PCPU state changed */
99#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
100#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
101#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
102#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
103#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
104#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
105#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
106#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
107#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
108#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
109#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
110#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
111
112/* Architecture-specific VIRQ definitions. */
113#define VIRQ_ARCH_0 16
@@ -92,24 +120,68 @@
120#define VIRQ_ARCH_7 23
121
122#define NR_VIRQS 24
123
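
Per the V./G. annotations, a global VIRQ such as the new VIRQ_ENOMEM must first be bound on VCPU0. A hedged sketch using Linux's event-channel helper (the handler and names are illustrative):

#include <linux/interrupt.h>
#include <linux/printk.h>
#include <xen/events.h>
#include <xen/interface/xen.h>

/* Illustrative handler and binding for the (global) VIRQ_ENOMEM; global
 * VIRQs must initially be bound on VCPU0, as noted above. */
static irqreturn_t enomem_interrupt(int irq, void *dev_id)
{
        pr_warn("Xen reports the hypervisor heap is low\n");
        return IRQ_HANDLED;
}

static int bind_enomem_virq(void)
{
        return bind_virq_to_irqhandler(VIRQ_ENOMEM, 0 /* VCPU0 */,
                                       enomem_interrupt, 0, "xen-enomem",
                                       NULL);
}
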
124/*
96 * MMU-UPDATE REQUESTS
97 *
98 * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
99 * A foreigndom (FD) can be specified (or DOMID_SELF for none).
100 * Where the FD has some effect, it is described below.
101 * ptr[1:0] specifies the appropriate MMU_* command.
125 * enum neg_errnoval HYPERVISOR_mmu_update(const struct mmu_update reqs[],
126 * unsigned count, unsigned *done_out,
127 * unsigned foreigndom)
128 * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
129 * @count is the length of the above array.
130 * @pdone is an output parameter indicating number of completed operations
131 * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this
132 * hypercall invocation. Can be DOMID_SELF.
133 * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced
134 * in this hypercall invocation. The value of this field
135 * (x) encodes the PFD as follows:
136 * x == 0 => PFD == DOMID_SELF
137 * x != 0 => PFD == x - 1
138 *
139 * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
140 * -------------
141 * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
104 * Updates an entry in a page table. If updating an L1 table, and the new
105 * table entry is valid/present, the mapped frame must belong to the FD, if
106 * an FD has been specified. If attempting to map an I/O page then the
107 * caller assumes the privilege of the FD.
142 * Updates an entry in a page table belonging to PFD. If updating an L1 table,
143 * and the new table entry is valid/present, the mapped frame must belong to
144 * FD. If attempting to map an I/O page then the caller assumes the privilege
145 * of the FD.
146 * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
147 * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
148 * ptr[:2] -- Machine address of the page-table entry to modify.
149 * val -- Value to write.
150 *
151 * There are also certain implicit requirements when using this hypercall. The
152 * pages that make up a pagetable must be mapped read-only in the guest.
153 * This prevents uncontrolled guest updates to the pagetable. Xen strictly
154 * enforces this, and will disallow any pagetable update which would end up
155 * mapping a pagetable page RW, and will disallow using any writable page as a
156 * pagetable. In practice this means that when constructing a page table for a
157 * process, thread, etc., we MUST be very diligent in following these rules:
158 * 1). Start with top-level page (PGD or in Xen language: L4). Fill out
159 * the entries.
160 * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD
161 * or L2).
162 * 3). Start filling out the PTE table (L1) with the PTE entries. Once
163 * done, make sure to set each of those entries to RO (so writeable bit
164 * is unset). Once that has been completed, set the PMD (L2) for this
165 * PTE table as RO.
166 * 4). When completed with all of the PMD (L2) entries, and all of them have
167 * been set to RO, make sure to set RO the PUD (L3). Do the same
168 * operation on PGD (L4) pagetable entries that have a PUD (L3) entry.
169 * 5). Now before you can use those pages (so setting the cr3), you MUST also
170 * pin them so that the hypervisor can verify the entries. This is done
171 * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame
172 * number of the PGD (L4)). At that point the HYPERVISOR_mmuext_op(
173 * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be
174 * issued.
175 * For 32-bit guests, the L4 is not used (as there are fewer pagetable levels), so
176 * instead use L3.
177 * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE
178 * hypercall. If so desired, the OS can also try to write to the PTE
179 * and be trapped by the hypervisor (as the PTE entry is RO).
180 *
181 * To deallocate the pages, the operations are the reverse of the steps
182 * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
183 * pagetable MUST not be in use (meaning that the cr3 is not set to it).
184 *
185 * ptr[1:0] == MMU_MACHPHYS_UPDATE:
186 * Updates an entry in the machine->pseudo-physical mapping table.
187 * ptr[:2] -- Machine address within the frame whose mapping to modify.
@@ -119,6 +191,72 @@
191 * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
192 * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
193 * with those in @val.
194 *
195 * @val is usually the machine frame number along with some attributes.
196 * The attributes by default follow the architecture-defined bits, meaning that
197 * if this is an X86_64 machine and a four-level page table layout is used, the
198 * layout of val is:
199 * - 63 if set means No execute (NX)
200 * - 46-13 the machine frame number
201 * - 12 available for guest
202 * - 11 available for guest
203 * - 10 available for guest
204 * - 9 available for guest
205 * - 8 global
206 * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages)
207 * - 6 dirty
208 * - 5 accessed
209 * - 4 page cached disabled
210 * - 3 page write through
211 * - 2 userspace accessible
212 * - 1 writeable
213 * - 0 present
214 *
215 * The one bit that does not fit with the default layout is PAGE_PSE
216 * (also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the
217 * HYPERVISOR_mmuext_op serve as a mechanism to mark a page as a 4MB
218 * (or 2MB) superpage instead of using the PAGE_PSE bit.
219 *
220 * The reason that PAGE_PSE (bit 7) is not utilized is that Xen uses it
221 * as the Page Attribute Table (PAT) bit - for details please
222 * refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
223 * pages instead of using MTRRs.
224 *
225 * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
226 * PAT4 PAT0
227 * +-----+-----+----+----+----+-----+----+----+
228 * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux
229 * +-----+-----+----+----+----+-----+----+----+
230 * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
231 * +-----+-----+----+----+----+-----+----+----+
232 * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen
233 * +-----+-----+----+----+----+-----+----+----+
234 *
235 * The lookup of this index table translates to looking up
236 * Bit 7, Bit 4, and Bit 3 of val entry:
237 *
238 * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3).
239 *
240 * If all bits are off, then we are using PAT0. If bit 3 is turned on,
241 * then we are using PAT1; if bits 3 and 4 are on, then PAT2; and so on.
242 *
243 * As you can see, the Linux PAT1 translates to PAT4 under Xen, which means
244 * that a guest that follows Linux's PAT setup and would like to set Write
245 * Combined on pages MUST use the PAT4 entry, i.e. Bit 7 (PAGE_PAT) must be
246 * set. For example, Linux only uses PAT0, PAT1, and PAT2 for
247 * caching, as:
248 *
249 * WB = none (so PAT0)
250 * WC = PWT (bit 3 on)
251 * UC = PWT | PCD (bit 3 and 4 are on).
252 *
253 * To make it work with Xen, the guest needs to translate the WC bit as follows:
254 *
255 * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3
256 *
257 * And the reverse translation is:
258 *
259 * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
260 */
261#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
262#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
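
Putting the pieces together, a single MMU_NORMAL_PT_UPDATE against the caller's own tables passes DOMID_SELF as the foreigndom (so both FD and PFD default to the caller), and a Write-Combined mapping sets the PAT bit (bit 7) rather than Linux's usual PWT-only encoding, as described above. A sketch (set_pte_wc is hypothetical; pte_ma and mfn come from wherever the guest tracks its page tables):

#include <linux/types.h>
#include <asm/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/xen.h>

/* Hypothetical helper: rewrite one L1 entry in our own page tables.
 * pte_ma is the machine address of the PTE, mfn the target machine frame. */
static int set_pte_wc(uint64_t pte_ma, uint64_t mfn)
{
        struct mmu_update u;

        u.ptr = pte_ma | MMU_NORMAL_PT_UPDATE;          /* ptr[1:0] = cmd */
        /* present | writeable | PAT (bit 7): Write-Combined under Xen,
         * instead of Linux's native PWT-only (bit 3) encoding. */
        u.val = (mfn << PAGE_SHIFT) | (1 << 0) | (1 << 1) | (1 << 7);

        return HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
}
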
@@ -127,7 +265,12 @@
265/*
266 * MMU EXTENDED OPERATIONS
267 *
130 * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
268 * enum neg_errnoval HYPERVISOR_mmuext_op(mmuext_op_t uops[],
269 * unsigned int count,
270 * unsigned int *pdone,
271 * unsigned int foreigndom)
272 */
273/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
274 * A foreigndom (FD) can be specified (or DOMID_SELF for none).
275 * Where the FD has some effect, it is described below.
276 *
@@ -164,9 +307,23 @@
307 * cmd: MMUEXT_FLUSH_CACHE
308 * No additional arguments. Writes back and flushes cache contents.
309 *
310 * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
311 * No additional arguments. Writes back and flushes cache contents
312 * on all CPUs in the system.
313 *
314 * cmd: MMUEXT_SET_LDT
315 * linear_addr: Linear address of LDT base (NB. must be page-aligned).
316 * nr_ents: Number of entries in LDT.
317 *
318 * cmd: MMUEXT_CLEAR_PAGE
319 * mfn: Machine frame number to be cleared.
320 *
321 * cmd: MMUEXT_COPY_PAGE
322 * mfn: Machine frame number of the destination page.
323 * src_mfn: Machine frame number of the source page.
324 *
325 * cmd: MMUEXT_[UN]MARK_SUPER
326 * mfn: Machine frame number of head of superpage to be [un]marked.
327 */
328#define MMUEXT_PIN_L1_TABLE 0
329#define MMUEXT_PIN_L2_TABLE 1
@@ -183,12 +340,18 @@
340#define MMUEXT_FLUSH_CACHE 12
341#define MMUEXT_SET_LDT 13
342#define MMUEXT_NEW_USER_BASEPTR 15
343#define MMUEXT_CLEAR_PAGE 16
344#define MMUEXT_COPY_PAGE 17
345#define MMUEXT_FLUSH_CACHE_GLOBAL 18
346#define MMUEXT_MARK_SUPER 19
347#define MMUEXT_UNMARK_SUPER 20
348
349#ifndef __ASSEMBLY__
350struct mmuext_op {
351 unsigned int cmd;
352 union {
191 /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
353 /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
354 * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
355 xen_pfn_t mfn;
356 /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
357 unsigned long linear_addr;
@@ -198,6 +361,8 @@ struct mmuext_op {
361 unsigned int nr_ents;
362 /* TLB_FLUSH_MULTI, INVLPG_MULTI */
363 void *vcpumask;
364 /* COPY_PAGE */
365 xen_pfn_t src_mfn;
366 } arg2;
367};
368DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
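
The pin-then-load sequence from the numbered MMU_NORMAL_PT_UPDATE comment earlier maps onto two mmuext_op entries. A hedged sketch (load_new_pgd is hypothetical; the L4 must already be mapped read-only):

#include <linux/kernel.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/xen.h>

/* Hypothetical helper: pin an already read-only L4 so Xen validates it,
 * then make it the current base pointer (step 5 of the list above). */
static int load_new_pgd(xen_pfn_t l4_mfn)
{
        struct mmuext_op ops[2] = {
                { .cmd = MMUEXT_PIN_L4_TABLE, .arg1.mfn = l4_mfn },
                { .cmd = MMUEXT_NEW_BASEPTR,  .arg1.mfn = l4_mfn },
        };

        return HYPERVISOR_mmuext_op(ops, ARRAY_SIZE(ops), NULL, DOMID_SELF);
}
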
@@ -225,10 +390,23 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
390 */
391#define VMASST_CMD_enable 0
392#define VMASST_CMD_disable 1
393
394/* x86/32 guests: simulate full 4GB segment limits. */
395#define VMASST_TYPE_4gb_segments 0
396
397/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
398#define VMASST_TYPE_4gb_segments_notify 1
399
400/*
401 * x86 guests: support writes to bottom-level PTEs.
402 * NB1. Page-directory entries cannot be written.
403 * NB2. Guest must continue to remove all writable mappings of PTEs.
404 */
405#define VMASST_TYPE_writable_pagetables 2
406
407/* x86/PAE guests: support PDPTs above 4GB. */
408#define VMASST_TYPE_pae_extended_cr3 3
409
410#define MAX_VMASST_TYPE 3
411
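
VM assists are toggled with HYPERVISOR_vm_assist(). A sketch of enabling writable pagetable emulation (illustrative only; whether a given assist is available depends on the hypervisor and guest type):

#include <linux/printk.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/xen.h>

/* Illustrative only: opt in to writable pagetable emulation. */
static void enable_writable_pagetables(void)
{
        if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                 VMASST_TYPE_writable_pagetables))
                pr_warn("writable pagetable assist not available\n");
}
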
412#ifndef __ASSEMBLY__
@@ -260,6 +438,15 @@ typedef uint16_t domid_t;
438 */
439#define DOMID_XEN (0x7FF2U)
440
441/* DOMID_COW is used as the owner of sharable pages */
442#define DOMID_COW (0x7FF3U)
443
444/* DOMID_INVALID is used to identify pages with unknown owner. */
445#define DOMID_INVALID (0x7FF4U)
446
447/* Idle domain. */
448#define DOMID_IDLE (0x7FFFU)
449
450/*
451 * Send an array of these to HYPERVISOR_mmu_update().
452 * NB. The fields are natural pointer/address size for this architecture.
@@ -272,7 +459,9 @@ DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
459
460/*
461 * Send an array of these to HYPERVISOR_multicall().
275 * NB. The fields are natural register size for this architecture.
462 * NB. The fields are logically the natural register size for this
463 * architecture. In cases where xen_ulong_t is larger than this then
464 * any unused bits in the upper portion must be zero.
465 */
466struct multicall_entry {
467 xen_ulong_t op;
@@ -442,8 +631,48 @@ struct start_info {
631 unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
632 unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
633 int8_t cmd_line[MAX_GUEST_CMDLINE];
634 /* The pfn range here covers both page table and p->m table frames. */
635 unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */
636 unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */
637};
638
639/* These flags are passed in the 'flags' field of start_info_t. */
640#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
641#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
642#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
643#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
644#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
645
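
A sketch of consuming the new SIF_MOD_START_PFN flag (module_start_pfn is a hypothetical helper): when the flag is set, mod_start already holds a PFN, otherwise it is a virtual address that has to be converted.

#include <linux/pfn.h>
#include <asm/page.h>
#include <xen/interface/xen.h>

/* Hypothetical helper: with SIF_MOD_START_PFN set, mod_start is already a
 * PFN (paired with XEN_ELFNOTE_MOD_START_PFN); otherwise it is a virtual
 * address of the pre-loaded module. */
static unsigned long module_start_pfn(const struct start_info *si)
{
        if (si->flags & SIF_MOD_START_PFN)
                return si->mod_start;
        return PFN_DOWN(__pa((void *)si->mod_start));
}
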
646/*
647 * A multiboot module is a package containing modules, very similar to a
648 * multiboot module array. The only differences are:
649 * - the array of module descriptors is by convention simply at the beginning
650 * of the multiboot module,
651 * - addresses in the module descriptors are based on the beginning of the
652 * multiboot module,
653 * - the number of modules is determined by a termination descriptor that has
654 * mod_start == 0.
655 *
656 * This permits both building it statically and referencing it in a
657 * configuration file, and lets the PV guest easily rebase the addresses to
658 * virtual addresses while at the same time counting the number of modules.
659 */
660struct xen_multiboot_mod_list {
661 /* Address of first byte of the module */
662 uint32_t mod_start;
663 /* Address of last byte of the module (inclusive) */
664 uint32_t mod_end;
665 /* Address of zero-terminated command line */
666 uint32_t cmdline;
667 /* Unused, must be zero */
668 uint32_t pad;
669};
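
Counting the modules exactly as the comment describes, by walking until the terminating descriptor with mod_start == 0 (a sketch; list is assumed to point at the mapped multiboot module):

#include <xen/interface/xen.h>

/* Count entries by walking to the terminating descriptor (mod_start == 0). */
static unsigned int multiboot_mod_count(const struct xen_multiboot_mod_list *list)
{
        unsigned int n = 0;

        while (list[n].mod_start != 0)
                n++;
        return n;
}
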
670/*
671 * The console structure in start_info.console.dom0
672 *
673 * This structure includes a variety of information required to
674 * have a working VGA/VESA console.
675 */
676struct dom0_vga_console_info {
677 uint8_t video_type;
678#define XEN_VGATYPE_TEXT_MODE_3 0x03
@@ -484,11 +713,6 @@ struct dom0_vga_console_info {
713 } u;
714};
715
487/* These flags are passed in the 'flags' field of start_info_t. */
488#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
489#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
490#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
491
716typedef uint64_t cpumap_t;
717
718typedef uint8_t xen_domain_handle_t[16];
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 0324c6d340c1..b78f21caf55a 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -86,6 +86,7 @@ struct xenbus_device_id
86
87/* A xenbus driver. */
88struct xenbus_driver {
89 const char *name; /* defaults to ids[0].devicetype */
90 const struct xenbus_device_id *ids;
91 int (*probe)(struct xenbus_device *dev,
92 const struct xenbus_device_id *id);
@@ -100,20 +101,22 @@ struct xenbus_driver {
101 int (*is_ready)(struct xenbus_device *dev);
102};
103
103#define DEFINE_XENBUS_DRIVER(var, drvname, methods...) \
104struct xenbus_driver var ## _driver = { \
105 .driver.name = drvname + 0 ?: var ## _ids->devicetype, \
106 .driver.owner = THIS_MODULE, \
107 .ids = var ## _ids, ## methods \
108}
109
104static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
105{
106 return container_of(drv, struct xenbus_driver, driver);
107}
108
115int __must_check xenbus_register_frontend(struct xenbus_driver *);
116int __must_check xenbus_register_backend(struct xenbus_driver *);
109int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
110 struct module *owner,
111 const char *mod_name);
112int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
113 struct module *owner,
114 const char *mod_name);
115
116#define xenbus_register_frontend(drv) \
117 __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
118#define xenbus_register_backend(drv) \
119 __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
120
121void xenbus_unregister_driver(struct xenbus_driver *drv);
122
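
With DEFINE_XENBUS_DRIVER() gone, a frontend now fills in struct xenbus_driver directly and lets the xenbus_register_frontend() wrapper supply THIS_MODULE and KBUILD_MODNAME. A minimal sketch with hypothetical names:

#include <linux/init.h>
#include <linux/module.h>
#include <xen/xenbus.h>

static int demofront_probe(struct xenbus_device *dev,
                           const struct xenbus_device_id *id)
{
        /* device setup would go here */
        return 0;
}

static const struct xenbus_device_id demofront_ids[] = {
        { "demo" },
        { "" }
};

static struct xenbus_driver demofront_driver = {
        .ids = demofront_ids,
        .probe = demofront_probe,
};

static int __init demofront_init(void)
{
        /* expands to __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME) */
        return xenbus_register_frontend(&demofront_driver);
}
module_init(demofront_init);
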