aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorDmitry Torokhov <dmitry.torokhov@gmail.com>2016-06-18 20:25:08 -0400
committerDmitry Torokhov <dmitry.torokhov@gmail.com>2016-06-18 20:25:08 -0400
commit6ea24cf79e055f0a62a64baa8587e2254a493c7b (patch)
treec5cd6113ed93854b1bc30cd471c366f080c4be2f /include/linux
parent540c26087bfbad6ea72758b76b16ae6282a73fea (diff)
parent488326947cd1f038da8d2c9068a0d07b913b7983 (diff)
Merge branch 'cec-defines' into for-linus
Let's bring in HDMI CEC defines to ease merging CEC support in the next merge window.
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/amba/bus.h9
-rw-r--r--include/linux/apple-gmux.h50
-rw-r--r--include/linux/atmel_serial.h3
-rw-r--r--include/linux/atomic.h31
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/auto_dev-ioctl.h6
-rw-r--r--include/linux/auto_fs.h10
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/bcma/bcma.h3
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h42
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/bitmap.h10
-rw-r--r--include/linux/blk-mq.h16
-rw-r--r--include/linux/blkdev.h7
-rw-r--r--include/linux/bpf.h35
-rw-r--r--include/linux/brcmphy.h2
-rw-r--r--include/linux/buffer_head.h14
-rw-r--r--include/linux/bug.h9
-rw-r--r--include/linux/cache.h14
-rw-r--r--include/linux/capability.h2
-rw-r--r--include/linux/ccp.h17
-rw-r--r--include/linux/ceph/auth.h10
-rw-r--r--include/linux/ceph/ceph_features.h2
-rw-r--r--include/linux/ceph/ceph_fs.h7
-rw-r--r--include/linux/ceph/libceph.h12
-rw-r--r--include/linux/ceph/mon_client.h31
-rw-r--r--include/linux/ceph/osd_client.h16
-rw-r--r--include/linux/cgroup-defs.h47
-rw-r--r--include/linux/cgroup.h49
-rw-r--r--include/linux/clk-provider.h30
-rw-r--r--include/linux/clk/at91_pmc.h12
-rw-r--r--include/linux/clk/renesas.h (renamed from include/linux/clk/shmobile.h)4
-rw-r--r--include/linux/clk/ti.h8
-rw-r--r--include/linux/clkdev.h3
-rw-r--r--include/linux/clockchips.h4
-rw-r--r--include/linux/clocksource.h45
-rw-r--r--include/linux/compaction.h16
-rw-r--r--include/linux/compat.h21
-rw-r--r--include/linux/compiler-clang.h5
-rw-r--r--include/linux/compiler-gcc.h4
-rw-r--r--include/linux/compiler.h17
-rw-r--r--include/linux/configfs.h15
-rw-r--r--include/linux/coresight-pmu.h39
-rw-r--r--include/linux/coresight.h34
-rw-r--r--include/linux/cpu.h27
-rw-r--r--include/linux/cpufreq.h47
-rw-r--r--include/linux/cpuhotplug.h93
-rw-r--r--include/linux/cpumask.h2
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/cred.h5
-rw-r--r--include/linux/crypto.h252
-rw-r--r--include/linux/davinci_emac.h4
-rw-r--r--include/linux/dcache.h57
-rw-r--r--include/linux/debugfs.h8
-rw-r--r--include/linux/device-mapper.h15
-rw-r--r--include/linux/device.h24
-rw-r--r--include/linux/devpts_fs.h38
-rw-r--r--include/linux/dma-attrs.h1
-rw-r--r--include/linux/dma-buf.h14
-rw-r--r--include/linux/dma-mapping.h27
-rw-r--r--include/linux/dmaengine.h8
-rw-r--r--include/linux/dqblk_qtree.h2
-rw-r--r--include/linux/eeprom_93xx46.h9
-rw-r--r--include/linux/efi.h91
-rw-r--r--include/linux/ethtool.h102
-rw-r--r--include/linux/exportfs.h6
-rw-r--r--include/linux/f2fs_fs.h38
-rw-r--r--include/linux/fault-inject.h5
-rw-r--r--include/linux/fb.h3
-rw-r--r--include/linux/fence.h4
-rw-r--r--include/linux/filter.h4
-rw-r--r--include/linux/frame.h23
-rw-r--r--include/linux/freezer.h2
-rw-r--r--include/linux/fs.h51
-rw-r--r--include/linux/fscrypto.h435
-rw-r--r--include/linux/fsl/guts.h105
-rw-r--r--include/linux/fsnotify.h9
-rw-r--r--include/linux/fsnotify_backend.h9
-rw-r--r--include/linux/ftrace.h23
-rw-r--r--include/linux/gfp.h49
-rw-r--r--include/linux/gpio/driver.h39
-rw-r--r--include/linux/hash.h20
-rw-r--r--include/linux/hrtimer.h12
-rw-r--r--include/linux/huge_mm.h32
-rw-r--r--include/linux/hyperv.h96
-rw-r--r--include/linux/ieee80211.h26
-rw-r--r--include/linux/if_bridge.h4
-rw-r--r--include/linux/if_ether.h5
-rw-r--r--include/linux/if_team.h1
-rw-r--r--include/linux/igmp.h5
-rw-r--r--include/linux/iio/common/st_sensors.h4
-rw-r--r--include/linux/iio/iio.h8
-rw-r--r--include/linux/ima.h10
-rw-r--r--include/linux/inet_lro.h142
-rw-r--r--include/linux/init.h4
-rw-r--r--include/linux/interrupt.h30
-rw-r--r--include/linux/io.h1
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/ioport.h38
-rw-r--r--include/linux/ipv6.h3
-rw-r--r--include/linux/irq.h27
-rw-r--r--include/linux/irqchip/mips-gic.h3
-rw-r--r--include/linux/irqdomain.h46
-rw-r--r--include/linux/iscsi_boot_sysfs.h1
-rw-r--r--include/linux/isdn.h1
-rw-r--r--include/linux/jbd2.h16
-rw-r--r--include/linux/kasan.h31
-rw-r--r--include/linux/kcov.h29
-rw-r--r--include/linux/kernel.h31
-rw-r--r--include/linux/kernfs.h13
-rw-r--r--include/linux/key.h1
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/kvm_host.h5
-rw-r--r--include/linux/latencytop.h3
-rw-r--r--include/linux/leds.h8
-rw-r--r--include/linux/libnvdimm.h5
-rw-r--r--include/linux/lightnvm.h22
-rw-r--r--include/linux/list_bl.h4
-rw-r--r--include/linux/livepatch.h9
-rw-r--r--include/linux/lockdep.h10
-rw-r--r--include/linux/lsm_hooks.h35
-rw-r--r--include/linux/mISDNif.h2
-rw-r--r--include/linux/mbcache.h93
-rw-r--r--include/linux/mbus.h3
-rw-r--r--include/linux/memcontrol.h147
-rw-r--r--include/linux/memory.h14
-rw-r--r--include/linux/memory_hotplug.h7
-rw-r--r--include/linux/mfd/as3711.h3
-rw-r--r--include/linux/mfd/axp20x.h34
-rw-r--r--include/linux/mfd/cros_ec.h2
-rw-r--r--include/linux/mfd/imx25-tsadc.h140
-rw-r--r--include/linux/mfd/max77686-private.h3
-rw-r--r--include/linux/mfd/mt6323/core.h36
-rw-r--r--include/linux/mfd/mt6323/registers.h408
-rw-r--r--include/linux/mfd/mt6397/core.h2
-rw-r--r--include/linux/mfd/palmas.h3
-rw-r--r--include/linux/mfd/rc5t583.h5
-rw-r--r--include/linux/mfd/samsung/s2mps11.h2
-rw-r--r--include/linux/mfd/syscon.h8
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h5
-rw-r--r--include/linux/mfd/tmio.h4
-rw-r--r--include/linux/mfd/tps65086.h117
-rw-r--r--include/linux/mfd/tps65090.h5
-rw-r--r--include/linux/mfd/tps65912.h209
-rw-r--r--include/linux/migrate.h6
-rw-r--r--include/linux/mlx4/device.h10
-rw-r--r--include/linux/mlx4/driver.h3
-rw-r--r--include/linux/mlx5/device.h67
-rw-r--r--include/linux/mlx5/driver.h81
-rw-r--r--include/linux/mlx5/fs.h5
-rw-r--r--include/linux/mlx5/mlx5_ifc.h195
-rw-r--r--include/linux/mlx5/port.h87
-rw-r--r--include/linux/mlx5/qp.h7
-rw-r--r--include/linux/mlx5/vport.h9
-rw-r--r--include/linux/mm.h166
-rw-r--r--include/linux/mm_types.h24
-rw-r--r--include/linux/mman.h6
-rw-r--r--include/linux/mmc/core.h1
-rw-r--r--include/linux/mmc/dw_mmc.h12
-rw-r--r--include/linux/mmc/tmio.h5
-rw-r--r--include/linux/mmdebug.h3
-rw-r--r--include/linux/mmzone.h26
-rw-r--r--include/linux/msi.h9
-rw-r--r--include/linux/mtd/bbm.h1
-rw-r--r--include/linux/mtd/inftl.h1
-rw-r--r--include/linux/mtd/map.h7
-rw-r--r--include/linux/mtd/mtd.h6
-rw-r--r--include/linux/mtd/nand.h10
-rw-r--r--include/linux/mtd/nand_bch.h8
-rw-r--r--include/linux/mtd/nftl.h1
-rw-r--r--include/linux/mtd/spi-nor.h2
-rw-r--r--include/linux/namei.h3
-rw-r--r--include/linux/nd.h7
-rw-r--r--include/linux/net.h11
-rw-r--r--include/linux/netdev_features.h3
-rw-r--r--include/linux/netdevice.h337
-rw-r--r--include/linux/netfilter.h29
-rw-r--r--include/linux/netfilter/ipset/ip_set.h4
-rw-r--r--include/linux/netfilter/nfnetlink.h2
-rw-r--r--include/linux/netfilter/x_tables.h6
-rw-r--r--include/linux/netfilter_arp/arp_tables.h9
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h9
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h9
-rw-r--r--include/linux/netlink.h10
-rw-r--r--include/linux/nfs4.h19
-rw-r--r--include/linux/nfs_page.h6
-rw-r--r--include/linux/nilfs2_fs.h4
-rw-r--r--include/linux/nmi.h1
-rw-r--r--include/linux/notifier.h2
-rw-r--r--include/linux/nsproxy.h2
-rw-r--r--include/linux/ntb.h10
-rw-r--r--include/linux/nvmem-provider.h5
-rw-r--r--include/linux/of.h20
-rw-r--r--include/linux/of_fdt.h2
-rw-r--r--include/linux/omap-gpmc.h5
-rw-r--r--include/linux/oom.h4
-rw-r--r--include/linux/page-flags-layout.h2
-rw-r--r--include/linux/page-flags.h54
-rw-r--r--include/linux/page_ext.h1
-rw-r--r--include/linux/page_owner.h50
-rw-r--r--include/linux/page_ref.h173
-rw-r--r--include/linux/pagemap.h54
-rw-r--r--include/linux/pci-dma-compat.h147
-rw-r--r--include/linux/pci.h89
-rw-r--r--include/linux/pci_ids.h5
-rw-r--r--include/linux/perf/arm_pmu.h2
-rw-r--r--include/linux/perf_event.h25
-rw-r--r--include/linux/phy.h3
-rw-r--r--include/linux/phy_fixed.h5
-rw-r--r--include/linux/pkeys.h33
-rw-r--r--include/linux/platform_data/ad5761.h44
-rw-r--r--include/linux/platform_data/adau17x1.h2
-rw-r--r--include/linux/platform_data/at24.h10
-rw-r--r--include/linux/platform_data/brcmfmac-sdio.h135
-rw-r--r--include/linux/platform_data/brcmfmac.h185
-rw-r--r--include/linux/platform_data/microread.h35
-rw-r--r--include/linux/platform_data/mmp_dma.h1
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h1
-rw-r--r--include/linux/platform_data/ntc_thermistor.h1
-rw-r--r--include/linux/platform_data/sa11x0-serial.h8
-rw-r--r--include/linux/platform_data/serial-omap.h2
-rw-r--r--include/linux/pm_clock.h9
-rw-r--r--include/linux/pm_domain.h13
-rw-r--r--include/linux/pm_opp.h27
-rw-r--r--include/linux/pmem.h57
-rw-r--r--include/linux/poison.h4
-rw-r--r--include/linux/poll.h2
-rw-r--r--include/linux/posix-timers.h3
-rw-r--r--include/linux/power/bq24735-charger.h2
-rw-r--r--include/linux/power_supply.h3
-rw-r--r--include/linux/pps_kernel.h17
-rw-r--r--include/linux/proc_ns.h4
-rw-r--r--include/linux/psci.h3
-rw-r--r--include/linux/pstore_ram.h2
-rw-r--r--include/linux/ptp_clock_kernel.h8
-rw-r--r--include/linux/pxa2xx_ssp.h1
-rw-r--r--include/linux/qed/common_hsi.h36
-rw-r--r--include/linux/qed/eth_common.h171
-rw-r--r--include/linux/qed/qed_chain.h4
-rw-r--r--include/linux/qed/qed_eth_if.h14
-rw-r--r--include/linux/qed/qed_if.h14
-rw-r--r--include/linux/quicklist.h2
-rw-r--r--include/linux/quota.h5
-rw-r--r--include/linux/quotaops.h3
-rw-r--r--include/linux/radix-tree.h28
-rw-r--r--include/linux/rculist.h21
-rw-r--r--include/linux/rculist_nulls.h39
-rw-r--r--include/linux/rcupdate.h6
-rw-r--r--include/linux/regmap.h107
-rw-r--r--include/linux/regulator/act8865.h4
-rw-r--r--include/linux/regulator/driver.h17
-rw-r--r--include/linux/regulator/lp872x.h5
-rw-r--r--include/linux/regulator/machine.h12
-rw-r--r--include/linux/reset-controller.h2
-rw-r--r--include/linux/rfkill-gpio.h37
-rw-r--r--include/linux/rfkill.h18
-rw-r--r--include/linux/rio.h98
-rw-r--r--include/linux/rio_drv.h15
-rw-r--r--include/linux/rio_regs.h3
-rw-r--r--include/linux/rmap.h6
-rw-r--r--include/linux/rtc.h4
-rw-r--r--include/linux/sched.h62
-rw-r--r--include/linux/sched/sysctl.h25
-rw-r--r--include/linux/scpi_protocol.h3
-rw-r--r--include/linux/security.h16
-rw-r--r--include/linux/seq_file.h13
-rw-r--r--include/linux/serial_8250.h8
-rw-r--r--include/linux/serial_core.h25
-rw-r--r--include/linux/skbuff.h69
-rw-r--r--include/linux/slab.h23
-rw-r--r--include/linux/slab_def.h17
-rw-r--r--include/linux/slub_def.h12
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h24
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h693
-rw-r--r--include/linux/soc/ti/ti-msgmgr.h35
-rw-r--r--include/linux/socket.h7
-rw-r--r--include/linux/spi/eeprom.h2
-rw-r--r--include/linux/spi/spi.h145
-rw-r--r--include/linux/srcu.h19
-rw-r--r--include/linux/stackdepot.h32
-rw-r--r--include/linux/stm.h10
-rw-r--r--include/linux/stmmac.h18
-rw-r--r--include/linux/string.h8
-rw-r--r--include/linux/sunrpc/auth.h7
-rw-r--r--include/linux/sunrpc/clnt.h18
-rw-r--r--include/linux/sunrpc/gss_krb5.h32
-rw-r--r--include/linux/sunrpc/rpc_rdma.h12
-rw-r--r--include/linux/sunrpc/sched.h32
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svc_rdma.h20
-rw-r--r--include/linux/sunrpc/xprt.h22
-rw-r--r--include/linux/sunrpc/xprtmultipath.h69
-rw-r--r--include/linux/sunrpc/xprtrdma.h2
-rw-r--r--include/linux/swait.h172
-rw-r--r--include/linux/swap.h14
-rw-r--r--include/linux/syscalls.h6
-rw-r--r--include/linux/tcp.h14
-rw-r--r--include/linux/thermal.h24
-rw-r--r--include/linux/tick.h99
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/timekeeping.h58
-rw-r--r--include/linux/trace_events.h17
-rw-r--r--include/linux/tracepoint-defs.h14
-rw-r--r--include/linux/tty.h58
-rw-r--r--include/linux/tty_driver.h4
-rw-r--r--include/linux/tty_ldisc.h13
-rw-r--r--include/linux/uio.h1
-rw-r--r--include/linux/unaligned/access_ok.h24
-rw-r--r--include/linux/usb.h11
-rw-r--r--include/linux/usb/composite.h6
-rw-r--r--include/linux/usb/gadget.h20
-rw-r--r--include/linux/usb/hcd.h5
-rw-r--r--include/linux/usb/msm_hsusb_hw.h1
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/linux/usb/of.h7
-rw-r--r--include/linux/usb/otg-fsm.h15
-rw-r--r--include/linux/usb/renesas_usbhs.h1
-rw-r--r--include/linux/usb/storage.h12
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/linux/vfio.h11
-rw-r--r--include/linux/vga_switcheroo.h36
-rw-r--r--include/linux/virtio.h23
-rw-r--r--include/linux/virtio_ring.h35
-rw-r--r--include/linux/vm_event_item.h2
-rw-r--r--include/linux/vmw_vmci_defs.h43
-rw-r--r--include/linux/wait.h2
-rw-r--r--include/linux/watchdog.h43
327 files changed, 7530 insertions, 2272 deletions
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 9006c4e75cf7..3d8dcdd1aeae 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -163,4 +163,13 @@ struct amba_device name##_device = { \
163#define module_amba_driver(__amba_drv) \ 163#define module_amba_driver(__amba_drv) \
164 module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) 164 module_driver(__amba_drv, amba_driver_register, amba_driver_unregister)
165 165
166/*
167 * builtin_amba_driver() - Helper macro for drivers that don't do anything
168 * special in driver initcall. This eliminates a lot of boilerplate. Each
169 * driver may only use this macro once, and calling it replaces the instance
170 * device_initcall().
171 */
172#define builtin_amba_driver(__amba_drv) \
173 builtin_driver(__amba_drv, amba_driver_register)
174
166#endif 175#endif
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
new file mode 100644
index 000000000000..b2d32e01dfe4
--- /dev/null
+++ b/include/linux/apple-gmux.h
@@ -0,0 +1,50 @@
1/*
2 * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro
3 * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License (version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef LINUX_APPLE_GMUX_H
19#define LINUX_APPLE_GMUX_H
20
21#include <linux/acpi.h>
22
23#define GMUX_ACPI_HID "APP000B"
24
25#if IS_ENABLED(CONFIG_APPLE_GMUX)
26
27/**
28 * apple_gmux_present() - detect if gmux is built into the machine
29 *
30 * Drivers may use this to activate quirks specific to dual GPU MacBook Pros
31 * and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
32 *
33 * Return: %true if gmux is present and the kernel was configured
34 * with CONFIG_APPLE_GMUX, %false otherwise.
35 */
36static inline bool apple_gmux_present(void)
37{
38 return acpi_dev_present(GMUX_ACPI_HID);
39}
40
41#else /* !CONFIG_APPLE_GMUX */
42
43static inline bool apple_gmux_present(void)
44{
45 return false;
46}
47
48#endif /* !CONFIG_APPLE_GMUX */
49
50#endif /* LINUX_APPLE_GMUX_H */
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index ee696d7e8a43..5a4d664af87a 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -119,7 +119,8 @@
119#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */ 119#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
120#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */ 120#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
121 121
122#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */ 122#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register for USART */
123#define ATMEL_UA_RTOR 0x28 /* Receiver Time-out Register for UART */
123#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */ 124#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
124 125
125#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */ 126#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 301de78d65f7..506c3531832e 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -34,20 +34,29 @@
34 * The idea here is to build acquire/release variants by adding explicit 34 * The idea here is to build acquire/release variants by adding explicit
35 * barriers on top of the relaxed variant. In the case where the relaxed 35 * barriers on top of the relaxed variant. In the case where the relaxed
36 * variant is already fully ordered, no additional barriers are needed. 36 * variant is already fully ordered, no additional barriers are needed.
37 *
38 * Besides, if an arch has a special barrier for acquire/release, it could
39 * implement its own __atomic_op_* and use the same framework for building
40 * variants
37 */ 41 */
42#ifndef __atomic_op_acquire
38#define __atomic_op_acquire(op, args...) \ 43#define __atomic_op_acquire(op, args...) \
39({ \ 44({ \
40 typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \ 45 typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
41 smp_mb__after_atomic(); \ 46 smp_mb__after_atomic(); \
42 __ret; \ 47 __ret; \
43}) 48})
49#endif
44 50
51#ifndef __atomic_op_release
45#define __atomic_op_release(op, args...) \ 52#define __atomic_op_release(op, args...) \
46({ \ 53({ \
47 smp_mb__before_atomic(); \ 54 smp_mb__before_atomic(); \
48 op##_relaxed(args); \ 55 op##_relaxed(args); \
49}) 56})
57#endif
50 58
59#ifndef __atomic_op_fence
51#define __atomic_op_fence(op, args...) \ 60#define __atomic_op_fence(op, args...) \
52({ \ 61({ \
53 typeof(op##_relaxed(args)) __ret; \ 62 typeof(op##_relaxed(args)) __ret; \
@@ -56,6 +65,7 @@
56 smp_mb__after_atomic(); \ 65 smp_mb__after_atomic(); \
57 __ret; \ 66 __ret; \
58}) 67})
68#endif
59 69
60/* atomic_add_return_relaxed */ 70/* atomic_add_return_relaxed */
61#ifndef atomic_add_return_relaxed 71#ifndef atomic_add_return_relaxed
@@ -548,6 +558,27 @@ static inline int atomic_dec_if_positive(atomic_t *v)
548} 558}
549#endif 559#endif
550 560
561/**
562 * atomic_fetch_or - perform *p |= mask and return old value of *p
563 * @p: pointer to atomic_t
564 * @mask: mask to OR on the atomic_t
565 */
566#ifndef atomic_fetch_or
567static inline int atomic_fetch_or(atomic_t *p, int mask)
568{
569 int old, val = atomic_read(p);
570
571 for (;;) {
572 old = atomic_cmpxchg(p, val, val | mask);
573 if (old == val)
574 break;
575 val = old;
576 }
577
578 return old;
579}
580#endif
581
551#ifdef CONFIG_GENERIC_ATOMIC64 582#ifdef CONFIG_GENERIC_ATOMIC64
552#include <asm-generic/atomic64.h> 583#include <asm-generic/atomic64.h>
553#endif 584#endif
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b40ed5df5542..e38e3fc13ea8 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -109,6 +109,10 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
109/* maximized args number that audit_socketcall can process */ 109/* maximized args number that audit_socketcall can process */
110#define AUDITSC_ARGS 6 110#define AUDITSC_ARGS 6
111 111
112/* bit values for ->signal->audit_tty */
113#define AUDIT_TTY_ENABLE BIT(0)
114#define AUDIT_TTY_LOG_PASSWD BIT(1)
115
112struct filename; 116struct filename;
113 117
114extern void audit_log_session_info(struct audit_buffer *ab); 118extern void audit_log_session_info(struct audit_buffer *ab);
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 850f39b33e74..7caaf298f539 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -11,12 +11,7 @@
11#define _LINUX_AUTO_DEV_IOCTL_H 11#define _LINUX_AUTO_DEV_IOCTL_H
12 12
13#include <linux/auto_fs.h> 13#include <linux/auto_fs.h>
14
15#ifdef __KERNEL__
16#include <linux/string.h> 14#include <linux/string.h>
17#else
18#include <string.h>
19#endif /* __KERNEL__ */
20 15
21#define AUTOFS_DEVICE_NAME "autofs" 16#define AUTOFS_DEVICE_NAME "autofs"
22 17
@@ -125,7 +120,6 @@ static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
125 in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; 120 in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
126 in->size = sizeof(struct autofs_dev_ioctl); 121 in->size = sizeof(struct autofs_dev_ioctl);
127 in->ioctlfd = -1; 122 in->ioctlfd = -1;
128 return;
129} 123}
130 124
131/* 125/*
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index fcd704d354c4..b4066bb89083 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -1,14 +1,10 @@
1/* -*- linux-c -*- ------------------------------------------------------- * 1/*
2 * 2 * Copyright 1997 Transmeta Corporation - All Rights Reserved
3 * linux/include/linux/auto_fs.h
4 *
5 * Copyright 1997 Transmeta Corporation - All Rights Reserved
6 * 3 *
7 * This file is part of the Linux kernel and is made available under 4 * This file is part of the Linux kernel and is made available under
8 * the terms of the GNU General Public License, version 2, or at your 5 * the terms of the GNU General Public License, version 2, or at your
9 * option, any later version, incorporated herein by reference. 6 * option, any later version, incorporated herein by reference.
10 * 7 */
11 * ----------------------------------------------------------------------- */
12 8
13#ifndef _LINUX_AUTO_FS_H 9#ifndef _LINUX_AUTO_FS_H
14#define _LINUX_AUTO_FS_H 10#define _LINUX_AUTO_FS_H
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f68c33..3f103076d0bf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@ struct bdi_writeback {
135 135
136struct backing_dev_info { 136struct backing_dev_info {
137 struct list_head bdi_list; 137 struct list_head bdi_list;
138 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ 138 unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
139 unsigned int capabilities; /* Device capabilities */ 139 unsigned int capabilities; /* Device capabilities */
140 congested_fn *congested_fn; /* Function pointer if device is md/dm */ 140 congested_fn *congested_fn; /* Function pointer if device is md/dm */
141 void *congested_data; /* Pointer to aux data for congested func */ 141 void *congested_data; /* Pointer to aux data for congested func */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 3feb1b2d75d8..0367c63f5960 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -151,6 +151,8 @@ struct bcma_host_ops {
151#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ 151#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */
152#define BCMA_CORE_USB30_DEV 0x83D 152#define BCMA_CORE_USB30_DEV 0x83D
153#define BCMA_CORE_ARM_CR4 0x83E 153#define BCMA_CORE_ARM_CR4 0x83E
154#define BCMA_CORE_GCI 0x840
155#define BCMA_CORE_CMEM 0x846 /* CNDS DDR2/3 memory controller */
154#define BCMA_CORE_ARM_CA7 0x847 156#define BCMA_CORE_ARM_CA7 0x847
155#define BCMA_CORE_SYS_MEM 0x849 157#define BCMA_CORE_SYS_MEM 0x849
156#define BCMA_CORE_DEFAULT 0xFFF 158#define BCMA_CORE_DEFAULT 0xFFF
@@ -199,6 +201,7 @@ struct bcma_host_ops {
199#define BCMA_PKG_ID_BCM4707 1 201#define BCMA_PKG_ID_BCM4707 1
200#define BCMA_PKG_ID_BCM4708 2 202#define BCMA_PKG_ID_BCM4708 2
201#define BCMA_PKG_ID_BCM4709 0 203#define BCMA_PKG_ID_BCM4709 0
204#define BCMA_CHIP_ID_BCM47094 53030
202#define BCMA_CHIP_ID_BCM53018 53018 205#define BCMA_CHIP_ID_BCM53018 53018
203 206
204/* Board types (on PCI usually equals to the subsystem dev id) */ 207/* Board types (on PCI usually equals to the subsystem dev id) */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index db51a6ffb7d6..846513c73606 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -217,6 +217,11 @@
217#define BCMA_CC_CLKDIV_JTAG_SHIFT 8 217#define BCMA_CC_CLKDIV_JTAG_SHIFT 8
218#define BCMA_CC_CLKDIV_UART 0x000000FF 218#define BCMA_CC_CLKDIV_UART 0x000000FF
219#define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */ 219#define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */
220#define BCMA_CC_CAP_EXT_SECI_PRESENT 0x00000001
221#define BCMA_CC_CAP_EXT_GSIO_PRESENT 0x00000002
222#define BCMA_CC_CAP_EXT_GCI_PRESENT 0x00000004
223#define BCMA_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */
224#define BCMA_CC_CAP_EXT_AOB_PRESENT 0x00000040
220#define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */ 225#define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */
221#define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ 226#define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */
222#define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ 227#define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */
@@ -351,12 +356,12 @@
351#define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ 356#define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */
352#define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */ 357#define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */
353#define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */ 358#define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */
354#define BCMA_CC_CHIPCTL_ADDR 0x0650 359#define BCMA_CC_PMU_CHIPCTL_ADDR 0x0650
355#define BCMA_CC_CHIPCTL_DATA 0x0654 360#define BCMA_CC_PMU_CHIPCTL_DATA 0x0654
356#define BCMA_CC_REGCTL_ADDR 0x0658 361#define BCMA_CC_PMU_REGCTL_ADDR 0x0658
357#define BCMA_CC_REGCTL_DATA 0x065C 362#define BCMA_CC_PMU_REGCTL_DATA 0x065C
358#define BCMA_CC_PLLCTL_ADDR 0x0660 363#define BCMA_CC_PMU_PLLCTL_ADDR 0x0660
359#define BCMA_CC_PLLCTL_DATA 0x0664 364#define BCMA_CC_PMU_PLLCTL_DATA 0x0664
360#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */ 365#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */
361#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */ 366#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */
362#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF 367#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF
@@ -566,17 +571,16 @@
566 * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) 571 * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
567 */ 572 */
568struct bcma_chipcommon_pmu { 573struct bcma_chipcommon_pmu {
574 struct bcma_device *core; /* Can be separated core or just ChipCommon one */
569 u8 rev; /* PMU revision */ 575 u8 rev; /* PMU revision */
570 u32 crystalfreq; /* The active crystal frequency (in kHz) */ 576 u32 crystalfreq; /* The active crystal frequency (in kHz) */
571}; 577};
572 578
573#ifdef CONFIG_BCMA_DRIVER_MIPS 579#ifdef CONFIG_BCMA_PFLASH
574struct bcma_pflash { 580struct bcma_pflash {
575 bool present; 581 bool present;
576 u8 buswidth;
577 u32 window;
578 u32 window_size;
579}; 582};
583#endif
580 584
581#ifdef CONFIG_BCMA_SFLASH 585#ifdef CONFIG_BCMA_SFLASH
582struct mtd_info; 586struct mtd_info;
@@ -600,6 +604,7 @@ struct bcma_nflash {
600}; 604};
601#endif 605#endif
602 606
607#ifdef CONFIG_BCMA_DRIVER_MIPS
603struct bcma_serial_port { 608struct bcma_serial_port {
604 void *regs; 609 void *regs;
605 unsigned long clockspeed; 610 unsigned long clockspeed;
@@ -619,8 +624,9 @@ struct bcma_drv_cc {
619 /* Fast Powerup Delay constant */ 624 /* Fast Powerup Delay constant */
620 u16 fast_pwrup_delay; 625 u16 fast_pwrup_delay;
621 struct bcma_chipcommon_pmu pmu; 626 struct bcma_chipcommon_pmu pmu;
622#ifdef CONFIG_BCMA_DRIVER_MIPS 627#ifdef CONFIG_BCMA_PFLASH
623 struct bcma_pflash pflash; 628 struct bcma_pflash pflash;
629#endif
624#ifdef CONFIG_BCMA_SFLASH 630#ifdef CONFIG_BCMA_SFLASH
625 struct bcma_sflash sflash; 631 struct bcma_sflash sflash;
626#endif 632#endif
@@ -628,6 +634,7 @@ struct bcma_drv_cc {
628 struct bcma_nflash nflash; 634 struct bcma_nflash nflash;
629#endif 635#endif
630 636
637#ifdef CONFIG_BCMA_DRIVER_MIPS
631 int nr_serial_ports; 638 int nr_serial_ports;
632 struct bcma_serial_port serial_ports[4]; 639 struct bcma_serial_port serial_ports[4];
633#endif /* CONFIG_BCMA_DRIVER_MIPS */ 640#endif /* CONFIG_BCMA_DRIVER_MIPS */
@@ -660,6 +667,19 @@ struct bcma_drv_cc_b {
660#define bcma_cc_maskset32(cc, offset, mask, set) \ 667#define bcma_cc_maskset32(cc, offset, mask, set) \
661 bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) 668 bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
662 669
670/* PMU registers access */
671#define bcma_pmu_read32(cc, offset) \
672 bcma_read32((cc)->pmu.core, offset)
673#define bcma_pmu_write32(cc, offset, val) \
674 bcma_write32((cc)->pmu.core, offset, val)
675
676#define bcma_pmu_mask32(cc, offset, mask) \
677 bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask))
678#define bcma_pmu_set32(cc, offset, set) \
679 bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set))
680#define bcma_pmu_maskset32(cc, offset, mask, set) \
681 bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set))
682
663extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); 683extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
664 684
665extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); 685extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 88bc64f00bb5..6b7481f62218 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -41,7 +41,7 @@
41#endif 41#endif
42 42
43#define BIO_MAX_PAGES 256 43#define BIO_MAX_PAGES 256
44#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT) 44#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT)
45#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) 45#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
46 46
47/* 47/*
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 9653fdb76a42..e9b0b9ab07e5 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -59,6 +59,8 @@
59 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region 59 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
60 * bitmap_release_region(bitmap, pos, order) Free specified bit region 60 * bitmap_release_region(bitmap, pos, order) Free specified bit region
61 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region 61 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
62 * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
63 * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
62 */ 64 */
63 65
64/* 66/*
@@ -163,6 +165,14 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
163extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); 165extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
164extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); 166extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
165extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); 167extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
168extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
169 unsigned int nbits,
170 const u32 *buf,
171 unsigned int nwords);
172extern unsigned int bitmap_to_u32array(u32 *buf,
173 unsigned int nwords,
174 const unsigned long *bitmap,
175 unsigned int nbits);
166#ifdef __BIG_ENDIAN 176#ifdef __BIG_ENDIAN
167extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); 177extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
168#else 178#else
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 7fc9296b5742..9ac9799b702b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -244,6 +244,8 @@ void blk_mq_freeze_queue(struct request_queue *q);
244void blk_mq_unfreeze_queue(struct request_queue *q); 244void blk_mq_unfreeze_queue(struct request_queue *q);
245void blk_mq_freeze_queue_start(struct request_queue *q); 245void blk_mq_freeze_queue_start(struct request_queue *q);
246 246
247void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
248
247/* 249/*
248 * Driver command data is immediately after the request. So subtract request 250 * Driver command data is immediately after the request. So subtract request
249 * size to get back to the original request, add request size to get the PDU. 251 * size to get back to the original request, add request size to get the PDU.
@@ -261,22 +263,8 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
261 for ((i) = 0; (i) < (q)->nr_hw_queues && \ 263 for ((i) = 0; (i) < (q)->nr_hw_queues && \
262 ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) 264 ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
263 265
264#define queue_for_each_ctx(q, ctx, i) \
265 for ((i) = 0; (i) < (q)->nr_queues && \
266 ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
267
268#define hctx_for_each_ctx(hctx, ctx, i) \ 266#define hctx_for_each_ctx(hctx, ctx, i) \
269 for ((i) = 0; (i) < (hctx)->nr_ctx && \ 267 for ((i) = 0; (i) < (hctx)->nr_ctx && \
270 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) 268 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
271 269
272#define blk_ctx_sum(q, sum) \
273({ \
274 struct blk_mq_ctx *__x; \
275 unsigned int __ret = 0, __i; \
276 \
277 queue_for_each_ctx((q), __x, __i) \
278 __ret += sum; \
279 __ret; \
280})
281
282#endif 270#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 413c84fbc4ed..669e419d6234 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -499,7 +499,8 @@ struct request_queue {
499 499
500#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 500#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
501 (1 << QUEUE_FLAG_STACKABLE) | \ 501 (1 << QUEUE_FLAG_STACKABLE) | \
502 (1 << QUEUE_FLAG_SAME_COMP)) 502 (1 << QUEUE_FLAG_SAME_COMP) | \
503 (1 << QUEUE_FLAG_POLL))
503 504
504static inline void queue_lockdep_assert_held(struct request_queue *q) 505static inline void queue_lockdep_assert_held(struct request_queue *q)
505{ 506{
@@ -1029,6 +1030,7 @@ extern int blk_pre_runtime_suspend(struct request_queue *q);
1029extern void blk_post_runtime_suspend(struct request_queue *q, int err); 1030extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1030extern void blk_pre_runtime_resume(struct request_queue *q); 1031extern void blk_pre_runtime_resume(struct request_queue *q);
1031extern void blk_post_runtime_resume(struct request_queue *q, int err); 1032extern void blk_post_runtime_resume(struct request_queue *q, int err);
1033extern void blk_set_runtime_active(struct request_queue *q);
1032#else 1034#else
1033static inline void blk_pm_runtime_init(struct request_queue *q, 1035static inline void blk_pm_runtime_init(struct request_queue *q,
1034 struct device *dev) {} 1036 struct device *dev) {}
@@ -1039,6 +1041,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q)
1039static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} 1041static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1040static inline void blk_pre_runtime_resume(struct request_queue *q) {} 1042static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1041static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} 1043static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1044extern inline void blk_set_runtime_active(struct request_queue *q) {}
1042#endif 1045#endif
1043 1046
1044/* 1047/*
@@ -1369,7 +1372,7 @@ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1369 1372
1370static inline void put_dev_sector(Sector p) 1373static inline void put_dev_sector(Sector p)
1371{ 1374{
1372 page_cache_release(p.v); 1375 put_page(p.v);
1373} 1376}
1374 1377
1375static inline bool __bvec_gap_to_prev(struct request_queue *q, 1378static inline bool __bvec_gap_to_prev(struct request_queue *q,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 83d1926c61e4..f1d5c5acc8dd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
10#include <uapi/linux/bpf.h> 10#include <uapi/linux/bpf.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/file.h> 12#include <linux/file.h>
13#include <linux/percpu.h>
13 14
14struct bpf_map; 15struct bpf_map;
15 16
@@ -36,6 +37,7 @@ struct bpf_map {
36 u32 key_size; 37 u32 key_size;
37 u32 value_size; 38 u32 value_size;
38 u32 max_entries; 39 u32 max_entries;
40 u32 map_flags;
39 u32 pages; 41 u32 pages;
40 struct user_struct *user; 42 struct user_struct *user;
41 const struct bpf_map_ops *ops; 43 const struct bpf_map_ops *ops;
@@ -65,6 +67,7 @@ enum bpf_arg_type {
65 */ 67 */
66 ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ 68 ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
67 ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ 69 ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
70 ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
68 71
69 ARG_PTR_TO_CTX, /* pointer to context */ 72 ARG_PTR_TO_CTX, /* pointer to context */
70 ARG_ANYTHING, /* any (initialized) argument is ok */ 73 ARG_ANYTHING, /* any (initialized) argument is ok */
@@ -151,6 +154,7 @@ struct bpf_array {
151 union { 154 union {
152 char value[0] __aligned(8); 155 char value[0] __aligned(8);
153 void *ptrs[0] __aligned(8); 156 void *ptrs[0] __aligned(8);
157 void __percpu *pptrs[0] __aligned(8);
154 }; 158 };
155}; 159};
156#define MAX_TAIL_CALL_CNT 32 160#define MAX_TAIL_CALL_CNT 32
@@ -161,18 +165,22 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f
161const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 165const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
162 166
163#ifdef CONFIG_BPF_SYSCALL 167#ifdef CONFIG_BPF_SYSCALL
168DECLARE_PER_CPU(int, bpf_prog_active);
169
164void bpf_register_prog_type(struct bpf_prog_type_list *tl); 170void bpf_register_prog_type(struct bpf_prog_type_list *tl);
165void bpf_register_map_type(struct bpf_map_type_list *tl); 171void bpf_register_map_type(struct bpf_map_type_list *tl);
166 172
167struct bpf_prog *bpf_prog_get(u32 ufd); 173struct bpf_prog *bpf_prog_get(u32 ufd);
174struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
168void bpf_prog_put(struct bpf_prog *prog); 175void bpf_prog_put(struct bpf_prog *prog);
169void bpf_prog_put_rcu(struct bpf_prog *prog); 176void bpf_prog_put_rcu(struct bpf_prog *prog);
170 177
171struct bpf_map *bpf_map_get_with_uref(u32 ufd); 178struct bpf_map *bpf_map_get_with_uref(u32 ufd);
172struct bpf_map *__bpf_map_get(struct fd f); 179struct bpf_map *__bpf_map_get(struct fd f);
173void bpf_map_inc(struct bpf_map *map, bool uref); 180struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
174void bpf_map_put_with_uref(struct bpf_map *map); 181void bpf_map_put_with_uref(struct bpf_map *map);
175void bpf_map_put(struct bpf_map *map); 182void bpf_map_put(struct bpf_map *map);
183int bpf_map_precharge_memlock(u32 pages);
176 184
177extern int sysctl_unprivileged_bpf_disabled; 185extern int sysctl_unprivileged_bpf_disabled;
178 186
@@ -182,6 +190,30 @@ int bpf_prog_new_fd(struct bpf_prog *prog);
182int bpf_obj_pin_user(u32 ufd, const char __user *pathname); 190int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
183int bpf_obj_get_user(const char __user *pathname); 191int bpf_obj_get_user(const char __user *pathname);
184 192
193int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
194int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
195int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
196 u64 flags);
197int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
198 u64 flags);
199int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
200
201/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
202 * forced to use 'long' read/writes to try to atomically copy long counters.
203 * Best-effort only. No barriers here, since it _will_ race with concurrent
204 * updates from BPF programs. Called from bpf syscall and mostly used with
205 * size 8 or 16 bytes, so ask compiler to inline it.
206 */
207static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
208{
209 const long *lsrc = src;
210 long *ldst = dst;
211
212 size /= sizeof(long);
213 while (size--)
214 *ldst++ = *lsrc++;
215}
216
185/* verify correctness of eBPF program */ 217/* verify correctness of eBPF program */
186int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); 218int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
187#else 219#else
@@ -213,6 +245,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
213extern const struct bpf_func_proto bpf_get_current_comm_proto; 245extern const struct bpf_func_proto bpf_get_current_comm_proto;
214extern const struct bpf_func_proto bpf_skb_vlan_push_proto; 246extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
215extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; 247extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
248extern const struct bpf_func_proto bpf_get_stackid_proto;
216 249
217/* Shared helpers among cBPF and eBPF. */ 250/* Shared helpers among cBPF and eBPF. */
218void bpf_user_rnd_init_once(void); 251void bpf_user_rnd_init_once(void);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index f0ba9c2ec639..e3354b74286c 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -24,6 +24,8 @@
24#define PHY_ID_BCM7250 0xae025280 24#define PHY_ID_BCM7250 0xae025280
25#define PHY_ID_BCM7364 0xae025260 25#define PHY_ID_BCM7364 0xae025260
26#define PHY_ID_BCM7366 0x600d8490 26#define PHY_ID_BCM7366 0x600d8490
27#define PHY_ID_BCM7346 0x600d8650
28#define PHY_ID_BCM7362 0x600d84b0
27#define PHY_ID_BCM7425 0x600d86b0 29#define PHY_ID_BCM7425 0x600d86b0
28#define PHY_ID_BCM7429 0x600d8730 30#define PHY_ID_BCM7429 0x600d8730
29#define PHY_ID_BCM7435 0x600d8750 31#define PHY_ID_BCM7435 0x600d8750
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 89d9aa9e79bf..d48daa3f6f20 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -43,7 +43,7 @@ enum bh_state_bits {
43 */ 43 */
44}; 44};
45 45
46#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512) 46#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
47 47
48struct page; 48struct page;
49struct buffer_head; 49struct buffer_head;
@@ -82,15 +82,15 @@ struct buffer_head {
82 * and buffer_foo() functions. 82 * and buffer_foo() functions.
83 */ 83 */
84#define BUFFER_FNS(bit, name) \ 84#define BUFFER_FNS(bit, name) \
85static inline void set_buffer_##name(struct buffer_head *bh) \ 85static __always_inline void set_buffer_##name(struct buffer_head *bh) \
86{ \ 86{ \
87 set_bit(BH_##bit, &(bh)->b_state); \ 87 set_bit(BH_##bit, &(bh)->b_state); \
88} \ 88} \
89static inline void clear_buffer_##name(struct buffer_head *bh) \ 89static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
90{ \ 90{ \
91 clear_bit(BH_##bit, &(bh)->b_state); \ 91 clear_bit(BH_##bit, &(bh)->b_state); \
92} \ 92} \
93static inline int buffer_##name(const struct buffer_head *bh) \ 93static __always_inline int buffer_##name(const struct buffer_head *bh) \
94{ \ 94{ \
95 return test_bit(BH_##bit, &(bh)->b_state); \ 95 return test_bit(BH_##bit, &(bh)->b_state); \
96} 96}
@@ -99,11 +99,11 @@ static inline int buffer_##name(const struct buffer_head *bh) \
99 * test_set_buffer_foo() and test_clear_buffer_foo() 99 * test_set_buffer_foo() and test_clear_buffer_foo()
100 */ 100 */
101#define TAS_BUFFER_FNS(bit, name) \ 101#define TAS_BUFFER_FNS(bit, name) \
102static inline int test_set_buffer_##name(struct buffer_head *bh) \ 102static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
103{ \ 103{ \
104 return test_and_set_bit(BH_##bit, &(bh)->b_state); \ 104 return test_and_set_bit(BH_##bit, &(bh)->b_state); \
105} \ 105} \
106static inline int test_clear_buffer_##name(struct buffer_head *bh) \ 106static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
107{ \ 107{ \
108 return test_and_clear_bit(BH_##bit, &(bh)->b_state); \ 108 return test_and_clear_bit(BH_##bit, &(bh)->b_state); \
109} \ 109} \
@@ -263,7 +263,7 @@ void buffer_init(void);
263static inline void attach_page_buffers(struct page *page, 263static inline void attach_page_buffers(struct page *page,
264 struct buffer_head *head) 264 struct buffer_head *head)
265{ 265{
266 page_cache_get(page); 266 get_page(page);
267 SetPagePrivate(page); 267 SetPagePrivate(page);
268 set_page_private(page, (unsigned long)head); 268 set_page_private(page, (unsigned long)head);
269} 269}
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 7f4818673c41..e51b0709e78d 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -20,6 +20,7 @@ struct pt_regs;
20#define BUILD_BUG_ON_MSG(cond, msg) (0) 20#define BUILD_BUG_ON_MSG(cond, msg) (0)
21#define BUILD_BUG_ON(condition) (0) 21#define BUILD_BUG_ON(condition) (0)
22#define BUILD_BUG() (0) 22#define BUILD_BUG() (0)
23#define MAYBE_BUILD_BUG_ON(cond) (0)
23#else /* __CHECKER__ */ 24#else /* __CHECKER__ */
24 25
25/* Force a compilation error if a constant expression is not a power of 2 */ 26/* Force a compilation error if a constant expression is not a power of 2 */
@@ -83,6 +84,14 @@ struct pt_regs;
83 */ 84 */
84#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") 85#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
85 86
87#define MAYBE_BUILD_BUG_ON(cond) \
88 do { \
89 if (__builtin_constant_p((cond))) \
90 BUILD_BUG_ON(cond); \
91 else \
92 BUG_ON(cond); \
93 } while (0)
94
86#endif /* __CHECKER__ */ 95#endif /* __CHECKER__ */
87 96
88#ifdef CONFIG_GENERIC_BUG 97#ifdef CONFIG_GENERIC_BUG
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 17e7e82d2aa7..1be04f8c563a 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -12,10 +12,24 @@
12#define SMP_CACHE_BYTES L1_CACHE_BYTES 12#define SMP_CACHE_BYTES L1_CACHE_BYTES
13#endif 13#endif
14 14
15/*
16 * __read_mostly is used to keep rarely changing variables out of frequently
17 * updated cachelines. If an architecture doesn't support it, ignore the
18 * hint.
19 */
15#ifndef __read_mostly 20#ifndef __read_mostly
16#define __read_mostly 21#define __read_mostly
17#endif 22#endif
18 23
24/*
25 * __ro_after_init is used to mark things that are read-only after init (i.e.
26 * after mark_rodata_ro() has been called). These are effectively read-only,
27 * but may get written to during init, so can't live in .rodata (via "const").
28 */
29#ifndef __ro_after_init
30#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
31#endif
32
19#ifndef ____cacheline_aligned 33#ifndef ____cacheline_aligned
20#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) 34#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
21#endif 35#endif
diff --git a/include/linux/capability.h b/include/linux/capability.h
index f314275d4e3f..00690ff92edf 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -40,8 +40,6 @@ struct inode;
40struct dentry; 40struct dentry;
41struct user_namespace; 41struct user_namespace;
42 42
43struct user_namespace *current_user_ns(void);
44
45extern const kernel_cap_t __cap_empty_set; 43extern const kernel_cap_t __cap_empty_set;
46extern const kernel_cap_t __cap_init_eff_set; 44extern const kernel_cap_t __cap_init_eff_set;
47 45
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 7f437036baa4..915af3095b39 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -33,6 +33,18 @@ struct ccp_cmd;
33 */ 33 */
34int ccp_present(void); 34int ccp_present(void);
35 35
36#define CCP_VSIZE 16
37#define CCP_VMASK ((unsigned int)((1 << CCP_VSIZE) - 1))
38#define CCP_VERSION(v, r) ((unsigned int)((v << CCP_VSIZE) \
39 | (r & CCP_VMASK)))
40
41/**
42 * ccp_version - get the version of the CCP
43 *
44 * Returns a positive version number, or zero if no CCP
45 */
46unsigned int ccp_version(void);
47
36/** 48/**
37 * ccp_enqueue_cmd - queue an operation for processing by the CCP 49 * ccp_enqueue_cmd - queue an operation for processing by the CCP
38 * 50 *
@@ -65,6 +77,11 @@ static inline int ccp_present(void)
65 return -ENODEV; 77 return -ENODEV;
66} 78}
67 79
80static inline unsigned int ccp_version(void)
81{
82 return 0;
83}
84
68static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) 85static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
69{ 86{
70 return -ENODEV; 87 return -ENODEV;
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 260d78b587c4..1563265d2097 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -12,9 +12,12 @@
12 */ 12 */
13 13
14struct ceph_auth_client; 14struct ceph_auth_client;
15struct ceph_authorizer;
16struct ceph_msg; 15struct ceph_msg;
17 16
17struct ceph_authorizer {
18 void (*destroy)(struct ceph_authorizer *);
19};
20
18struct ceph_auth_handshake { 21struct ceph_auth_handshake {
19 struct ceph_authorizer *authorizer; 22 struct ceph_authorizer *authorizer;
20 void *authorizer_buf; 23 void *authorizer_buf;
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops {
62 struct ceph_auth_handshake *auth); 65 struct ceph_auth_handshake *auth);
63 int (*verify_authorizer_reply)(struct ceph_auth_client *ac, 66 int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
64 struct ceph_authorizer *a, size_t len); 67 struct ceph_authorizer *a, size_t len);
65 void (*destroy_authorizer)(struct ceph_auth_client *ac,
66 struct ceph_authorizer *a);
67 void (*invalidate_authorizer)(struct ceph_auth_client *ac, 68 void (*invalidate_authorizer)(struct ceph_auth_client *ac,
68 int peer_type); 69 int peer_type);
69 70
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
112extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, 113extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
113 int peer_type, 114 int peer_type,
114 struct ceph_auth_handshake *auth); 115 struct ceph_auth_handshake *auth);
115extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 116void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
116 struct ceph_authorizer *a);
117extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, 117extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
118 int peer_type, 118 int peer_type,
119 struct ceph_auth_handshake *a); 119 struct ceph_auth_handshake *a);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 15151f3c4120..ae2f66833762 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -105,6 +105,7 @@ static inline u64 ceph_sanitize_features(u64 features)
105 */ 105 */
106#define CEPH_FEATURES_SUPPORTED_DEFAULT \ 106#define CEPH_FEATURES_SUPPORTED_DEFAULT \
107 (CEPH_FEATURE_NOSRCADDR | \ 107 (CEPH_FEATURE_NOSRCADDR | \
108 CEPH_FEATURE_SUBSCRIBE2 | \
108 CEPH_FEATURE_RECONNECT_SEQ | \ 109 CEPH_FEATURE_RECONNECT_SEQ | \
109 CEPH_FEATURE_PGID64 | \ 110 CEPH_FEATURE_PGID64 | \
110 CEPH_FEATURE_PGPOOL3 | \ 111 CEPH_FEATURE_PGPOOL3 | \
@@ -127,6 +128,7 @@ static inline u64 ceph_sanitize_features(u64 features)
127 128
128#define CEPH_FEATURES_REQUIRED_DEFAULT \ 129#define CEPH_FEATURES_REQUIRED_DEFAULT \
129 (CEPH_FEATURE_NOSRCADDR | \ 130 (CEPH_FEATURE_NOSRCADDR | \
131 CEPH_FEATURE_SUBSCRIBE2 | \
130 CEPH_FEATURE_RECONNECT_SEQ | \ 132 CEPH_FEATURE_RECONNECT_SEQ | \
131 CEPH_FEATURE_PGID64 | \ 133 CEPH_FEATURE_PGID64 | \
132 CEPH_FEATURE_PGPOOL3 | \ 134 CEPH_FEATURE_PGPOOL3 | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index d7d072a25c27..37f28bf55ce4 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -198,8 +198,8 @@ struct ceph_client_mount {
198#define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */ 198#define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */
199 199
200struct ceph_mon_subscribe_item { 200struct ceph_mon_subscribe_item {
201 __le64 have_version; __le64 have; 201 __le64 start;
202 __u8 onetime; 202 __u8 flags;
203} __attribute__ ((packed)); 203} __attribute__ ((packed));
204 204
205struct ceph_mon_subscribe_ack { 205struct ceph_mon_subscribe_ack {
@@ -376,7 +376,8 @@ union ceph_mds_request_args {
376 __le32 stripe_count; /* ... */ 376 __le32 stripe_count; /* ... */
377 __le32 object_size; 377 __le32 object_size;
378 __le32 file_replication; 378 __le32 file_replication;
379 __le32 unused; /* used to be preferred osd */ 379 __le32 mask; /* CEPH_CAP_* */
380 __le32 old_size;
380 } __attribute__ ((packed)) open; 381 } __attribute__ ((packed)) open;
381 struct { 382 struct {
382 __le32 flags; 383 __le32 flags;
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 3e3799cdc6e6..db92a8d4926e 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -47,7 +47,6 @@ struct ceph_options {
47 unsigned long mount_timeout; /* jiffies */ 47 unsigned long mount_timeout; /* jiffies */
48 unsigned long osd_idle_ttl; /* jiffies */ 48 unsigned long osd_idle_ttl; /* jiffies */
49 unsigned long osd_keepalive_timeout; /* jiffies */ 49 unsigned long osd_keepalive_timeout; /* jiffies */
50 unsigned long monc_ping_timeout; /* jiffies */
51 50
52 /* 51 /*
53 * any type that can't be simply compared or doesn't need need 52 * any type that can't be simply compared or doesn't need need
@@ -68,7 +67,12 @@ struct ceph_options {
68#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) 67#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
69#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) 68#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
70#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) 69#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
71#define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000) 70
71#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000)
72#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000)
73#define CEPH_MONC_PING_TIMEOUT msecs_to_jiffies(30 * 1000)
74#define CEPH_MONC_HUNT_BACKOFF 2
75#define CEPH_MONC_HUNT_MAX_MULT 10
72 76
73#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) 77#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
74#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) 78#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
@@ -172,8 +176,8 @@ extern void ceph_put_snap_context(struct ceph_snap_context *sc);
172 */ 176 */
173static inline int calc_pages_for(u64 off, u64 len) 177static inline int calc_pages_for(u64 off, u64 len)
174{ 178{
175 return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - 179 return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
176 (off >> PAGE_CACHE_SHIFT); 180 (off >> PAGE_SHIFT);
177} 181}
178 182
179extern struct kmem_cache *ceph_inode_cachep; 183extern struct kmem_cache *ceph_inode_cachep;
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index 81810dc21f06..e230e7ed60d3 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -68,18 +68,24 @@ struct ceph_mon_client {
68 68
69 bool hunting; 69 bool hunting;
70 int cur_mon; /* last monitor i contacted */ 70 int cur_mon; /* last monitor i contacted */
71 unsigned long sub_sent, sub_renew_after; 71 unsigned long sub_renew_after;
72 unsigned long sub_renew_sent;
72 struct ceph_connection con; 73 struct ceph_connection con;
73 74
75 bool had_a_connection;
76 int hunt_mult; /* [1..CEPH_MONC_HUNT_MAX_MULT] */
77
74 /* pending generic requests */ 78 /* pending generic requests */
75 struct rb_root generic_request_tree; 79 struct rb_root generic_request_tree;
76 int num_generic_requests; 80 int num_generic_requests;
77 u64 last_tid; 81 u64 last_tid;
78 82
79 /* mds/osd map */ 83 /* subs, indexed with CEPH_SUB_* */
80 int want_mdsmap; 84 struct {
81 int want_next_osdmap; /* 1 = want, 2 = want+asked */ 85 struct ceph_mon_subscribe_item item;
82 u32 have_osdmap, have_mdsmap; 86 bool want;
87 u32 have; /* epoch */
88 } subs[3];
83 89
84#ifdef CONFIG_DEBUG_FS 90#ifdef CONFIG_DEBUG_FS
85 struct dentry *debugfs_file; 91 struct dentry *debugfs_file;
@@ -93,14 +99,23 @@ extern int ceph_monmap_contains(struct ceph_monmap *m,
93extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); 99extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
94extern void ceph_monc_stop(struct ceph_mon_client *monc); 100extern void ceph_monc_stop(struct ceph_mon_client *monc);
95 101
102enum {
103 CEPH_SUB_MDSMAP = 0,
104 CEPH_SUB_MONMAP,
105 CEPH_SUB_OSDMAP,
106};
107
108extern const char *ceph_sub_str[];
109
96/* 110/*
97 * The model here is to indicate that we need a new map of at least 111 * The model here is to indicate that we need a new map of at least
98 * epoch @want, and also call in when we receive a map. We will 112 * epoch @epoch, and also call in when we receive a map. We will
99 * periodically rerequest the map from the monitor cluster until we 113 * periodically rerequest the map from the monitor cluster until we
100 * get what we want. 114 * get what we want.
101 */ 115 */
102extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have); 116bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
103extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); 117 bool continuous);
118void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch);
104 119
105extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); 120extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
106extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, 121extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7506b485bb6d..cbf460927c42 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -16,7 +16,6 @@ struct ceph_msg;
16struct ceph_snap_context; 16struct ceph_snap_context;
17struct ceph_osd_request; 17struct ceph_osd_request;
18struct ceph_osd_client; 18struct ceph_osd_client;
19struct ceph_authorizer;
20 19
21/* 20/*
22 * completion callback for async writepages 21 * completion callback for async writepages
@@ -43,7 +42,8 @@ struct ceph_osd {
43}; 42};
44 43
45 44
46#define CEPH_OSD_MAX_OP 3 45#define CEPH_OSD_SLAB_OPS 2
46#define CEPH_OSD_MAX_OPS 16
47 47
48enum ceph_osd_data_type { 48enum ceph_osd_data_type {
49 CEPH_OSD_DATA_TYPE_NONE = 0, 49 CEPH_OSD_DATA_TYPE_NONE = 0,
@@ -77,7 +77,10 @@ struct ceph_osd_data {
77struct ceph_osd_req_op { 77struct ceph_osd_req_op {
78 u16 op; /* CEPH_OSD_OP_* */ 78 u16 op; /* CEPH_OSD_OP_* */
79 u32 flags; /* CEPH_OSD_OP_FLAG_* */ 79 u32 flags; /* CEPH_OSD_OP_FLAG_* */
80 u32 payload_len; 80 u32 indata_len; /* request */
81 u32 outdata_len; /* reply */
82 s32 rval;
83
81 union { 84 union {
82 struct ceph_osd_data raw_data_in; 85 struct ceph_osd_data raw_data_in;
83 struct { 86 struct {
@@ -136,7 +139,6 @@ struct ceph_osd_request {
136 139
137 /* request osd ops array */ 140 /* request osd ops array */
138 unsigned int r_num_ops; 141 unsigned int r_num_ops;
139 struct ceph_osd_req_op r_ops[CEPH_OSD_MAX_OP];
140 142
141 /* these are updated on each send */ 143 /* these are updated on each send */
142 __le32 *r_request_osdmap_epoch; 144 __le32 *r_request_osdmap_epoch;
@@ -148,8 +150,6 @@ struct ceph_osd_request {
148 struct ceph_eversion *r_request_reassert_version; 150 struct ceph_eversion *r_request_reassert_version;
149 151
150 int r_result; 152 int r_result;
151 int r_reply_op_len[CEPH_OSD_MAX_OP];
152 s32 r_reply_op_result[CEPH_OSD_MAX_OP];
153 int r_got_reply; 153 int r_got_reply;
154 int r_linger; 154 int r_linger;
155 155
@@ -174,6 +174,8 @@ struct ceph_osd_request {
174 unsigned long r_stamp; /* send OR check time */ 174 unsigned long r_stamp; /* send OR check time */
175 175
176 struct ceph_snap_context *r_snapc; /* snap context for writes */ 176 struct ceph_snap_context *r_snapc; /* snap context for writes */
177
178 struct ceph_osd_req_op r_ops[];
177}; 179};
178 180
179struct ceph_request_redirect { 181struct ceph_request_redirect {
@@ -263,6 +265,8 @@ extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
263 u64 truncate_size, u32 truncate_seq); 265 u64 truncate_size, u32 truncate_seq);
264extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req, 266extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
265 unsigned int which, u64 length); 267 unsigned int which, u64 length);
268extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
269 unsigned int which, u64 offset_inc);
266 270
267extern struct ceph_osd_data *osd_req_op_extent_osd_data( 271extern struct ceph_osd_data *osd_req_op_extent_osd_data(
268 struct ceph_osd_request *osd_req, 272 struct ceph_osd_request *osd_req,
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 789471dba6fb..5b17de62c962 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -45,6 +45,7 @@ enum {
45 CSS_NO_REF = (1 << 0), /* no reference counting for this css */ 45 CSS_NO_REF = (1 << 0), /* no reference counting for this css */
46 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ 46 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
47 CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ 47 CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
48 CSS_VISIBLE = (1 << 3), /* css is visible to userland */
48}; 49};
49 50
50/* bits in struct cgroup flags field */ 51/* bits in struct cgroup flags field */
@@ -190,12 +191,13 @@ struct css_set {
190 191
191 /* 192 /*
192 * If this cset is acting as the source of migration the following 193 * If this cset is acting as the source of migration the following
193 * two fields are set. mg_src_cgrp is the source cgroup of the 194 * two fields are set. mg_src_cgrp and mg_dst_cgrp are
194 * on-going migration and mg_dst_cset is the destination cset the 195 * respectively the source and destination cgroups of the on-going
195 * target tasks on this cset should be migrated to. Protected by 196 * migration. mg_dst_cset is the destination cset the target tasks
196 * cgroup_mutex. 197 * on this cset should be migrated to. Protected by cgroup_mutex.
197 */ 198 */
198 struct cgroup *mg_src_cgrp; 199 struct cgroup *mg_src_cgrp;
200 struct cgroup *mg_dst_cgrp;
199 struct css_set *mg_dst_cset; 201 struct css_set *mg_dst_cset;
200 202
201 /* 203 /*
@@ -210,6 +212,9 @@ struct css_set {
210 /* all css_task_iters currently walking this cset */ 212 /* all css_task_iters currently walking this cset */
211 struct list_head task_iters; 213 struct list_head task_iters;
212 214
215 /* dead and being drained, ignore for migration */
216 bool dead;
217
213 /* For RCU-protected deletion */ 218 /* For RCU-protected deletion */
214 struct rcu_head rcu_head; 219 struct rcu_head rcu_head;
215}; 220};
@@ -253,13 +258,14 @@ struct cgroup {
253 /* 258 /*
254 * The bitmask of subsystems enabled on the child cgroups. 259 * The bitmask of subsystems enabled on the child cgroups.
255 * ->subtree_control is the one configured through 260 * ->subtree_control is the one configured through
256 * "cgroup.subtree_control" while ->child_subsys_mask is the 261 * "cgroup.subtree_control" while ->child_ss_mask is the effective
257 * effective one which may have more subsystems enabled. 262 * one which may have more subsystems enabled. Controller knobs
258 * Controller knobs are made available iff it's enabled in 263 * are made available iff it's enabled in ->subtree_control.
259 * ->subtree_control.
260 */ 264 */
261 unsigned int subtree_control; 265 u16 subtree_control;
262 unsigned int child_subsys_mask; 266 u16 subtree_ss_mask;
267 u16 old_subtree_control;
268 u16 old_subtree_ss_mask;
263 269
264 /* Private pointers for each registered subsystem */ 270 /* Private pointers for each registered subsystem */
265 struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; 271 struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
@@ -434,11 +440,11 @@ struct cgroup_subsys {
434 void (*css_released)(struct cgroup_subsys_state *css); 440 void (*css_released)(struct cgroup_subsys_state *css);
435 void (*css_free)(struct cgroup_subsys_state *css); 441 void (*css_free)(struct cgroup_subsys_state *css);
436 void (*css_reset)(struct cgroup_subsys_state *css); 442 void (*css_reset)(struct cgroup_subsys_state *css);
437 void (*css_e_css_changed)(struct cgroup_subsys_state *css);
438 443
439 int (*can_attach)(struct cgroup_taskset *tset); 444 int (*can_attach)(struct cgroup_taskset *tset);
440 void (*cancel_attach)(struct cgroup_taskset *tset); 445 void (*cancel_attach)(struct cgroup_taskset *tset);
441 void (*attach)(struct cgroup_taskset *tset); 446 void (*attach)(struct cgroup_taskset *tset);
447 void (*post_attach)(void);
442 int (*can_fork)(struct task_struct *task); 448 int (*can_fork)(struct task_struct *task);
443 void (*cancel_fork)(struct task_struct *task); 449 void (*cancel_fork)(struct task_struct *task);
444 void (*fork)(struct task_struct *task); 450 void (*fork)(struct task_struct *task);
@@ -446,7 +452,20 @@ struct cgroup_subsys {
446 void (*free)(struct task_struct *task); 452 void (*free)(struct task_struct *task);
447 void (*bind)(struct cgroup_subsys_state *root_css); 453 void (*bind)(struct cgroup_subsys_state *root_css);
448 454
449 int early_init; 455 bool early_init:1;
456
457 /*
458 * If %true, the controller, on the default hierarchy, doesn't show
459 * up in "cgroup.controllers" or "cgroup.subtree_control", is
460 * implicitly enabled on all cgroups on the default hierarchy, and
461 * bypasses the "no internal process" constraint. This is for
462 * utility type controllers which is transparent to userland.
463 *
464 * An implicit controller can be stolen from the default hierarchy
465 * anytime and thus must be okay with offline csses from previous
466 * hierarchies coexisting with csses for the current one.
467 */
468 bool implicit_on_dfl:1;
450 469
451 /* 470 /*
452 * If %false, this subsystem is properly hierarchical - 471 * If %false, this subsystem is properly hierarchical -
@@ -460,8 +479,8 @@ struct cgroup_subsys {
460 * cases. Eventually, all subsystems will be made properly 479 * cases. Eventually, all subsystems will be made properly
461 * hierarchical and this will go away. 480 * hierarchical and this will go away.
462 */ 481 */
463 bool broken_hierarchy; 482 bool broken_hierarchy:1;
464 bool warned_broken_hierarchy; 483 bool warned_broken_hierarchy:1;
465 484
466 /* the following two fields are initialized automtically during boot */ 485 /* the following two fields are initialized automtically during boot */
467 int id; 486 int id;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 2162dca88dc0..a20320c666fd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -17,6 +17,11 @@
17#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18#include <linux/kernfs.h> 18#include <linux/kernfs.h>
19#include <linux/jump_label.h> 19#include <linux/jump_label.h>
20#include <linux/nsproxy.h>
21#include <linux/types.h>
22#include <linux/ns_common.h>
23#include <linux/nsproxy.h>
24#include <linux/user_namespace.h>
20 25
21#include <linux/cgroup-defs.h> 26#include <linux/cgroup-defs.h>
22 27
@@ -611,4 +616,48 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
611 616
612#endif /* CONFIG_CGROUP_DATA */ 617#endif /* CONFIG_CGROUP_DATA */
613 618
619struct cgroup_namespace {
620 atomic_t count;
621 struct ns_common ns;
622 struct user_namespace *user_ns;
623 struct css_set *root_cset;
624};
625
626extern struct cgroup_namespace init_cgroup_ns;
627
628#ifdef CONFIG_CGROUPS
629
630void free_cgroup_ns(struct cgroup_namespace *ns);
631
632struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
633 struct user_namespace *user_ns,
634 struct cgroup_namespace *old_ns);
635
636char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
637 struct cgroup_namespace *ns);
638
639#else /* !CONFIG_CGROUPS */
640
641static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
642static inline struct cgroup_namespace *
643copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
644 struct cgroup_namespace *old_ns)
645{
646 return old_ns;
647}
648
649#endif /* !CONFIG_CGROUPS */
650
651static inline void get_cgroup_ns(struct cgroup_namespace *ns)
652{
653 if (ns)
654 atomic_inc(&ns->count);
655}
656
657static inline void put_cgroup_ns(struct cgroup_namespace *ns)
658{
659 if (ns && atomic_dec_and_test(&ns->count))
660 free_cgroup_ns(ns);
661}
662
614#endif /* _LINUX_CGROUP_H */ 663#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 1143e38555a4..da95258127aa 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -25,7 +25,7 @@
25#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ 25#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
26#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ 26#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
27#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ 27#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
28#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */ 28#define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */
29#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ 29#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
30#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ 30#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
31#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ 31#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
@@ -276,6 +276,8 @@ struct clk_fixed_rate {
276 u8 flags; 276 u8 flags;
277}; 277};
278 278
279#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
280
279extern const struct clk_ops clk_fixed_rate_ops; 281extern const struct clk_ops clk_fixed_rate_ops;
280struct clk *clk_register_fixed_rate(struct device *dev, const char *name, 282struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
281 const char *parent_name, unsigned long flags, 283 const char *parent_name, unsigned long flags,
@@ -283,7 +285,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
283struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev, 285struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
284 const char *name, const char *parent_name, unsigned long flags, 286 const char *name, const char *parent_name, unsigned long flags,
285 unsigned long fixed_rate, unsigned long fixed_accuracy); 287 unsigned long fixed_rate, unsigned long fixed_accuracy);
286 288void clk_unregister_fixed_rate(struct clk *clk);
287void of_fixed_clk_setup(struct device_node *np); 289void of_fixed_clk_setup(struct device_node *np);
288 290
289/** 291/**
@@ -314,6 +316,8 @@ struct clk_gate {
314 spinlock_t *lock; 316 spinlock_t *lock;
315}; 317};
316 318
319#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
320
317#define CLK_GATE_SET_TO_DISABLE BIT(0) 321#define CLK_GATE_SET_TO_DISABLE BIT(0)
318#define CLK_GATE_HIWORD_MASK BIT(1) 322#define CLK_GATE_HIWORD_MASK BIT(1)
319 323
@@ -376,6 +380,8 @@ struct clk_divider {
376 spinlock_t *lock; 380 spinlock_t *lock;
377}; 381};
378 382
383#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
384
379#define CLK_DIVIDER_ONE_BASED BIT(0) 385#define CLK_DIVIDER_ONE_BASED BIT(0)
380#define CLK_DIVIDER_POWER_OF_TWO BIT(1) 386#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
381#define CLK_DIVIDER_ALLOW_ZERO BIT(2) 387#define CLK_DIVIDER_ALLOW_ZERO BIT(2)
@@ -385,6 +391,7 @@ struct clk_divider {
385#define CLK_DIVIDER_MAX_AT_ZERO BIT(6) 391#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
386 392
387extern const struct clk_ops clk_divider_ops; 393extern const struct clk_ops clk_divider_ops;
394extern const struct clk_ops clk_divider_ro_ops;
388 395
389unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, 396unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
390 unsigned int val, const struct clk_div_table *table, 397 unsigned int val, const struct clk_div_table *table,
@@ -440,6 +447,8 @@ struct clk_mux {
440 spinlock_t *lock; 447 spinlock_t *lock;
441}; 448};
442 449
450#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
451
443#define CLK_MUX_INDEX_ONE BIT(0) 452#define CLK_MUX_INDEX_ONE BIT(0)
444#define CLK_MUX_INDEX_BIT BIT(1) 453#define CLK_MUX_INDEX_BIT BIT(1)
445#define CLK_MUX_HIWORD_MASK BIT(2) 454#define CLK_MUX_HIWORD_MASK BIT(2)
@@ -483,10 +492,13 @@ struct clk_fixed_factor {
483 unsigned int div; 492 unsigned int div;
484}; 493};
485 494
495#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
496
486extern const struct clk_ops clk_fixed_factor_ops; 497extern const struct clk_ops clk_fixed_factor_ops;
487struct clk *clk_register_fixed_factor(struct device *dev, const char *name, 498struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
488 const char *parent_name, unsigned long flags, 499 const char *parent_name, unsigned long flags,
489 unsigned int mult, unsigned int div); 500 unsigned int mult, unsigned int div);
501void clk_unregister_fixed_factor(struct clk *clk);
490 502
491/** 503/**
492 * struct clk_fractional_divider - adjustable fractional divider clock 504 * struct clk_fractional_divider - adjustable fractional divider clock
@@ -514,6 +526,8 @@ struct clk_fractional_divider {
514 spinlock_t *lock; 526 spinlock_t *lock;
515}; 527};
516 528
529#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
530
517extern const struct clk_ops clk_fractional_divider_ops; 531extern const struct clk_ops clk_fractional_divider_ops;
518struct clk *clk_register_fractional_divider(struct device *dev, 532struct clk *clk_register_fractional_divider(struct device *dev,
519 const char *name, const char *parent_name, unsigned long flags, 533 const char *name, const char *parent_name, unsigned long flags,
@@ -550,6 +564,8 @@ struct clk_multiplier {
550 spinlock_t *lock; 564 spinlock_t *lock;
551}; 565};
552 566
567#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
568
553#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) 569#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
554#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) 570#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
555 571
@@ -579,6 +595,8 @@ struct clk_composite {
579 const struct clk_ops *gate_ops; 595 const struct clk_ops *gate_ops;
580}; 596};
581 597
598#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
599
582struct clk *clk_register_composite(struct device *dev, const char *name, 600struct clk *clk_register_composite(struct device *dev, const char *name,
583 const char * const *parent_names, int num_parents, 601 const char * const *parent_names, int num_parents,
584 struct clk_hw *mux_hw, const struct clk_ops *mux_ops, 602 struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
@@ -601,13 +619,13 @@ struct clk_gpio {
601 struct gpio_desc *gpiod; 619 struct gpio_desc *gpiod;
602}; 620};
603 621
622#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
623
604extern const struct clk_ops clk_gpio_gate_ops; 624extern const struct clk_ops clk_gpio_gate_ops;
605struct clk *clk_register_gpio_gate(struct device *dev, const char *name, 625struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
606 const char *parent_name, unsigned gpio, bool active_low, 626 const char *parent_name, unsigned gpio, bool active_low,
607 unsigned long flags); 627 unsigned long flags);
608 628
609void of_gpio_clk_gate_setup(struct device_node *node);
610
611/** 629/**
612 * struct clk_gpio_mux - gpio controlled clock multiplexer 630 * struct clk_gpio_mux - gpio controlled clock multiplexer
613 * 631 *
@@ -623,8 +641,6 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
623 const char * const *parent_names, u8 num_parents, unsigned gpio, 641 const char * const *parent_names, u8 num_parents, unsigned gpio,
624 bool active_low, unsigned long flags); 642 bool active_low, unsigned long flags);
625 643
626void of_gpio_mux_clk_setup(struct device_node *node);
627
628/** 644/**
629 * clk_register - allocate a new clock, register it and return an opaque cookie 645 * clk_register - allocate a new clock, register it and return an opaque cookie
630 * @dev: device that is registering this clock 646 * @dev: device that is registering this clock
@@ -700,7 +716,7 @@ void of_clk_del_provider(struct device_node *np);
700struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 716struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
701 void *data); 717 void *data);
702struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); 718struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
703int of_clk_get_parent_count(struct device_node *np); 719unsigned int of_clk_get_parent_count(struct device_node *np);
704int of_clk_parent_fill(struct device_node *np, const char **parents, 720int of_clk_parent_fill(struct device_node *np, const char **parents,
705 unsigned int size); 721 unsigned int size);
706const char *of_clk_get_parent_name(struct device_node *np, int index); 722const char *of_clk_get_parent_name(struct device_node *np, int index);
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 1e6932222e11..17f413bbbedf 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -16,18 +16,6 @@
16#ifndef AT91_PMC_H 16#ifndef AT91_PMC_H
17#define AT91_PMC_H 17#define AT91_PMC_H
18 18
19#ifndef __ASSEMBLY__
20extern void __iomem *at91_pmc_base;
21
22#define at91_pmc_read(field) \
23 readl_relaxed(at91_pmc_base + field)
24
25#define at91_pmc_write(field, value) \
26 writel_relaxed(value, at91_pmc_base + field)
27#else
28.extern at91_pmc_base
29#endif
30
31#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ 19#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
32#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */ 20#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
33 21
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/renesas.h
index cb19cc1865ca..7adfd80fbf55 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/renesas.h
@@ -11,8 +11,8 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#ifndef __LINUX_CLK_SHMOBILE_H_ 14#ifndef __LINUX_CLK_RENESAS_H_
15#define __LINUX_CLK_SHMOBILE_H_ 15#define __LINUX_CLK_RENESAS_H_
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18 18
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 9a638601cb09..dc5164a6df29 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -23,8 +23,8 @@
23 * @mult_div1_reg: register containing the DPLL M and N bitfields 23 * @mult_div1_reg: register containing the DPLL M and N bitfields
24 * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg 24 * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg
25 * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg 25 * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg
26 * @clk_bypass: struct clk pointer to the clock's bypass clock input 26 * @clk_bypass: struct clk_hw pointer to the clock's bypass clock input
27 * @clk_ref: struct clk pointer to the clock's reference clock input 27 * @clk_ref: struct clk_hw pointer to the clock's reference clock input
28 * @control_reg: register containing the DPLL mode bitfield 28 * @control_reg: register containing the DPLL mode bitfield
29 * @enable_mask: mask of the DPLL mode bitfield in @control_reg 29 * @enable_mask: mask of the DPLL mode bitfield in @control_reg
30 * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate() 30 * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
@@ -69,8 +69,8 @@ struct dpll_data {
69 void __iomem *mult_div1_reg; 69 void __iomem *mult_div1_reg;
70 u32 mult_mask; 70 u32 mult_mask;
71 u32 div1_mask; 71 u32 div1_mask;
72 struct clk *clk_bypass; 72 struct clk_hw *clk_bypass;
73 struct clk *clk_ref; 73 struct clk_hw *clk_ref;
74 void __iomem *control_reg; 74 void __iomem *control_reg;
75 u32 enable_mask; 75 u32 enable_mask;
76 unsigned long last_rounded_rate; 76 unsigned long last_rounded_rate;
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 08bffcc466de..c2c04f7cbe8a 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -44,8 +44,7 @@ struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
44void clkdev_add_table(struct clk_lookup *, size_t); 44void clkdev_add_table(struct clk_lookup *, size_t);
45int clk_add_alias(const char *, const char *, const char *, struct device *); 45int clk_add_alias(const char *, const char *, const char *, struct device *);
46 46
47int clk_register_clkdev(struct clk *, const char *, const char *, ...) 47int clk_register_clkdev(struct clk *, const char *, const char *);
48 __printf(3, 4);
49int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); 48int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
50 49
51#ifdef CONFIG_COMMON_CLK 50#ifdef CONFIG_COMMON_CLK
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index bdcf358dfce2..0d442e34c349 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -190,9 +190,9 @@ extern void clockevents_config_and_register(struct clock_event_device *dev,
190extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); 190extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
191 191
192static inline void 192static inline void
193clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) 193clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec)
194{ 194{
195 return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec); 195 return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec);
196} 196}
197 197
198extern void clockevents_suspend(void); 198extern void clockevents_suspend(void);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 6013021a3b39..a307bf62974f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -118,6 +118,23 @@ struct clocksource {
118/* simplify initialization of mask field */ 118/* simplify initialization of mask field */
119#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) 119#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
120 120
121static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
122{
123 /* freq = cyc/from
124 * mult/2^shift = ns/cyc
125 * mult = ns/cyc * 2^shift
126 * mult = from/freq * 2^shift
127 * mult = from * 2^shift / freq
128 * mult = (from<<shift) / freq
129 */
130 u64 tmp = ((u64)from) << shift_constant;
131
132 tmp += freq/2; /* round for do_div */
133 do_div(tmp, freq);
134
135 return (u32)tmp;
136}
137
121/** 138/**
122 * clocksource_khz2mult - calculates mult from khz and shift 139 * clocksource_khz2mult - calculates mult from khz and shift
123 * @khz: Clocksource frequency in KHz 140 * @khz: Clocksource frequency in KHz
@@ -128,19 +145,7 @@ struct clocksource {
128 */ 145 */
129static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant) 146static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
130{ 147{
131 /* khz = cyc/(Million ns) 148 return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
132 * mult/2^shift = ns/cyc
133 * mult = ns/cyc * 2^shift
134 * mult = 1Million/khz * 2^shift
135 * mult = 1000000 * 2^shift / khz
136 * mult = (1000000<<shift) / khz
137 */
138 u64 tmp = ((u64)1000000) << shift_constant;
139
140 tmp += khz/2; /* round for do_div */
141 do_div(tmp, khz);
142
143 return (u32)tmp;
144} 149}
145 150
146/** 151/**
@@ -154,19 +159,7 @@ static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
154 */ 159 */
155static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) 160static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
156{ 161{
157 /* hz = cyc/(Billion ns) 162 return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
158 * mult/2^shift = ns/cyc
159 * mult = ns/cyc * 2^shift
160 * mult = 1Billion/hz * 2^shift
161 * mult = 1000000000 * 2^shift / hz
162 * mult = (1000000000<<shift) / hz
163 */
164 u64 tmp = ((u64)1000000000) << shift_constant;
165
166 tmp += hz/2; /* round for do_div */
167 do_div(tmp, hz);
168
169 return (u32)tmp;
170} 163}
171 164
172/** 165/**
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 4cd4ddf64cc7..d7c8de583a23 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -52,6 +52,10 @@ extern void compaction_defer_reset(struct zone *zone, int order,
52 bool alloc_success); 52 bool alloc_success);
53extern bool compaction_restarting(struct zone *zone, int order); 53extern bool compaction_restarting(struct zone *zone, int order);
54 54
55extern int kcompactd_run(int nid);
56extern void kcompactd_stop(int nid);
57extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
58
55#else 59#else
56static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, 60static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
57 unsigned int order, int alloc_flags, 61 unsigned int order, int alloc_flags,
@@ -84,6 +88,18 @@ static inline bool compaction_deferred(struct zone *zone, int order)
84 return true; 88 return true;
85} 89}
86 90
91static inline int kcompactd_run(int nid)
92{
93 return 0;
94}
95static inline void kcompactd_stop(int nid)
96{
97}
98
99static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
100{
101}
102
87#endif /* CONFIG_COMPACTION */ 103#endif /* CONFIG_COMPACTION */
88 104
89#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 105#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
diff --git a/include/linux/compat.h b/include/linux/compat.h
index a76c9172b2eb..f964ef79e0ad 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -5,6 +5,8 @@
5 * syscall compatibility layer. 5 * syscall compatibility layer.
6 */ 6 */
7 7
8#include <linux/types.h>
9
8#ifdef CONFIG_COMPAT 10#ifdef CONFIG_COMPAT
9 11
10#include <linux/stat.h> 12#include <linux/stat.h>
@@ -340,6 +342,12 @@ asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
340asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd, 342asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
341 const struct compat_iovec __user *vec, 343 const struct compat_iovec __user *vec,
342 compat_ulong_t vlen, u32 pos_low, u32 pos_high); 344 compat_ulong_t vlen, u32 pos_low, u32 pos_high);
345asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
346 const struct compat_iovec __user *vec,
347 compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
348asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
349 const struct compat_iovec __user *vec,
350 compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
343 351
344#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 352#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
345asmlinkage long compat_sys_preadv64(unsigned long fd, 353asmlinkage long compat_sys_preadv64(unsigned long fd,
@@ -713,9 +721,22 @@ asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
713 721
714asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, 722asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
715 int, const char __user *); 723 int, const char __user *);
724
725/*
726 * For most but not all architectures, "am I in a compat syscall?" and
727 * "am I a compat task?" are the same question. For architectures on which
728 * they aren't the same question, arch code can override in_compat_syscall.
729 */
730
731#ifndef in_compat_syscall
732static inline bool in_compat_syscall(void) { return is_compat_task(); }
733#endif
734
716#else 735#else
717 736
718#define is_compat_task() (0) 737#define is_compat_task() (0)
738static inline bool in_compat_syscall(void) { return false; }
719 739
720#endif /* CONFIG_COMPAT */ 740#endif /* CONFIG_COMPAT */
741
721#endif /* _LINUX_COMPAT_H */ 742#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index d1e49d52b640..de179993e039 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -10,3 +10,8 @@
10#undef uninitialized_var 10#undef uninitialized_var
11#define uninitialized_var(x) x = *(&(x)) 11#define uninitialized_var(x) x = *(&(x))
12#endif 12#endif
13
14/* same as gcc, this was present in clang-2.6 so we can assume it works
15 * with any version that can compile the kernel
16 */
17#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 22ab246feed3..3d5202eda22f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -199,7 +199,7 @@
199#define unreachable() __builtin_unreachable() 199#define unreachable() __builtin_unreachable()
200 200
201/* Mark a function definition as prohibited from being cloned. */ 201/* Mark a function definition as prohibited from being cloned. */
202#define __noclone __attribute__((__noclone__)) 202#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
203 203
204#endif /* GCC_VERSION >= 40500 */ 204#endif /* GCC_VERSION >= 40500 */
205 205
@@ -246,7 +246,7 @@
246#define __HAVE_BUILTIN_BSWAP32__ 246#define __HAVE_BUILTIN_BSWAP32__
247#define __HAVE_BUILTIN_BSWAP64__ 247#define __HAVE_BUILTIN_BSWAP64__
248#endif 248#endif
249#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) 249#if GCC_VERSION >= 40800
250#define __HAVE_BUILTIN_BSWAP16__ 250#define __HAVE_BUILTIN_BSWAP16__
251#endif 251#endif
252#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ 252#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 48f5aab117ae..b5ff9881bef8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -20,12 +20,14 @@
20# define __pmem __attribute__((noderef, address_space(5))) 20# define __pmem __attribute__((noderef, address_space(5)))
21#ifdef CONFIG_SPARSE_RCU_POINTER 21#ifdef CONFIG_SPARSE_RCU_POINTER
22# define __rcu __attribute__((noderef, address_space(4))) 22# define __rcu __attribute__((noderef, address_space(4)))
23#else 23#else /* CONFIG_SPARSE_RCU_POINTER */
24# define __rcu 24# define __rcu
25#endif 25#endif /* CONFIG_SPARSE_RCU_POINTER */
26# define __private __attribute__((noderef))
26extern void __chk_user_ptr(const volatile void __user *); 27extern void __chk_user_ptr(const volatile void __user *);
27extern void __chk_io_ptr(const volatile void __iomem *); 28extern void __chk_io_ptr(const volatile void __iomem *);
28#else 29# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
30#else /* __CHECKER__ */
29# define __user 31# define __user
30# define __kernel 32# define __kernel
31# define __safe 33# define __safe
@@ -44,7 +46,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
44# define __percpu 46# define __percpu
45# define __rcu 47# define __rcu
46# define __pmem 48# define __pmem
47#endif 49# define __private
50# define ACCESS_PRIVATE(p, member) ((p)->member)
51#endif /* __CHECKER__ */
48 52
49/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ 53/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
50#define ___PASTE(a,b) a##b 54#define ___PASTE(a,b) a##b
@@ -263,8 +267,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
263 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 267 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
264 * data types like structs or unions. If the size of the accessed data 268 * data types like structs or unions. If the size of the accessed data
265 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 269 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
266 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a 270 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
267 * compile-time warning. 271 * least two memcpy()s: one for the __builtin_memcpy() and then one for
272 * the macro doing the copy of variable - '__u' allocated on the stack.
268 * 273 *
269 * Their two major use cases are: (1) Mediating communication between 274 * Their two major use cases are: (1) Mediating communication between
270 * process-level code and irq/NMI handlers, all running on the same CPU, 275 * process-level code and irq/NMI handlers, all running on the same CPU,
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index f8165c129ccb..d9d6a9d77489 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -96,7 +96,8 @@ struct config_group {
96 struct config_item cg_item; 96 struct config_item cg_item;
97 struct list_head cg_children; 97 struct list_head cg_children;
98 struct configfs_subsystem *cg_subsys; 98 struct configfs_subsystem *cg_subsys;
99 struct config_group **default_groups; 99 struct list_head default_groups;
100 struct list_head group_entry;
100}; 101};
101 102
102extern void config_group_init(struct config_group *group); 103extern void config_group_init(struct config_group *group);
@@ -123,6 +124,12 @@ extern struct config_item *config_group_find_item(struct config_group *,
123 const char *); 124 const char *);
124 125
125 126
127static inline void configfs_add_default_group(struct config_group *new_group,
128 struct config_group *group)
129{
130 list_add_tail(&new_group->group_entry, &group->default_groups);
131}
132
126struct configfs_attribute { 133struct configfs_attribute {
127 const char *ca_name; 134 const char *ca_name;
128 struct module *ca_owner; 135 struct module *ca_owner;
@@ -181,7 +188,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
181} 188}
182 189
183#define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \ 190#define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \
184static struct configfs_attribute _pfx##attr_##_name = { \ 191static struct configfs_bin_attribute _pfx##attr_##_name = { \
185 .cb_attr = { \ 192 .cb_attr = { \
186 .ca_name = __stringify(_name), \ 193 .ca_name = __stringify(_name), \
187 .ca_mode = S_IRUGO, \ 194 .ca_mode = S_IRUGO, \
@@ -193,7 +200,7 @@ static struct configfs_attribute _pfx##attr_##_name = { \
193} 200}
194 201
195#define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \ 202#define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \
196static struct configfs_attribute _pfx##attr_##_name = { \ 203static struct configfs_bin_attribute _pfx##attr_##_name = { \
197 .cb_attr = { \ 204 .cb_attr = { \
198 .ca_name = __stringify(_name), \ 205 .ca_name = __stringify(_name), \
199 .ca_mode = S_IWUSR, \ 206 .ca_mode = S_IWUSR, \
@@ -251,6 +258,8 @@ int configfs_register_group(struct config_group *parent_group,
251 struct config_group *group); 258 struct config_group *group);
252void configfs_unregister_group(struct config_group *group); 259void configfs_unregister_group(struct config_group *group);
253 260
261void configfs_remove_default_groups(struct config_group *group);
262
254struct config_group * 263struct config_group *
255configfs_register_default_group(struct config_group *parent_group, 264configfs_register_default_group(struct config_group *parent_group,
256 const char *name, 265 const char *name,
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
new file mode 100644
index 000000000000..7d410260661b
--- /dev/null
+++ b/include/linux/coresight-pmu.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _LINUX_CORESIGHT_PMU_H
19#define _LINUX_CORESIGHT_PMU_H
20
21#define CORESIGHT_ETM_PMU_NAME "cs_etm"
22#define CORESIGHT_ETM_PMU_SEED 0x10
23
24/* ETMv3.5/PTM's ETMCR config bit */
25#define ETM_OPT_CYCACC 12
26#define ETM_OPT_TS 28
27
28static inline int coresight_get_trace_id(int cpu)
29{
30 /*
31 * A trace ID of value 0 is invalid, so let's start at some
32 * random value that fits in 7 bits and go from there. Since
33 * the common convention is to have data trace IDs be I(N) + 1,
34 * set instruction trace IDs as a function of the CPU number.
35 */
36 return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
37}
38
39#endif
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index a7cabfa23b55..385d62e64abb 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -14,6 +14,7 @@
14#define _LINUX_CORESIGHT_H 14#define _LINUX_CORESIGHT_H
15 15
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/perf_event.h>
17#include <linux/sched.h> 18#include <linux/sched.h>
18 19
19/* Peripheral id registers (0xFD0-0xFEC) */ 20/* Peripheral id registers (0xFD0-0xFEC) */
@@ -152,7 +153,6 @@ struct coresight_connection {
152 by @coresight_ops. 153 by @coresight_ops.
153 * @dev: The device entity associated to this component. 154 * @dev: The device entity associated to this component.
154 * @refcnt: keep track of what is in use. 155 * @refcnt: keep track of what is in use.
155 * @path_link: link of current component into the path being enabled.
156 * @orphan: true if the component has connections that haven't been linked. 156 * @orphan: true if the component has connections that haven't been linked.
157 * @enable: 'true' if component is currently part of an active path. 157 * @enable: 'true' if component is currently part of an active path.
158 * @activated: 'true' only if a _sink_ has been activated. A sink can be 158 * @activated: 'true' only if a _sink_ has been activated. A sink can be
@@ -168,7 +168,6 @@ struct coresight_device {
168 const struct coresight_ops *ops; 168 const struct coresight_ops *ops;
169 struct device dev; 169 struct device dev;
170 atomic_t *refcnt; 170 atomic_t *refcnt;
171 struct list_head path_link;
172 bool orphan; 171 bool orphan;
173 bool enable; /* true only if configured as part of a path */ 172 bool enable; /* true only if configured as part of a path */
174 bool activated; /* true only if a sink is part of a path */ 173 bool activated; /* true only if a sink is part of a path */
@@ -183,12 +182,29 @@ struct coresight_device {
183/** 182/**
184 * struct coresight_ops_sink - basic operations for a sink 183 * struct coresight_ops_sink - basic operations for a sink
185 * Operations available for sinks 184 * Operations available for sinks
186 * @enable: enables the sink. 185 * @enable: enables the sink.
187 * @disable: disables the sink. 186 * @disable: disables the sink.
187 * @alloc_buffer: initialises perf's ring buffer for trace collection.
188 * @free_buffer: release memory allocated in @get_config.
189 * @set_buffer: initialises buffer mechanic before a trace session.
190 * @reset_buffer: finalises buffer mechanic after a trace session.
191 * @update_buffer: update buffer pointers after a trace session.
188 */ 192 */
189struct coresight_ops_sink { 193struct coresight_ops_sink {
190 int (*enable)(struct coresight_device *csdev); 194 int (*enable)(struct coresight_device *csdev, u32 mode);
191 void (*disable)(struct coresight_device *csdev); 195 void (*disable)(struct coresight_device *csdev);
196 void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
197 void **pages, int nr_pages, bool overwrite);
198 void (*free_buffer)(void *config);
199 int (*set_buffer)(struct coresight_device *csdev,
200 struct perf_output_handle *handle,
201 void *sink_config);
202 unsigned long (*reset_buffer)(struct coresight_device *csdev,
203 struct perf_output_handle *handle,
204 void *sink_config, bool *lost);
205 void (*update_buffer)(struct coresight_device *csdev,
206 struct perf_output_handle *handle,
207 void *sink_config);
192}; 208};
193 209
194/** 210/**
@@ -205,14 +221,18 @@ struct coresight_ops_link {
205/** 221/**
206 * struct coresight_ops_source - basic operations for a source 222 * struct coresight_ops_source - basic operations for a source
207 * Operations available for sources. 223 * Operations available for sources.
224 * @cpu_id: returns the value of the CPU number this component
225 * is associated to.
208 * @trace_id: returns the value of the component's trace ID as known 226 * @trace_id: returns the value of the component's trace ID as known
209 to the HW. 227 * to the HW.
210 * @enable: enables tracing for a source. 228 * @enable: enables tracing for a source.
211 * @disable: disables tracing for a source. 229 * @disable: disables tracing for a source.
212 */ 230 */
213struct coresight_ops_source { 231struct coresight_ops_source {
232 int (*cpu_id)(struct coresight_device *csdev);
214 int (*trace_id)(struct coresight_device *csdev); 233 int (*trace_id)(struct coresight_device *csdev);
215 int (*enable)(struct coresight_device *csdev); 234 int (*enable)(struct coresight_device *csdev,
235 struct perf_event_attr *attr, u32 mode);
216 void (*disable)(struct coresight_device *csdev); 236 void (*disable)(struct coresight_device *csdev);
217}; 237};
218 238
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d2ca8c38f9c4..f9b1fab4388a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -16,6 +16,7 @@
16#include <linux/node.h> 16#include <linux/node.h>
17#include <linux/compiler.h> 17#include <linux/compiler.h>
18#include <linux/cpumask.h> 18#include <linux/cpumask.h>
19#include <linux/cpuhotplug.h>
19 20
20struct device; 21struct device;
21struct device_node; 22struct device_node;
@@ -27,6 +28,9 @@ struct cpu {
27 struct device dev; 28 struct device dev;
28}; 29};
29 30
31extern void boot_cpu_init(void);
32extern void boot_cpu_state_init(void);
33
30extern int register_cpu(struct cpu *cpu, int num); 34extern int register_cpu(struct cpu *cpu, int num);
31extern struct device *get_cpu_device(unsigned cpu); 35extern struct device *get_cpu_device(unsigned cpu);
32extern bool cpu_is_hotpluggable(unsigned cpu); 36extern bool cpu_is_hotpluggable(unsigned cpu);
@@ -74,7 +78,7 @@ enum {
74 /* migration should happen before other stuff but after perf */ 78 /* migration should happen before other stuff but after perf */
75 CPU_PRI_PERF = 20, 79 CPU_PRI_PERF = 20,
76 CPU_PRI_MIGRATION = 10, 80 CPU_PRI_MIGRATION = 10,
77 CPU_PRI_SMPBOOT = 9, 81
78 /* bring up workqueues before normal notifiers and down after */ 82 /* bring up workqueues before normal notifiers and down after */
79 CPU_PRI_WORKQUEUE_UP = 5, 83 CPU_PRI_WORKQUEUE_UP = 5,
80 CPU_PRI_WORKQUEUE_DOWN = -5, 84 CPU_PRI_WORKQUEUE_DOWN = -5,
@@ -97,9 +101,7 @@ enum {
97 * Called on the new cpu, just before 101 * Called on the new cpu, just before
98 * enabling interrupts. Must not sleep, 102 * enabling interrupts. Must not sleep,
99 * must not fail */ 103 * must not fail */
100#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached 104#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly,
101 * idle loop. */
102#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
103 * perhaps due to preemption. */ 105 * perhaps due to preemption. */
104 106
105/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend 107/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
@@ -118,6 +120,7 @@ enum {
118 120
119 121
120#ifdef CONFIG_SMP 122#ifdef CONFIG_SMP
123extern bool cpuhp_tasks_frozen;
121/* Need to know about CPUs going up/down? */ 124/* Need to know about CPUs going up/down? */
122#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) 125#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
123#define cpu_notifier(fn, pri) { \ 126#define cpu_notifier(fn, pri) { \
@@ -167,7 +170,6 @@ static inline void __unregister_cpu_notifier(struct notifier_block *nb)
167} 170}
168#endif 171#endif
169 172
170void smpboot_thread_init(void);
171int cpu_up(unsigned int cpu); 173int cpu_up(unsigned int cpu);
172void notify_cpu_starting(unsigned int cpu); 174void notify_cpu_starting(unsigned int cpu);
173extern void cpu_maps_update_begin(void); 175extern void cpu_maps_update_begin(void);
@@ -177,6 +179,7 @@ extern void cpu_maps_update_done(void);
177#define cpu_notifier_register_done cpu_maps_update_done 179#define cpu_notifier_register_done cpu_maps_update_done
178 180
179#else /* CONFIG_SMP */ 181#else /* CONFIG_SMP */
182#define cpuhp_tasks_frozen 0
180 183
181#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) 184#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
182#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) 185#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
@@ -215,10 +218,6 @@ static inline void cpu_notifier_register_done(void)
215{ 218{
216} 219}
217 220
218static inline void smpboot_thread_init(void)
219{
220}
221
222#endif /* CONFIG_SMP */ 221#endif /* CONFIG_SMP */
223extern struct bus_type cpu_subsys; 222extern struct bus_type cpu_subsys;
224 223
@@ -265,11 +264,6 @@ static inline int disable_nonboot_cpus(void) { return 0; }
265static inline void enable_nonboot_cpus(void) {} 264static inline void enable_nonboot_cpus(void) {}
266#endif /* !CONFIG_PM_SLEEP_SMP */ 265#endif /* !CONFIG_PM_SLEEP_SMP */
267 266
268enum cpuhp_state {
269 CPUHP_OFFLINE,
270 CPUHP_ONLINE,
271};
272
273void cpu_startup_entry(enum cpuhp_state state); 267void cpu_startup_entry(enum cpuhp_state state);
274 268
275void cpu_idle_poll_ctrl(bool enable); 269void cpu_idle_poll_ctrl(bool enable);
@@ -280,14 +274,15 @@ void arch_cpu_idle_enter(void);
280void arch_cpu_idle_exit(void); 274void arch_cpu_idle_exit(void);
281void arch_cpu_idle_dead(void); 275void arch_cpu_idle_dead(void);
282 276
283DECLARE_PER_CPU(bool, cpu_dead_idle);
284
285int cpu_report_state(int cpu); 277int cpu_report_state(int cpu);
286int cpu_check_up_prepare(int cpu); 278int cpu_check_up_prepare(int cpu);
287void cpu_set_state_online(int cpu); 279void cpu_set_state_online(int cpu);
288#ifdef CONFIG_HOTPLUG_CPU 280#ifdef CONFIG_HOTPLUG_CPU
289bool cpu_wait_death(unsigned int cpu, int seconds); 281bool cpu_wait_death(unsigned int cpu, int seconds);
290bool cpu_report_death(void); 282bool cpu_report_death(void);
283void cpuhp_report_idle_dead(void);
284#else
285static inline void cpuhp_report_idle_dead(void) { }
291#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 286#endif /* #ifdef CONFIG_HOTPLUG_CPU */
292 287
293#endif /* _LINUX_CPU_H_ */ 288#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 88a4215125bc..718e8725de8a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -80,7 +80,6 @@ struct cpufreq_policy {
80 unsigned int last_policy; /* policy before unplug */ 80 unsigned int last_policy; /* policy before unplug */
81 struct cpufreq_governor *governor; /* see below */ 81 struct cpufreq_governor *governor; /* see below */
82 void *governor_data; 82 void *governor_data;
83 bool governor_enabled; /* governor start/stop flag */
84 char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */ 83 char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
85 84
86 struct work_struct update; /* if update_policy() needs to be 85 struct work_struct update; /* if update_policy() needs to be
@@ -100,10 +99,6 @@ struct cpufreq_policy {
100 * - Any routine that will write to the policy structure and/or may take away 99 * - Any routine that will write to the policy structure and/or may take away
101 * the policy altogether (eg. CPU hotplug), will hold this lock in write 100 * the policy altogether (eg. CPU hotplug), will hold this lock in write
102 * mode before doing so. 101 * mode before doing so.
103 *
104 * Additional rules:
105 * - Lock should not be held across
106 * __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
107 */ 102 */
108 struct rw_semaphore rwsem; 103 struct rw_semaphore rwsem;
109 104
@@ -464,29 +459,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
464int cpufreq_register_governor(struct cpufreq_governor *governor); 459int cpufreq_register_governor(struct cpufreq_governor *governor);
465void cpufreq_unregister_governor(struct cpufreq_governor *governor); 460void cpufreq_unregister_governor(struct cpufreq_governor *governor);
466 461
467/* CPUFREQ DEFAULT GOVERNOR */ 462struct cpufreq_governor *cpufreq_default_governor(void);
468/* 463struct cpufreq_governor *cpufreq_fallback_governor(void);
469 * Performance governor is fallback governor if any other gov failed to auto
470 * load due latency restrictions
471 */
472#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
473extern struct cpufreq_governor cpufreq_gov_performance;
474#endif
475#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
476#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance)
477#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE)
478extern struct cpufreq_governor cpufreq_gov_powersave;
479#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave)
480#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
481extern struct cpufreq_governor cpufreq_gov_userspace;
482#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace)
483#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
484extern struct cpufreq_governor cpufreq_gov_ondemand;
485#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand)
486#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
487extern struct cpufreq_governor cpufreq_gov_conservative;
488#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
489#endif
490 464
491/********************************************************************* 465/*********************************************************************
492 * FREQUENCY TABLE HELPERS * 466 * FREQUENCY TABLE HELPERS *
@@ -525,16 +499,6 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
525} 499}
526#endif 500#endif
527 501
528static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos)
529{
530 while ((*pos)->frequency != CPUFREQ_TABLE_END)
531 if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID)
532 return true;
533 else
534 (*pos)++;
535 return false;
536}
537
538/* 502/*
539 * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table 503 * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
540 * @pos: the cpufreq_frequency_table * to use as a loop cursor. 504 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -551,8 +515,11 @@ static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos)
551 * @table: the cpufreq_frequency_table * to iterate over. 515 * @table: the cpufreq_frequency_table * to iterate over.
552 */ 516 */
553 517
554#define cpufreq_for_each_valid_entry(pos, table) \ 518#define cpufreq_for_each_valid_entry(pos, table) \
555 for (pos = table; cpufreq_next_valid(&pos); pos++) 519 for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \
520 if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
521 continue; \
522 else
556 523
557int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, 524int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
558 struct cpufreq_frequency_table *table); 525 struct cpufreq_frequency_table *table);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
new file mode 100644
index 000000000000..5d68e15e46b7
--- /dev/null
+++ b/include/linux/cpuhotplug.h
@@ -0,0 +1,93 @@
1#ifndef __CPUHOTPLUG_H
2#define __CPUHOTPLUG_H
3
4enum cpuhp_state {
5 CPUHP_OFFLINE,
6 CPUHP_CREATE_THREADS,
7 CPUHP_NOTIFY_PREPARE,
8 CPUHP_BRINGUP_CPU,
9 CPUHP_AP_IDLE_DEAD,
10 CPUHP_AP_OFFLINE,
11 CPUHP_AP_NOTIFY_STARTING,
12 CPUHP_AP_ONLINE,
13 CPUHP_TEARDOWN_CPU,
14 CPUHP_AP_ONLINE_IDLE,
15 CPUHP_AP_SMPBOOT_THREADS,
16 CPUHP_AP_NOTIFY_ONLINE,
17 CPUHP_AP_ONLINE_DYN,
18 CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
19 CPUHP_ONLINE,
20};
21
22int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
23 int (*startup)(unsigned int cpu),
24 int (*teardown)(unsigned int cpu));
25
26/**
27 * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
28 * @state: The state for which the calls are installed
29 * @name: Name of the callback (will be used in debug output)
30 * @startup: startup callback function
31 * @teardown: teardown callback function
32 *
33 * Installs the callback functions and invokes the startup callback on
34 * the present cpus which have already reached the @state.
35 */
36static inline int cpuhp_setup_state(enum cpuhp_state state,
37 const char *name,
38 int (*startup)(unsigned int cpu),
39 int (*teardown)(unsigned int cpu))
40{
41 return __cpuhp_setup_state(state, name, true, startup, teardown);
42}
43
44/**
45 * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
46 * callbacks
47 * @state: The state for which the calls are installed
48 * @name: Name of the callback.
49 * @startup: startup callback function
50 * @teardown: teardown callback function
51 *
52 * Same as @cpuhp_setup_state except that no calls are executed are invoked
53 * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
54 */
55static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
56 const char *name,
57 int (*startup)(unsigned int cpu),
58 int (*teardown)(unsigned int cpu))
59{
60 return __cpuhp_setup_state(state, name, false, startup, teardown);
61}
62
63void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
64
65/**
66 * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
67 * @state: The state for which the calls are removed
68 *
69 * Removes the callback functions and invokes the teardown callback on
70 * the present cpus which have already reached the @state.
71 */
72static inline void cpuhp_remove_state(enum cpuhp_state state)
73{
74 __cpuhp_remove_state(state, true);
75}
76
77/**
78 * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
79 * teardown
80 * @state: The state for which the calls are removed
81 */
82static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
83{
84 __cpuhp_remove_state(state, false);
85}
86
87#ifdef CONFIG_SMP
88void cpuhp_online_idle(enum cpuhp_state state);
89#else
90static inline void cpuhp_online_idle(enum cpuhp_state state) { }
91#endif
92
93#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index fc14275ff34e..40cee6b77a93 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -607,8 +607,6 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
607 607
608/** 608/**
609 * cpumask_size - size to allocate for a 'struct cpumask' in bytes 609 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
610 *
611 * This will eventually be a runtime variable, depending on nr_cpu_ids.
612 */ 610 */
613static inline size_t cpumask_size(void) 611static inline size_t cpumask_size(void)
614{ 612{
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160ee5803..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
137 task_unlock(current); 137 task_unlock(current);
138} 138}
139 139
140extern void cpuset_post_attach_flush(void);
141
142#else /* !CONFIG_CPUSETS */ 140#else /* !CONFIG_CPUSETS */
143 141
144static inline bool cpusets_enabled(void) { return false; } 142static inline bool cpusets_enabled(void) { return false; }
@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
245 return false; 243 return false;
246} 244}
247 245
248static inline void cpuset_post_attach_flush(void)
249{
250}
251
252#endif /* !CONFIG_CPUSETS */ 246#endif /* !CONFIG_CPUSETS */
253 247
254#endif /* _LINUX_CPUSET_H */ 248#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 8d70e1361ecd..257db64562e5 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -377,7 +377,10 @@ extern struct user_namespace init_user_ns;
377#ifdef CONFIG_USER_NS 377#ifdef CONFIG_USER_NS
378#define current_user_ns() (current_cred_xxx(user_ns)) 378#define current_user_ns() (current_cred_xxx(user_ns))
379#else 379#else
380#define current_user_ns() (&init_user_ns) 380static inline struct user_namespace *current_user_ns(void)
381{
382 return &init_user_ns;
383}
381#endif 384#endif
382 385
383 386
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index e71cb70a1ac2..99c94899ad0f 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -54,7 +54,6 @@
54#define CRYPTO_ALG_TYPE_AHASH 0x0000000a 54#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
55#define CRYPTO_ALG_TYPE_RNG 0x0000000c 55#define CRYPTO_ALG_TYPE_RNG 0x0000000c
56#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d 56#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
57#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
58 57
59#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e 58#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
60#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c 59#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -137,7 +136,6 @@ struct scatterlist;
137struct crypto_ablkcipher; 136struct crypto_ablkcipher;
138struct crypto_async_request; 137struct crypto_async_request;
139struct crypto_blkcipher; 138struct crypto_blkcipher;
140struct crypto_hash;
141struct crypto_tfm; 139struct crypto_tfm;
142struct crypto_type; 140struct crypto_type;
143struct skcipher_givcrypt_request; 141struct skcipher_givcrypt_request;
@@ -187,11 +185,6 @@ struct cipher_desc {
187 void *info; 185 void *info;
188}; 186};
189 187
190struct hash_desc {
191 struct crypto_hash *tfm;
192 u32 flags;
193};
194
195/** 188/**
196 * DOC: Block Cipher Algorithm Definitions 189 * DOC: Block Cipher Algorithm Definitions
197 * 190 *
@@ -519,18 +512,6 @@ struct cipher_tfm {
519 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 512 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
520}; 513};
521 514
522struct hash_tfm {
523 int (*init)(struct hash_desc *desc);
524 int (*update)(struct hash_desc *desc,
525 struct scatterlist *sg, unsigned int nsg);
526 int (*final)(struct hash_desc *desc, u8 *out);
527 int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
528 unsigned int nsg, u8 *out);
529 int (*setkey)(struct crypto_hash *tfm, const u8 *key,
530 unsigned int keylen);
531 unsigned int digestsize;
532};
533
534struct compress_tfm { 515struct compress_tfm {
535 int (*cot_compress)(struct crypto_tfm *tfm, 516 int (*cot_compress)(struct crypto_tfm *tfm,
536 const u8 *src, unsigned int slen, 517 const u8 *src, unsigned int slen,
@@ -543,7 +524,6 @@ struct compress_tfm {
543#define crt_ablkcipher crt_u.ablkcipher 524#define crt_ablkcipher crt_u.ablkcipher
544#define crt_blkcipher crt_u.blkcipher 525#define crt_blkcipher crt_u.blkcipher
545#define crt_cipher crt_u.cipher 526#define crt_cipher crt_u.cipher
546#define crt_hash crt_u.hash
547#define crt_compress crt_u.compress 527#define crt_compress crt_u.compress
548 528
549struct crypto_tfm { 529struct crypto_tfm {
@@ -554,7 +534,6 @@ struct crypto_tfm {
554 struct ablkcipher_tfm ablkcipher; 534 struct ablkcipher_tfm ablkcipher;
555 struct blkcipher_tfm blkcipher; 535 struct blkcipher_tfm blkcipher;
556 struct cipher_tfm cipher; 536 struct cipher_tfm cipher;
557 struct hash_tfm hash;
558 struct compress_tfm compress; 537 struct compress_tfm compress;
559 } crt_u; 538 } crt_u;
560 539
@@ -581,10 +560,6 @@ struct crypto_comp {
581 struct crypto_tfm base; 560 struct crypto_tfm base;
582}; 561};
583 562
584struct crypto_hash {
585 struct crypto_tfm base;
586};
587
588enum { 563enum {
589 CRYPTOA_UNSPEC, 564 CRYPTOA_UNSPEC,
590 CRYPTOA_ALG, 565 CRYPTOA_ALG,
@@ -1577,233 +1552,6 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1577 dst, src); 1552 dst, src);
1578} 1553}
1579 1554
1580/**
1581 * DOC: Synchronous Message Digest API
1582 *
1583 * The synchronous message digest API is used with the ciphers of type
1584 * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
1585 */
1586
1587static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
1588{
1589 return (struct crypto_hash *)tfm;
1590}
1591
1592static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
1593{
1594 BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
1595 CRYPTO_ALG_TYPE_HASH_MASK);
1596 return __crypto_hash_cast(tfm);
1597}
1598
1599/**
1600 * crypto_alloc_hash() - allocate synchronous message digest handle
1601 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1602 * message digest cipher
1603 * @type: specifies the type of the cipher
1604 * @mask: specifies the mask for the cipher
1605 *
1606 * Allocate a cipher handle for a message digest. The returned struct
1607 * crypto_hash is the cipher handle that is required for any subsequent
1608 * API invocation for that message digest.
1609 *
1610 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1611 * of an error, PTR_ERR() returns the error code.
1612 */
1613static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
1614 u32 type, u32 mask)
1615{
1616 type &= ~CRYPTO_ALG_TYPE_MASK;
1617 mask &= ~CRYPTO_ALG_TYPE_MASK;
1618 type |= CRYPTO_ALG_TYPE_HASH;
1619 mask |= CRYPTO_ALG_TYPE_HASH_MASK;
1620
1621 return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
1622}
1623
1624static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
1625{
1626 return &tfm->base;
1627}
1628
1629/**
1630 * crypto_free_hash() - zeroize and free message digest handle
1631 * @tfm: cipher handle to be freed
1632 */
1633static inline void crypto_free_hash(struct crypto_hash *tfm)
1634{
1635 crypto_free_tfm(crypto_hash_tfm(tfm));
1636}
1637
1638/**
1639 * crypto_has_hash() - Search for the availability of a message digest
1640 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1641 * message digest cipher
1642 * @type: specifies the type of the cipher
1643 * @mask: specifies the mask for the cipher
1644 *
1645 * Return: true when the message digest cipher is known to the kernel crypto
1646 * API; false otherwise
1647 */
1648static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
1649{
1650 type &= ~CRYPTO_ALG_TYPE_MASK;
1651 mask &= ~CRYPTO_ALG_TYPE_MASK;
1652 type |= CRYPTO_ALG_TYPE_HASH;
1653 mask |= CRYPTO_ALG_TYPE_HASH_MASK;
1654
1655 return crypto_has_alg(alg_name, type, mask);
1656}
1657
1658static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
1659{
1660 return &crypto_hash_tfm(tfm)->crt_hash;
1661}
1662
1663/**
1664 * crypto_hash_blocksize() - obtain block size for message digest
1665 * @tfm: cipher handle
1666 *
1667 * The block size for the message digest cipher referenced with the cipher
1668 * handle is returned.
1669 *
1670 * Return: block size of cipher
1671 */
1672static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
1673{
1674 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
1675}
1676
1677static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
1678{
1679 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
1680}
1681
1682/**
1683 * crypto_hash_digestsize() - obtain message digest size
1684 * @tfm: cipher handle
1685 *
1686 * The size for the message digest created by the message digest cipher
1687 * referenced with the cipher handle is returned.
1688 *
1689 * Return: message digest size
1690 */
1691static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
1692{
1693 return crypto_hash_crt(tfm)->digestsize;
1694}
1695
1696static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
1697{
1698 return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
1699}
1700
1701static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
1702{
1703 crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
1704}
1705
1706static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
1707{
1708 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
1709}
1710
1711/**
1712 * crypto_hash_init() - (re)initialize message digest handle
1713 * @desc: cipher request handle that to be filled by caller --
1714 * desc.tfm is filled with the hash cipher handle;
1715 * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1716 *
1717 * The call (re-)initializes the message digest referenced by the hash cipher
1718 * request handle. Any potentially existing state created by previous
1719 * operations is discarded.
1720 *
1721 * Return: 0 if the message digest initialization was successful; < 0 if an
1722 * error occurred
1723 */
1724static inline int crypto_hash_init(struct hash_desc *desc)
1725{
1726 return crypto_hash_crt(desc->tfm)->init(desc);
1727}
1728
1729/**
1730 * crypto_hash_update() - add data to message digest for processing
1731 * @desc: cipher request handle
1732 * @sg: scatter / gather list pointing to the data to be added to the message
1733 * digest
1734 * @nbytes: number of bytes to be processed from @sg
1735 *
1736 * Updates the message digest state of the cipher handle pointed to by the
1737 * hash cipher request handle with the input data pointed to by the
1738 * scatter/gather list.
1739 *
1740 * Return: 0 if the message digest update was successful; < 0 if an error
1741 * occurred
1742 */
1743static inline int crypto_hash_update(struct hash_desc *desc,
1744 struct scatterlist *sg,
1745 unsigned int nbytes)
1746{
1747 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
1748}
1749
1750/**
1751 * crypto_hash_final() - calculate message digest
1752 * @desc: cipher request handle
1753 * @out: message digest output buffer -- The caller must ensure that the out
1754 * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
1755 * function).
1756 *
1757 * Finalize the message digest operation and create the message digest
1758 * based on all data added to the cipher handle. The message digest is placed
1759 * into the output buffer.
1760 *
1761 * Return: 0 if the message digest creation was successful; < 0 if an error
1762 * occurred
1763 */
1764static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
1765{
1766 return crypto_hash_crt(desc->tfm)->final(desc, out);
1767}
1768
1769/**
1770 * crypto_hash_digest() - calculate message digest for a buffer
1771 * @desc: see crypto_hash_final()
1772 * @sg: see crypto_hash_update()
1773 * @nbytes: see crypto_hash_update()
1774 * @out: see crypto_hash_final()
1775 *
1776 * This function is a "short-hand" for the function calls of crypto_hash_init,
1777 * crypto_hash_update and crypto_hash_final. The parameters have the same
1778 * meaning as discussed for those separate three functions.
1779 *
1780 * Return: 0 if the message digest creation was successful; < 0 if an error
1781 * occurred
1782 */
1783static inline int crypto_hash_digest(struct hash_desc *desc,
1784 struct scatterlist *sg,
1785 unsigned int nbytes, u8 *out)
1786{
1787 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
1788}
1789
1790/**
1791 * crypto_hash_setkey() - set key for message digest
1792 * @hash: cipher handle
1793 * @key: buffer holding the key
1794 * @keylen: length of the key in bytes
1795 *
1796 * The caller provided key is set for the message digest cipher. The cipher
1797 * handle must point to a keyed hash in order for this function to succeed.
1798 *
1799 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1800 */
1801static inline int crypto_hash_setkey(struct crypto_hash *hash,
1802 const u8 *key, unsigned int keylen)
1803{
1804 return crypto_hash_crt(hash)->setkey(hash, key, keylen);
1805}
1806
1807static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) 1555static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
1808{ 1556{
1809 return (struct crypto_comp *)tfm; 1557 return (struct crypto_comp *)tfm;
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h
index 542888504994..05b97144d342 100644
--- a/include/linux/davinci_emac.h
+++ b/include/linux/davinci_emac.h
@@ -12,7 +12,7 @@
12#define _LINUX_DAVINCI_EMAC_H 12#define _LINUX_DAVINCI_EMAC_H
13 13
14#include <linux/if_ether.h> 14#include <linux/if_ether.h>
15#include <linux/memory.h> 15#include <linux/nvmem-consumer.h>
16 16
17struct mdio_platform_data { 17struct mdio_platform_data {
18 unsigned long bus_freq; 18 unsigned long bus_freq;
@@ -46,5 +46,5 @@ enum {
46 EMAC_VERSION_2, /* DM646x */ 46 EMAC_VERSION_2, /* DM646x */
47}; 47};
48 48
49void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context); 49void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context);
50#endif 50#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c4b5f4b3f8f8..7e9422cb5989 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -161,6 +161,7 @@ struct dentry_operations {
161 struct vfsmount *(*d_automount)(struct path *); 161 struct vfsmount *(*d_automount)(struct path *);
162 int (*d_manage)(struct dentry *, bool); 162 int (*d_manage)(struct dentry *, bool);
163 struct inode *(*d_select_inode)(struct dentry *, unsigned); 163 struct inode *(*d_select_inode)(struct dentry *, unsigned);
164 struct dentry *(*d_real)(struct dentry *, struct inode *);
164} ____cacheline_aligned; 165} ____cacheline_aligned;
165 166
166/* 167/*
@@ -228,6 +229,9 @@ struct dentry_operations {
228#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ 229#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
229#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */ 230#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
230 231
232#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
233#define DCACHE_OP_REAL 0x08000000
234
231extern seqlock_t rename_lock; 235extern seqlock_t rename_lock;
232 236
233/* 237/*
@@ -246,6 +250,7 @@ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
246extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); 250extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
247extern struct dentry * d_splice_alias(struct inode *, struct dentry *); 251extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
248extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); 252extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
253extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
249extern struct dentry *d_find_any_alias(struct inode *inode); 254extern struct dentry *d_find_any_alias(struct inode *inode);
250extern struct dentry * d_obtain_alias(struct inode *); 255extern struct dentry * d_obtain_alias(struct inode *);
251extern struct dentry * d_obtain_root(struct inode *); 256extern struct dentry * d_obtain_root(struct inode *);
@@ -272,38 +277,8 @@ extern int have_submounts(struct dentry *);
272 * This adds the entry to the hash queues. 277 * This adds the entry to the hash queues.
273 */ 278 */
274extern void d_rehash(struct dentry *); 279extern void d_rehash(struct dentry *);
275
276/**
277 * d_add - add dentry to hash queues
278 * @entry: dentry to add
279 * @inode: The inode to attach to this dentry
280 *
281 * This adds the entry to the hash queues and initializes @inode.
282 * The entry was actually filled in earlier during d_alloc().
283 */
284 280
285static inline void d_add(struct dentry *entry, struct inode *inode) 281extern void d_add(struct dentry *, struct inode *);
286{
287 d_instantiate(entry, inode);
288 d_rehash(entry);
289}
290
291/**
292 * d_add_unique - add dentry to hash queues without aliasing
293 * @entry: dentry to add
294 * @inode: The inode to attach to this dentry
295 *
296 * This adds the entry to the hash queues and initializes @inode.
297 * The entry was actually filled in earlier during d_alloc().
298 */
299static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
300{
301 struct dentry *res;
302
303 res = d_instantiate_unique(entry, inode);
304 d_rehash(res != NULL ? res : entry);
305 return res;
306}
307 282
308extern void dentry_update_name_case(struct dentry *, struct qstr *); 283extern void dentry_update_name_case(struct dentry *, struct qstr *);
309 284
@@ -582,4 +557,24 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
582 return upper; 557 return upper;
583} 558}
584 559
560static inline struct dentry *d_real(struct dentry *dentry)
561{
562 if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
563 return dentry->d_op->d_real(dentry, NULL);
564 else
565 return dentry;
566}
567
568static inline struct inode *vfs_select_inode(struct dentry *dentry,
569 unsigned open_flags)
570{
571 struct inode *inode = d_inode(dentry);
572
573 if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
574 inode = dentry->d_op->d_select_inode(dentry, open_flags);
575
576 return inode;
577}
578
579
585#endif /* __LINUX_DCACHE_H */ 580#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 19c066dce1da..981e53ab84e8 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -162,6 +162,14 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
162 return ERR_PTR(-ENODEV); 162 return ERR_PTR(-ENODEV);
163} 163}
164 164
165static inline struct dentry *debugfs_create_automount(const char *name,
166 struct dentry *parent,
167 struct vfsmount *(*f)(void *),
168 void *data)
169{
170 return ERR_PTR(-ENODEV);
171}
172
165static inline void debugfs_remove(struct dentry *dentry) 173static inline void debugfs_remove(struct dentry *dentry)
166{ } 174{ }
167 175
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ec1c61c87d89..0830c9e86f0d 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -124,6 +124,8 @@ struct dm_dev {
124 char name[16]; 124 char name[16];
125}; 125};
126 126
127dev_t dm_get_dev_t(const char *path);
128
127/* 129/*
128 * Constructors should call these functions to ensure destination devices 130 * Constructors should call these functions to ensure destination devices
129 * are opened/closed correctly. 131 * are opened/closed correctly.
@@ -190,6 +192,13 @@ struct target_type {
190#define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE) 192#define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE)
191 193
192/* 194/*
195 * Indicates that a target may replace any target; even immutable targets.
196 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
197 */
198#define DM_TARGET_WILDCARD 0x00000008
199#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
200
201/*
193 * Some targets need to be sent the same WRITE bio severals times so 202 * Some targets need to be sent the same WRITE bio severals times so
194 * that they can send copies of it to different devices. This function 203 * that they can send copies of it to different devices. This function
195 * examines any supplied bio and returns the number of copies of it the 204 * examines any supplied bio and returns the number of copies of it the
@@ -231,10 +240,10 @@ struct dm_target {
231 unsigned num_write_same_bios; 240 unsigned num_write_same_bios;
232 241
233 /* 242 /*
234 * The minimum number of extra bytes allocated in each bio for the 243 * The minimum number of extra bytes allocated in each io for the
235 * target to use. dm_per_bio_data returns the data location. 244 * target to use.
236 */ 245 */
237 unsigned per_bio_data_size; 246 unsigned per_io_data_size;
238 247
239 /* 248 /*
240 * If defined, this function is called to find out how many 249 * If defined, this function is called to find out how many
diff --git a/include/linux/device.h b/include/linux/device.h
index 6d6f1fec092f..002c59728dbe 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -70,8 +70,11 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
70 * @dev_groups: Default attributes of the devices on the bus. 70 * @dev_groups: Default attributes of the devices on the bus.
71 * @drv_groups: Default attributes of the device drivers on the bus. 71 * @drv_groups: Default attributes of the device drivers on the bus.
72 * @match: Called, perhaps multiple times, whenever a new device or driver 72 * @match: Called, perhaps multiple times, whenever a new device or driver
73 * is added for this bus. It should return a nonzero value if the 73 * is added for this bus. It should return a positive value if the
74 * given device can be handled by the given driver. 74 * given device can be handled by the given driver and zero
75 * otherwise. It may also return error code if determining that
76 * the driver supports the device is not possible. In case of
77 * -EPROBE_DEFER it will queue the device for deferred probing.
75 * @uevent: Called when a device is added, removed, or a few other things 78 * @uevent: Called when a device is added, removed, or a few other things
76 * that generate uevents to add the environment variables. 79 * that generate uevents to add the environment variables.
77 * @probe: Called when a new device or driver add to this bus, and callback 80 * @probe: Called when a new device or driver add to this bus, and callback
@@ -682,6 +685,18 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
682int devm_add_action(struct device *dev, void (*action)(void *), void *data); 685int devm_add_action(struct device *dev, void (*action)(void *), void *data);
683void devm_remove_action(struct device *dev, void (*action)(void *), void *data); 686void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
684 687
688static inline int devm_add_action_or_reset(struct device *dev,
689 void (*action)(void *), void *data)
690{
691 int ret;
692
693 ret = devm_add_action(dev, action, data);
694 if (ret)
695 action(data);
696
697 return ret;
698}
699
685struct device_dma_parameters { 700struct device_dma_parameters {
686 /* 701 /*
687 * a low level driver may set these to teach IOMMU code about 702 * a low level driver may set these to teach IOMMU code about
@@ -958,6 +973,11 @@ static inline void device_lock(struct device *dev)
958 mutex_lock(&dev->mutex); 973 mutex_lock(&dev->mutex);
959} 974}
960 975
976static inline int device_lock_interruptible(struct device *dev)
977{
978 return mutex_lock_interruptible(&dev->mutex);
979}
980
961static inline int device_trylock(struct device *dev) 981static inline int device_trylock(struct device *dev)
962{ 982{
963 return mutex_trylock(&dev->mutex); 983 return mutex_trylock(&dev->mutex);
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index e0ee0b3000b2..5871f292b596 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,37 +15,23 @@
15 15
16#include <linux/errno.h> 16#include <linux/errno.h>
17 17
18struct pts_fs_info;
19
18#ifdef CONFIG_UNIX98_PTYS 20#ifdef CONFIG_UNIX98_PTYS
19 21
20int devpts_new_index(struct inode *ptmx_inode); 22/* Look up a pts fs info and get a ref to it */
21void devpts_kill_index(struct inode *ptmx_inode, int idx); 23struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
22void devpts_add_ref(struct inode *ptmx_inode); 24void devpts_put_ref(struct pts_fs_info *);
23void devpts_del_ref(struct inode *ptmx_inode); 25
26int devpts_new_index(struct pts_fs_info *);
27void devpts_kill_index(struct pts_fs_info *, int);
28
24/* mknod in devpts */ 29/* mknod in devpts */
25struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, 30struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *);
26 void *priv);
27/* get private structure */ 31/* get private structure */
28void *devpts_get_priv(struct inode *pts_inode); 32void *devpts_get_priv(struct dentry *);
29/* unlink */ 33/* unlink */
30void devpts_pty_kill(struct inode *inode); 34void devpts_pty_kill(struct dentry *);
31
32#else
33
34/* Dummy stubs in the no-pty case */
35static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
36static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
37static inline void devpts_add_ref(struct inode *ptmx_inode) { }
38static inline void devpts_del_ref(struct inode *ptmx_inode) { }
39static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
40 dev_t device, int index, void *priv)
41{
42 return ERR_PTR(-EINVAL);
43}
44static inline void *devpts_get_priv(struct inode *pts_inode)
45{
46 return NULL;
47}
48static inline void devpts_pty_kill(struct inode *inode) { }
49 35
50#endif 36#endif
51 37
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index 99c0be00b47c..5246239a4953 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -18,6 +18,7 @@ enum dma_attr {
18 DMA_ATTR_NO_KERNEL_MAPPING, 18 DMA_ATTR_NO_KERNEL_MAPPING,
19 DMA_ATTR_SKIP_CPU_SYNC, 19 DMA_ATTR_SKIP_CPU_SYNC,
20 DMA_ATTR_FORCE_CONTIGUOUS, 20 DMA_ATTR_FORCE_CONTIGUOUS,
21 DMA_ATTR_ALLOC_SINGLE_PAGES,
21 DMA_ATTR_MAX, 22 DMA_ATTR_MAX,
22}; 23};
23 24
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index f98bd7068d55..3fe90d494edb 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -54,7 +54,7 @@ struct dma_buf_attachment;
54 * @release: release this buffer; to be called after the last dma_buf_put. 54 * @release: release this buffer; to be called after the last dma_buf_put.
55 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu 55 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
56 * caches and allocate backing storage (if not yet done) 56 * caches and allocate backing storage (if not yet done)
57 * respectively pin the objet into memory. 57 * respectively pin the object into memory.
58 * @end_cpu_access: [optional] called after cpu access to flush caches. 58 * @end_cpu_access: [optional] called after cpu access to flush caches.
59 * @kmap_atomic: maps a page from the buffer into kernel address 59 * @kmap_atomic: maps a page from the buffer into kernel address
60 * space, users may not block until the subsequent unmap call. 60 * space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@ struct dma_buf_ops {
93 /* after final dma_buf_put() */ 93 /* after final dma_buf_put() */
94 void (*release)(struct dma_buf *); 94 void (*release)(struct dma_buf *);
95 95
96 int (*begin_cpu_access)(struct dma_buf *, size_t, size_t, 96 int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
97 enum dma_data_direction); 97 int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
98 void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
99 enum dma_data_direction);
100 void *(*kmap_atomic)(struct dma_buf *, unsigned long); 98 void *(*kmap_atomic)(struct dma_buf *, unsigned long);
101 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); 99 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
102 void *(*kmap)(struct dma_buf *, unsigned long); 100 void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,10 +222,10 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
224 enum dma_data_direction); 222 enum dma_data_direction);
225void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, 223void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
226 enum dma_data_direction); 224 enum dma_data_direction);
227int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len, 225int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
228 enum dma_data_direction dir); 226 enum dma_data_direction dir);
229void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len, 227int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
230 enum dma_data_direction dir); 228 enum dma_data_direction dir);
231void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); 229void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
232void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); 230void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
233void *dma_buf_kmap(struct dma_buf *, unsigned long); 231void *dma_buf_kmap(struct dma_buf *, unsigned long);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 728ef074602a..9ea9aba28049 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -70,6 +70,8 @@ struct dma_map_ops {
70 int is_phys; 70 int is_phys;
71}; 71};
72 72
73extern struct dma_map_ops dma_noop_ops;
74
73#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) 75#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
74 76
75#define DMA_MASK_NONE 0x0ULL 77#define DMA_MASK_NONE 0x0ULL
@@ -641,31 +643,40 @@ static inline void dmam_release_declared_memory(struct device *dev)
641} 643}
642#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ 644#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
643 645
644static inline void *dma_alloc_writecombine(struct device *dev, size_t size, 646static inline void *dma_alloc_wc(struct device *dev, size_t size,
645 dma_addr_t *dma_addr, gfp_t gfp) 647 dma_addr_t *dma_addr, gfp_t gfp)
646{ 648{
647 DEFINE_DMA_ATTRS(attrs); 649 DEFINE_DMA_ATTRS(attrs);
648 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 650 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
649 return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs); 651 return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
650} 652}
653#ifndef dma_alloc_writecombine
654#define dma_alloc_writecombine dma_alloc_wc
655#endif
651 656
652static inline void dma_free_writecombine(struct device *dev, size_t size, 657static inline void dma_free_wc(struct device *dev, size_t size,
653 void *cpu_addr, dma_addr_t dma_addr) 658 void *cpu_addr, dma_addr_t dma_addr)
654{ 659{
655 DEFINE_DMA_ATTRS(attrs); 660 DEFINE_DMA_ATTRS(attrs);
656 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 661 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
657 return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs); 662 return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
658} 663}
664#ifndef dma_free_writecombine
665#define dma_free_writecombine dma_free_wc
666#endif
659 667
660static inline int dma_mmap_writecombine(struct device *dev, 668static inline int dma_mmap_wc(struct device *dev,
661 struct vm_area_struct *vma, 669 struct vm_area_struct *vma,
662 void *cpu_addr, dma_addr_t dma_addr, 670 void *cpu_addr, dma_addr_t dma_addr,
663 size_t size) 671 size_t size)
664{ 672{
665 DEFINE_DMA_ATTRS(attrs); 673 DEFINE_DMA_ATTRS(attrs);
666 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 674 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
667 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); 675 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
668} 676}
677#ifndef dma_mmap_writecombine
678#define dma_mmap_writecombine dma_mmap_wc
679#endif
669 680
670#ifdef CONFIG_NEED_DMA_MAP_STATE 681#ifdef CONFIG_NEED_DMA_MAP_STATE
671#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME 682#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 16a1cad30c33..017433712833 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -357,8 +357,8 @@ enum dma_slave_buswidth {
357 */ 357 */
358struct dma_slave_config { 358struct dma_slave_config {
359 enum dma_transfer_direction direction; 359 enum dma_transfer_direction direction;
360 dma_addr_t src_addr; 360 phys_addr_t src_addr;
361 dma_addr_t dst_addr; 361 phys_addr_t dst_addr;
362 enum dma_slave_buswidth src_addr_width; 362 enum dma_slave_buswidth src_addr_width;
363 enum dma_slave_buswidth dst_addr_width; 363 enum dma_slave_buswidth dst_addr_width;
364 u32 src_maxburst; 364 u32 src_maxburst;
@@ -401,6 +401,7 @@ enum dma_residue_granularity {
401 * since the enum dma_transfer_direction is not defined as bits for each 401 * since the enum dma_transfer_direction is not defined as bits for each
402 * type of direction, the dma controller should fill (1 << <TYPE>) and same 402 * type of direction, the dma controller should fill (1 << <TYPE>) and same
403 * should be checked by controller as well 403 * should be checked by controller as well
404 * @max_burst: max burst capability per-transfer
404 * @cmd_pause: true, if pause and thereby resume is supported 405 * @cmd_pause: true, if pause and thereby resume is supported
405 * @cmd_terminate: true, if terminate cmd is supported 406 * @cmd_terminate: true, if terminate cmd is supported
406 * @residue_granularity: granularity of the reported transfer residue 407 * @residue_granularity: granularity of the reported transfer residue
@@ -411,6 +412,7 @@ struct dma_slave_caps {
411 u32 src_addr_widths; 412 u32 src_addr_widths;
412 u32 dst_addr_widths; 413 u32 dst_addr_widths;
413 u32 directions; 414 u32 directions;
415 u32 max_burst;
414 bool cmd_pause; 416 bool cmd_pause;
415 bool cmd_terminate; 417 bool cmd_terminate;
416 enum dma_residue_granularity residue_granularity; 418 enum dma_residue_granularity residue_granularity;
@@ -654,6 +656,7 @@ struct dma_filter {
654 * the enum dma_transfer_direction is not defined as bits for 656 * the enum dma_transfer_direction is not defined as bits for
655 * each type of direction, the dma controller should fill (1 << 657 * each type of direction, the dma controller should fill (1 <<
656 * <TYPE>) and same should be checked by controller as well 658 * <TYPE>) and same should be checked by controller as well
659 * @max_burst: max burst capability per-transfer
657 * @residue_granularity: granularity of the transfer residue reported 660 * @residue_granularity: granularity of the transfer residue reported
658 * by tx_status 661 * by tx_status
659 * @device_alloc_chan_resources: allocate resources and return the 662 * @device_alloc_chan_resources: allocate resources and return the
@@ -712,6 +715,7 @@ struct dma_device {
712 u32 src_addr_widths; 715 u32 src_addr_widths;
713 u32 dst_addr_widths; 716 u32 dst_addr_widths;
714 u32 directions; 717 u32 directions;
718 u32 max_burst;
715 bool descriptor_reuse; 719 bool descriptor_reuse;
716 enum dma_residue_granularity residue_granularity; 720 enum dma_residue_granularity residue_granularity;
717 721
diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h
index ff8b55359648..0de21e935976 100644
--- a/include/linux/dqblk_qtree.h
+++ b/include/linux/dqblk_qtree.h
@@ -15,6 +15,7 @@
15#define QTREE_DEL_REWRITE 6 15#define QTREE_DEL_REWRITE 6
16 16
17struct dquot; 17struct dquot;
18struct kqid;
18 19
19/* Operations */ 20/* Operations */
20struct qtree_fmt_operations { 21struct qtree_fmt_operations {
@@ -52,5 +53,6 @@ static inline int qtree_depth(struct qtree_mem_dqinfo *info)
52 entries *= epb; 53 entries *= epb;
53 return i; 54 return i;
54} 55}
56int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid);
55 57
56#endif /* _LINUX_DQBLK_QTREE_H */ 58#endif /* _LINUX_DQBLK_QTREE_H */
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index 06791811e49d..885f587a3555 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -3,16 +3,25 @@
3 * platform description for 93xx46 EEPROMs. 3 * platform description for 93xx46 EEPROMs.
4 */ 4 */
5 5
6struct gpio_desc;
7
6struct eeprom_93xx46_platform_data { 8struct eeprom_93xx46_platform_data {
7 unsigned char flags; 9 unsigned char flags;
8#define EE_ADDR8 0x01 /* 8 bit addr. cfg */ 10#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
9#define EE_ADDR16 0x02 /* 16 bit addr. cfg */ 11#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
10#define EE_READONLY 0x08 /* forbid writing */ 12#define EE_READONLY 0x08 /* forbid writing */
11 13
14 unsigned int quirks;
15/* Single word read transfers only; no sequential read. */
16#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
17/* Instructions such as EWEN are (addrlen + 2) in length. */
18#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
19
12 /* 20 /*
13 * optional hooks to control additional logic 21 * optional hooks to control additional logic
14 * before and after spi transfer. 22 * before and after spi transfer.
15 */ 23 */
16 void (*prepare)(void *); 24 void (*prepare)(void *);
17 void (*finish)(void *); 25 void (*finish)(void *);
26 struct gpio_desc *select;
18}; 27};
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 47be3ad7d3e5..1626474567ac 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -97,6 +97,7 @@ typedef struct {
97#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ 97#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
98#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ 98#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
99#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ 99#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
100#define EFI_MEMORY_NV ((u64)0x0000000000008000ULL) /* non-volatile */
100#define EFI_MEMORY_MORE_RELIABLE \ 101#define EFI_MEMORY_MORE_RELIABLE \
101 ((u64)0x0000000000010000ULL) /* higher reliability */ 102 ((u64)0x0000000000010000ULL) /* higher reliability */
102#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */ 103#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
@@ -299,7 +300,7 @@ typedef struct {
299 void *open_protocol_information; 300 void *open_protocol_information;
300 void *protocols_per_handle; 301 void *protocols_per_handle;
301 void *locate_handle_buffer; 302 void *locate_handle_buffer;
302 void *locate_protocol; 303 efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
303 void *install_multiple_protocol_interfaces; 304 void *install_multiple_protocol_interfaces;
304 void *uninstall_multiple_protocol_interfaces; 305 void *uninstall_multiple_protocol_interfaces;
305 void *calculate_crc32; 306 void *calculate_crc32;
@@ -507,10 +508,6 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char
507typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, 508typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
508 u32 attr, unsigned long data_size, 509 u32 attr, unsigned long data_size,
509 void *data); 510 void *data);
510typedef efi_status_t
511efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
512 u32 attr, unsigned long data_size, void *data);
513
514typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); 511typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
515typedef void efi_reset_system_t (int reset_type, efi_status_t status, 512typedef void efi_reset_system_t (int reset_type, efi_status_t status,
516 unsigned long data_size, efi_char16_t *data); 513 unsigned long data_size, efi_char16_t *data);
@@ -529,7 +526,9 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
529 unsigned long count, 526 unsigned long count,
530 u64 *max_size, 527 u64 *max_size,
531 int *reset_type); 528 int *reset_type);
532typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size); 529typedef efi_status_t efi_query_variable_store_t(u32 attributes,
530 unsigned long size,
531 bool nonblocking);
533 532
534void efi_native_runtime_setup(void); 533void efi_native_runtime_setup(void);
535 534
@@ -537,67 +536,92 @@ void efi_native_runtime_setup(void);
537 * EFI Configuration Table and GUID definitions 536 * EFI Configuration Table and GUID definitions
538 */ 537 */
539#define NULL_GUID \ 538#define NULL_GUID \
540 EFI_GUID( 0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ) 539 EFI_GUID(0x00000000, 0x0000, 0x0000, \
540 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
541 541
542#define MPS_TABLE_GUID \ 542#define MPS_TABLE_GUID \
543 EFI_GUID( 0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) 543 EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, \
544 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
544 545
545#define ACPI_TABLE_GUID \ 546#define ACPI_TABLE_GUID \
546 EFI_GUID( 0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) 547 EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, \
548 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
547 549
548#define ACPI_20_TABLE_GUID \ 550#define ACPI_20_TABLE_GUID \
549 EFI_GUID( 0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81 ) 551 EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, \
552 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
550 553
551#define SMBIOS_TABLE_GUID \ 554#define SMBIOS_TABLE_GUID \
552 EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) 555 EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, \
556 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
553 557
554#define SMBIOS3_TABLE_GUID \ 558#define SMBIOS3_TABLE_GUID \
555 EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 ) 559 EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, \
560 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
556 561
557#define SAL_SYSTEM_TABLE_GUID \ 562#define SAL_SYSTEM_TABLE_GUID \
558 EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) 563 EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, \
564 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
559 565
560#define HCDP_TABLE_GUID \ 566#define HCDP_TABLE_GUID \
561 EFI_GUID( 0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98 ) 567 EFI_GUID(0xf951938d, 0x620b, 0x42ef, \
568 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
562 569
563#define UGA_IO_PROTOCOL_GUID \ 570#define UGA_IO_PROTOCOL_GUID \
564 EFI_GUID( 0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2 ) 571 EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, \
572 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
565 573
566#define EFI_GLOBAL_VARIABLE_GUID \ 574#define EFI_GLOBAL_VARIABLE_GUID \
567 EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c ) 575 EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \
576 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
568 577
569#define UV_SYSTEM_TABLE_GUID \ 578#define UV_SYSTEM_TABLE_GUID \
570 EFI_GUID( 0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 ) 579 EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, \
580 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
571 581
572#define LINUX_EFI_CRASH_GUID \ 582#define LINUX_EFI_CRASH_GUID \
573 EFI_GUID( 0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0 ) 583 EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, \
584 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
574 585
575#define LOADED_IMAGE_PROTOCOL_GUID \ 586#define LOADED_IMAGE_PROTOCOL_GUID \
576 EFI_GUID( 0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b ) 587 EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, \
588 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
577 589
578#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \ 590#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \
579 EFI_GUID( 0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a ) 591 EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, \
592 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
580 593
581#define EFI_UGA_PROTOCOL_GUID \ 594#define EFI_UGA_PROTOCOL_GUID \
582 EFI_GUID( 0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39 ) 595 EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, \
596 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
583 597
584#define EFI_PCI_IO_PROTOCOL_GUID \ 598#define EFI_PCI_IO_PROTOCOL_GUID \
585 EFI_GUID( 0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x2, 0x9a ) 599 EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, \
600 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
586 601
587#define EFI_FILE_INFO_ID \ 602#define EFI_FILE_INFO_ID \
588 EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b ) 603 EFI_GUID(0x9576e92, 0x6d3f, 0x11d2, \
604 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
589 605
590#define EFI_SYSTEM_RESOURCE_TABLE_GUID \ 606#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
591 EFI_GUID( 0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 ) 607 EFI_GUID(0xb122a263, 0x3661, 0x4f68, \
608 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
592 609
593#define EFI_FILE_SYSTEM_GUID \ 610#define EFI_FILE_SYSTEM_GUID \
594 EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b ) 611 EFI_GUID(0x964e5b22, 0x6459, 0x11d2, \
612 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
595 613
596#define DEVICE_TREE_GUID \ 614#define DEVICE_TREE_GUID \
597 EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 ) 615 EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, \
616 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
598 617
599#define EFI_PROPERTIES_TABLE_GUID \ 618#define EFI_PROPERTIES_TABLE_GUID \
600 EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 ) 619 EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, \
620 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
621
622#define EFI_RNG_PROTOCOL_GUID \
623 EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
624 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
601 625
602typedef struct { 626typedef struct {
603 efi_guid_t guid; 627 efi_guid_t guid;
@@ -851,8 +875,9 @@ extern struct efi {
851 efi_get_variable_t *get_variable; 875 efi_get_variable_t *get_variable;
852 efi_get_next_variable_t *get_next_variable; 876 efi_get_next_variable_t *get_next_variable;
853 efi_set_variable_t *set_variable; 877 efi_set_variable_t *set_variable;
854 efi_set_variable_nonblocking_t *set_variable_nonblocking; 878 efi_set_variable_t *set_variable_nonblocking;
855 efi_query_variable_info_t *query_variable_info; 879 efi_query_variable_info_t *query_variable_info;
880 efi_query_variable_info_t *query_variable_info_nonblocking;
856 efi_update_capsule_t *update_capsule; 881 efi_update_capsule_t *update_capsule;
857 efi_query_capsule_caps_t *query_capsule_caps; 882 efi_query_capsule_caps_t *query_capsule_caps;
858 efi_get_next_high_mono_count_t *get_next_high_mono_count; 883 efi_get_next_high_mono_count_t *get_next_high_mono_count;
@@ -884,13 +909,17 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
884#ifdef CONFIG_X86 909#ifdef CONFIG_X86
885extern void efi_late_init(void); 910extern void efi_late_init(void);
886extern void efi_free_boot_services(void); 911extern void efi_free_boot_services(void);
887extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size); 912extern efi_status_t efi_query_variable_store(u32 attributes,
913 unsigned long size,
914 bool nonblocking);
888extern void efi_find_mirror(void); 915extern void efi_find_mirror(void);
889#else 916#else
890static inline void efi_late_init(void) {} 917static inline void efi_late_init(void) {}
891static inline void efi_free_boot_services(void) {} 918static inline void efi_free_boot_services(void) {}
892 919
893static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) 920static inline efi_status_t efi_query_variable_store(u32 attributes,
921 unsigned long size,
922 bool nonblocking)
894{ 923{
895 return EFI_SUCCESS; 924 return EFI_SUCCESS;
896} 925}
@@ -1091,7 +1120,7 @@ struct efivar_operations {
1091 efi_get_variable_t *get_variable; 1120 efi_get_variable_t *get_variable;
1092 efi_get_next_variable_t *get_next_variable; 1121 efi_get_next_variable_t *get_next_variable;
1093 efi_set_variable_t *set_variable; 1122 efi_set_variable_t *set_variable;
1094 efi_set_variable_nonblocking_t *set_variable_nonblocking; 1123 efi_set_variable_t *set_variable_nonblocking;
1095 efi_query_variable_store_t *query_variable_store; 1124 efi_query_variable_store_t *query_variable_store;
1096}; 1125};
1097 1126
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 653dc9c4ebac..e2b7bf27c03e 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -12,6 +12,7 @@
12#ifndef _LINUX_ETHTOOL_H 12#ifndef _LINUX_ETHTOOL_H
13#define _LINUX_ETHTOOL_H 13#define _LINUX_ETHTOOL_H
14 14
15#include <linux/bitmap.h>
15#include <linux/compat.h> 16#include <linux/compat.h>
16#include <uapi/linux/ethtool.h> 17#include <uapi/linux/ethtool.h>
17 18
@@ -40,9 +41,6 @@ struct compat_ethtool_rxnfc {
40 41
41#include <linux/rculist.h> 42#include <linux/rculist.h>
42 43
43extern int __ethtool_get_settings(struct net_device *dev,
44 struct ethtool_cmd *cmd);
45
46/** 44/**
47 * enum ethtool_phys_id_state - indicator state for physical identification 45 * enum ethtool_phys_id_state - indicator state for physical identification
48 * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated 46 * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated
@@ -97,13 +95,70 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
97 return index % n_rx_rings; 95 return index % n_rx_rings;
98} 96}
99 97
98/* number of link mode bits/ulongs handled internally by kernel */
99#define __ETHTOOL_LINK_MODE_MASK_NBITS \
100 (__ETHTOOL_LINK_MODE_LAST + 1)
101
102/* declare a link mode bitmap */
103#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
104 DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
105
106/* drivers must ignore base.cmd and base.link_mode_masks_nwords
107 * fields, but they are allowed to overwrite them (will be ignored).
108 */
109struct ethtool_link_ksettings {
110 struct ethtool_link_settings base;
111 struct {
112 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
113 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
114 __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
115 } link_modes;
116};
117
118/**
119 * ethtool_link_ksettings_zero_link_mode - clear link_ksettings link mode mask
120 * @ptr : pointer to struct ethtool_link_ksettings
121 * @name : one of supported/advertising/lp_advertising
122 */
123#define ethtool_link_ksettings_zero_link_mode(ptr, name) \
124 bitmap_zero((ptr)->link_modes.name, __ETHTOOL_LINK_MODE_MASK_NBITS)
125
126/**
127 * ethtool_link_ksettings_add_link_mode - set bit in link_ksettings
128 * link mode mask
129 * @ptr : pointer to struct ethtool_link_ksettings
130 * @name : one of supported/advertising/lp_advertising
131 * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
132 * (not atomic, no bound checking)
133 */
134#define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \
135 __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
136
137/**
138 * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask
139 * @ptr : pointer to struct ethtool_link_ksettings
140 * @name : one of supported/advertising/lp_advertising
141 * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
142 * (not atomic, no bound checking)
143 *
144 * Returns true/false.
145 */
146#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \
147 test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
148
149extern int
150__ethtool_get_link_ksettings(struct net_device *dev,
151 struct ethtool_link_ksettings *link_ksettings);
152
100/** 153/**
101 * struct ethtool_ops - optional netdev operations 154 * struct ethtool_ops - optional netdev operations
102 * @get_settings: Get various device settings including Ethernet link 155 * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
156 * API. Get various device settings including Ethernet link
103 * settings. The @cmd parameter is expected to have been cleared 157 * settings. The @cmd parameter is expected to have been cleared
104 * before get_settings is called. Returns a negative error code or 158 * before get_settings is called. Returns a negative error code
105 * zero. 159 * or zero.
106 * @set_settings: Set various device settings including Ethernet link 160 * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
161 * API. Set various device settings including Ethernet link
107 * settings. Returns a negative error code or zero. 162 * settings. Returns a negative error code or zero.
108 * @get_drvinfo: Report driver/device information. Should only set the 163 * @get_drvinfo: Report driver/device information. Should only set the
109 * @driver, @version, @fw_version and @bus_info fields. If not 164 * @driver, @version, @fw_version and @bus_info fields. If not
@@ -201,6 +256,29 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
201 * @get_module_eeprom: Get the eeprom information from the plug-in module 256 * @get_module_eeprom: Get the eeprom information from the plug-in module
202 * @get_eee: Get Energy-Efficient (EEE) supported and status. 257 * @get_eee: Get Energy-Efficient (EEE) supported and status.
203 * @set_eee: Set EEE status (enable/disable) as well as LPI timers. 258 * @set_eee: Set EEE status (enable/disable) as well as LPI timers.
259 * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
260 * It must check that the given queue number is valid. If neither a RX nor
261 * a TX queue has this number, return -EINVAL. If only a RX queue or a TX
262 * queue has this number, set the inapplicable fields to ~0 and return 0.
263 * Returns a negative error code or zero.
264 * @set_per_queue_coalesce: Set interrupt coalescing parameters per queue.
265 * It must check that the given queue number is valid. If neither a RX nor
266 * a TX queue has this number, return -EINVAL. If only a RX queue or a TX
267 * queue has this number, ignore the inapplicable fields.
268 * Returns a negative error code or zero.
269 * @get_link_ksettings: When defined, takes precedence over the
270 * %get_settings method. Get various device settings
271 * including Ethernet link settings. The %cmd and
272 * %link_mode_masks_nwords fields should be ignored (use
273 * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any
274 * change to them will be overwritten by kernel. Returns a
275 * negative error code or zero.
276 * @set_link_ksettings: When defined, takes precedence over the
277 * %set_settings method. Set various device settings including
278 * Ethernet link settings. The %cmd and %link_mode_masks_nwords
279 * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
280 * instead of the latter), any change to them will be overwritten
281 * by kernel. Returns a negative error code or zero.
204 * 282 *
205 * All operations are optional (i.e. the function pointer may be set 283 * All operations are optional (i.e. the function pointer may be set
206 * to %NULL) and callers must take this into account. Callers must 284 * to %NULL) and callers must take this into account. Callers must
@@ -279,7 +357,13 @@ struct ethtool_ops {
279 const struct ethtool_tunable *, void *); 357 const struct ethtool_tunable *, void *);
280 int (*set_tunable)(struct net_device *, 358 int (*set_tunable)(struct net_device *,
281 const struct ethtool_tunable *, const void *); 359 const struct ethtool_tunable *, const void *);
282 360 int (*get_per_queue_coalesce)(struct net_device *, u32,
283 361 struct ethtool_coalesce *);
362 int (*set_per_queue_coalesce)(struct net_device *, u32,
363 struct ethtool_coalesce *);
364 int (*get_link_ksettings)(struct net_device *,
365 struct ethtool_link_ksettings *);
366 int (*set_link_ksettings)(struct net_device *,
367 const struct ethtool_link_ksettings *);
284}; 368};
285#endif /* _LINUX_ETHTOOL_H */ 369#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index fa05e04c5531..d8414502edb4 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -97,6 +97,12 @@ enum fid_type {
97 FILEID_FAT_WITH_PARENT = 0x72, 97 FILEID_FAT_WITH_PARENT = 0x72,
98 98
99 /* 99 /*
100 * 128 bit child FID (struct lu_fid)
101 * 128 bit parent FID (struct lu_fid)
102 */
103 FILEID_LUSTRE = 0x97,
104
105 /*
100 * Filesystems must not use 0xff file ID. 106 * Filesystems must not use 0xff file ID.
101 */ 107 */
102 FILEID_INVALID = 0xff, 108 FILEID_INVALID = 0xff,
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index e59c3be92106..b90e9bdbd1dd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -21,7 +21,7 @@
21#define F2FS_BLKSIZE 4096 /* support only 4KB block */ 21#define F2FS_BLKSIZE 4096 /* support only 4KB block */
22#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */ 22#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
23#define F2FS_MAX_EXTENSION 64 /* # of extension entries */ 23#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
24#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE) 24#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
25 25
26#define NULL_ADDR ((block_t)0) /* used as block_t addresses */ 26#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
27#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ 27#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
@@ -170,12 +170,12 @@ struct f2fs_extent {
170#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */ 170#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
171#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ 171#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
172#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */ 172#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
173#define ADDRS_PER_INODE(fi) addrs_per_inode(fi) 173#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
174#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ 174#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
175#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ 175#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
176 176
177#define ADDRS_PER_PAGE(page, fi) \ 177#define ADDRS_PER_PAGE(page, inode) \
178 (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK) 178 (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
179 179
180#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) 180#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
181#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) 181#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -262,7 +262,7 @@ struct f2fs_node {
262/* 262/*
263 * For NAT entries 263 * For NAT entries
264 */ 264 */
265#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry)) 265#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
266 266
267struct f2fs_nat_entry { 267struct f2fs_nat_entry {
268 __u8 version; /* latest version of cached nat entry */ 268 __u8 version; /* latest version of cached nat entry */
@@ -282,7 +282,7 @@ struct f2fs_nat_block {
282 * Not allow to change this. 282 * Not allow to change this.
283 */ 283 */
284#define SIT_VBLOCK_MAP_SIZE 64 284#define SIT_VBLOCK_MAP_SIZE 64
285#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry)) 285#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
286 286
287/* 287/*
288 * Note that f2fs_sit_entry->vblocks has the following bit-field information. 288 * Note that f2fs_sit_entry->vblocks has the following bit-field information.
@@ -345,7 +345,7 @@ struct f2fs_summary {
345 345
346struct summary_footer { 346struct summary_footer {
347 unsigned char entry_type; /* SUM_TYPE_XXX */ 347 unsigned char entry_type; /* SUM_TYPE_XXX */
348 __u32 check_sum; /* summary checksum */ 348 __le32 check_sum; /* summary checksum */
349} __packed; 349} __packed;
350 350
351#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\ 351#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
@@ -358,6 +358,12 @@ struct summary_footer {
358 sizeof(struct sit_journal_entry)) 358 sizeof(struct sit_journal_entry))
359#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\ 359#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
360 sizeof(struct sit_journal_entry)) 360 sizeof(struct sit_journal_entry))
361
362/* Reserved area should make size of f2fs_extra_info equals to
363 * that of nat_journal and sit_journal.
364 */
365#define EXTRA_INFO_RESERVED (SUM_JOURNAL_SIZE - 2 - 8)
366
361/* 367/*
362 * frequently updated NAT/SIT entries can be stored in the spare area in 368 * frequently updated NAT/SIT entries can be stored in the spare area in
363 * summary blocks 369 * summary blocks
@@ -387,18 +393,28 @@ struct sit_journal {
387 __u8 reserved[SIT_JOURNAL_RESERVED]; 393 __u8 reserved[SIT_JOURNAL_RESERVED];
388} __packed; 394} __packed;
389 395
390/* 4KB-sized summary block structure */ 396struct f2fs_extra_info {
391struct f2fs_summary_block { 397 __le64 kbytes_written;
392 struct f2fs_summary entries[ENTRIES_IN_SUM]; 398 __u8 reserved[EXTRA_INFO_RESERVED];
399} __packed;
400
401struct f2fs_journal {
393 union { 402 union {
394 __le16 n_nats; 403 __le16 n_nats;
395 __le16 n_sits; 404 __le16 n_sits;
396 }; 405 };
397 /* spare area is used by NAT or SIT journals */ 406 /* spare area is used by NAT or SIT journals or extra info */
398 union { 407 union {
399 struct nat_journal nat_j; 408 struct nat_journal nat_j;
400 struct sit_journal sit_j; 409 struct sit_journal sit_j;
410 struct f2fs_extra_info info;
401 }; 411 };
412} __packed;
413
414/* 4KB-sized summary block structure */
415struct f2fs_summary_block {
416 struct f2fs_summary entries[ENTRIES_IN_SUM];
417 struct f2fs_journal journal;
402 struct summary_footer footer; 418 struct summary_footer footer;
403} __packed; 419} __packed;
404 420
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 3159a7dba034..9f4956d8601c 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -62,10 +62,9 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
62#endif /* CONFIG_FAULT_INJECTION */ 62#endif /* CONFIG_FAULT_INJECTION */
63 63
64#ifdef CONFIG_FAILSLAB 64#ifdef CONFIG_FAILSLAB
65extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags); 65extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
66#else 66#else
67static inline bool should_failslab(size_t size, gfp_t gfpflags, 67static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
68 unsigned long flags)
69{ 68{
70 return false; 69 return false;
71} 70}
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 55433f86f0a3..dfe88351341f 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -296,9 +296,6 @@ struct fb_ops {
296 /* Draws cursor */ 296 /* Draws cursor */
297 int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor); 297 int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor);
298 298
299 /* Rotates the display */
300 void (*fb_rotate)(struct fb_info *info, int angle);
301
302 /* wait for blit idle, optional */ 299 /* wait for blit idle, optional */
303 int (*fb_sync)(struct fb_info *info); 300 int (*fb_sync)(struct fb_info *info);
304 301
diff --git a/include/linux/fence.h b/include/linux/fence.h
index bb522011383b..2b17698b60b8 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -79,6 +79,8 @@ struct fence {
79 unsigned long flags; 79 unsigned long flags;
80 ktime_t timestamp; 80 ktime_t timestamp;
81 int status; 81 int status;
82 struct list_head child_list;
83 struct list_head active_list;
82}; 84};
83 85
84enum fence_flag_bits { 86enum fence_flag_bits {
@@ -292,7 +294,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2)
292 if (WARN_ON(f1->context != f2->context)) 294 if (WARN_ON(f1->context != f2->context))
293 return false; 295 return false;
294 296
295 return f1->seqno - f2->seqno < INT_MAX; 297 return (int)(f1->seqno - f2->seqno) > 0;
296} 298}
297 299
298/** 300/**
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 43aa1f8855c7..a51a5361695f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -465,10 +465,14 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
465void bpf_prog_destroy(struct bpf_prog *fp); 465void bpf_prog_destroy(struct bpf_prog *fp);
466 466
467int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); 467int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
468int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
469 bool locked);
468int sk_attach_bpf(u32 ufd, struct sock *sk); 470int sk_attach_bpf(u32 ufd, struct sock *sk);
469int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); 471int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
470int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); 472int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
471int sk_detach_filter(struct sock *sk); 473int sk_detach_filter(struct sock *sk);
474int __sk_detach_filter(struct sock *sk, bool locked);
475
472int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, 476int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
473 unsigned int len); 477 unsigned int len);
474 478
diff --git a/include/linux/frame.h b/include/linux/frame.h
new file mode 100644
index 000000000000..e6baaba3f1ae
--- /dev/null
+++ b/include/linux/frame.h
@@ -0,0 +1,23 @@
1#ifndef _LINUX_FRAME_H
2#define _LINUX_FRAME_H
3
4#ifdef CONFIG_STACK_VALIDATION
5/*
6 * This macro marks the given function's stack frame as "non-standard", which
7 * tells objtool to ignore the function when doing stack metadata validation.
8 * It should only be used in special cases where you're 100% sure it won't
9 * affect the reliability of frame pointers and kernel stack traces.
10 *
11 * For more information, see tools/objtool/Documentation/stack-validation.txt.
12 */
13#define STACK_FRAME_NON_STANDARD(func) \
14 static void __used __section(__func_stack_frame_non_standard) \
15 *__func_stack_frame_non_standard_##func = func
16
17#else /* !CONFIG_STACK_VALIDATION */
18
19#define STACK_FRAME_NON_STANDARD(func)
20
21#endif /* CONFIG_STACK_VALIDATION */
22
23#endif /* _LINUX_FRAME_H */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 6b7fd9cf5ea2..dd03e837ebb7 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -231,7 +231,7 @@ static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
231 * call this with locks held. 231 * call this with locks held.
232 */ 232 */
233static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, 233static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
234 unsigned long delta, const enum hrtimer_mode mode) 234 u64 delta, const enum hrtimer_mode mode)
235{ 235{
236 int __retval; 236 int __retval;
237 freezer_do_not_count(); 237 freezer_do_not_count();
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ae681002100a..70e61b58baaf 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -53,6 +53,8 @@ struct swap_info_struct;
53struct seq_file; 53struct seq_file;
54struct workqueue_struct; 54struct workqueue_struct;
55struct iov_iter; 55struct iov_iter;
56struct fscrypt_info;
57struct fscrypt_operations;
56 58
57extern void __init inode_init(void); 59extern void __init inode_init(void);
58extern void __init inode_init_early(void); 60extern void __init inode_init_early(void);
@@ -70,7 +72,7 @@ extern int sysctl_protected_hardlinks;
70struct buffer_head; 72struct buffer_head;
71typedef int (get_block_t)(struct inode *inode, sector_t iblock, 73typedef int (get_block_t)(struct inode *inode, sector_t iblock,
72 struct buffer_head *bh_result, int create); 74 struct buffer_head *bh_result, int create);
73typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, 75typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
74 ssize_t bytes, void *private); 76 ssize_t bytes, void *private);
75typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); 77typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
76 78
@@ -320,6 +322,7 @@ struct writeback_control;
320#define IOCB_EVENTFD (1 << 0) 322#define IOCB_EVENTFD (1 << 0)
321#define IOCB_APPEND (1 << 1) 323#define IOCB_APPEND (1 << 1)
322#define IOCB_DIRECT (1 << 2) 324#define IOCB_DIRECT (1 << 2)
325#define IOCB_HIPRI (1 << 3)
323 326
324struct kiocb { 327struct kiocb {
325 struct file *ki_filp; 328 struct file *ki_filp;
@@ -678,6 +681,10 @@ struct inode {
678 struct hlist_head i_fsnotify_marks; 681 struct hlist_head i_fsnotify_marks;
679#endif 682#endif
680 683
684#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
685 struct fscrypt_info *i_crypt_info;
686#endif
687
681 void *i_private; /* fs or device private pointer */ 688 void *i_private; /* fs or device private pointer */
682}; 689};
683 690
@@ -922,7 +929,7 @@ static inline struct file *get_file(struct file *f)
922/* Page cache limit. The filesystems should put that into their s_maxbytes 929/* Page cache limit. The filesystems should put that into their s_maxbytes
923 limits, otherwise bad things can happen in VM. */ 930 limits, otherwise bad things can happen in VM. */
924#if BITS_PER_LONG==32 931#if BITS_PER_LONG==32
925#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 932#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
926#elif BITS_PER_LONG==64 933#elif BITS_PER_LONG==64
927#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) 934#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
928#endif 935#endif
@@ -1234,6 +1241,16 @@ static inline struct inode *file_inode(const struct file *f)
1234 return f->f_inode; 1241 return f->f_inode;
1235} 1242}
1236 1243
1244static inline struct dentry *file_dentry(const struct file *file)
1245{
1246 struct dentry *dentry = file->f_path.dentry;
1247
1248 if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
1249 return dentry->d_op->d_real(dentry, file_inode(file));
1250 else
1251 return dentry;
1252}
1253
1237static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) 1254static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
1238{ 1255{
1239 return locks_lock_inode_wait(file_inode(filp), fl); 1256 return locks_lock_inode_wait(file_inode(filp), fl);
@@ -1323,6 +1340,8 @@ struct super_block {
1323#endif 1340#endif
1324 const struct xattr_handler **s_xattr; 1341 const struct xattr_handler **s_xattr;
1325 1342
1343 const struct fscrypt_operations *s_cop;
1344
1326 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ 1345 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
1327 struct list_head s_mounts; /* list of mounts; _not_ for fs use */ 1346 struct list_head s_mounts; /* list of mounts; _not_ for fs use */
1328 struct block_device *s_bdev; 1347 struct block_device *s_bdev;
@@ -1540,11 +1559,6 @@ extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct de
1540extern int vfs_whiteout(struct inode *, struct dentry *); 1559extern int vfs_whiteout(struct inode *, struct dentry *);
1541 1560
1542/* 1561/*
1543 * VFS dentry helper functions.
1544 */
1545extern void dentry_unhash(struct dentry *dentry);
1546
1547/*
1548 * VFS file helper functions. 1562 * VFS file helper functions.
1549 */ 1563 */
1550extern void inode_init_owner(struct inode *inode, const struct inode *dir, 1564extern void inode_init_owner(struct inode *inode, const struct inode *dir,
@@ -1709,9 +1723,9 @@ extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *)
1709extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); 1723extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
1710extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); 1724extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
1711extern ssize_t vfs_readv(struct file *, const struct iovec __user *, 1725extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
1712 unsigned long, loff_t *); 1726 unsigned long, loff_t *, int);
1713extern ssize_t vfs_writev(struct file *, const struct iovec __user *, 1727extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
1714 unsigned long, loff_t *); 1728 unsigned long, loff_t *, int);
1715extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, 1729extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
1716 loff_t, size_t, unsigned int); 1730 loff_t, size_t, unsigned int);
1717extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, 1731extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
@@ -2063,7 +2077,7 @@ extern int generic_update_time(struct inode *, struct timespec *, int);
2063/* /sys/fs */ 2077/* /sys/fs */
2064extern struct kobject *fs_kobj; 2078extern struct kobject *fs_kobj;
2065 2079
2066#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK) 2080#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
2067 2081
2068#ifdef CONFIG_MANDATORY_FILE_LOCKING 2082#ifdef CONFIG_MANDATORY_FILE_LOCKING
2069extern int locks_mandatory_locked(struct file *); 2083extern int locks_mandatory_locked(struct file *);
@@ -2259,7 +2273,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
2259extern struct file *file_open_name(struct filename *, int, umode_t); 2273extern struct file *file_open_name(struct filename *, int, umode_t);
2260extern struct file *filp_open(const char *, int, umode_t); 2274extern struct file *filp_open(const char *, int, umode_t);
2261extern struct file *file_open_root(struct dentry *, struct vfsmount *, 2275extern struct file *file_open_root(struct dentry *, struct vfsmount *,
2262 const char *, int); 2276 const char *, int, umode_t);
2263extern struct file * dentry_open(const struct path *, int, const struct cred *); 2277extern struct file * dentry_open(const struct path *, int, const struct cred *);
2264extern int filp_close(struct file *, fl_owner_t id); 2278extern int filp_close(struct file *, fl_owner_t id);
2265 2279
@@ -2576,7 +2590,22 @@ static inline void i_readcount_inc(struct inode *inode)
2576#endif 2590#endif
2577extern int do_pipe_flags(int *, int); 2591extern int do_pipe_flags(int *, int);
2578 2592
2593enum kernel_read_file_id {
2594 READING_FIRMWARE = 1,
2595 READING_MODULE,
2596 READING_KEXEC_IMAGE,
2597 READING_KEXEC_INITRAMFS,
2598 READING_POLICY,
2599 READING_MAX_ID
2600};
2601
2579extern int kernel_read(struct file *, loff_t, char *, unsigned long); 2602extern int kernel_read(struct file *, loff_t, char *, unsigned long);
2603extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
2604 enum kernel_read_file_id);
2605extern int kernel_read_file_from_path(char *, void **, loff_t *, loff_t,
2606 enum kernel_read_file_id);
2607extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
2608 enum kernel_read_file_id);
2580extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t); 2609extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
2581extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); 2610extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
2582extern struct file * open_exec(const char *); 2611extern struct file * open_exec(const char *);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
new file mode 100644
index 000000000000..6027f6bbb061
--- /dev/null
+++ b/include/linux/fscrypto.h
@@ -0,0 +1,435 @@
1/*
2 * General per-file encryption definition
3 *
4 * Copyright (C) 2015, Google, Inc.
5 *
6 * Written by Michael Halcrow, 2015.
7 * Modified by Jaegeuk Kim, 2015.
8 */
9
10#ifndef _LINUX_FSCRYPTO_H
11#define _LINUX_FSCRYPTO_H
12
13#include <linux/key.h>
14#include <linux/fs.h>
15#include <linux/mm.h>
16#include <linux/bio.h>
17#include <linux/dcache.h>
18#include <crypto/skcipher.h>
19#include <uapi/linux/fs.h>
20
21#define FS_KEY_DERIVATION_NONCE_SIZE 16
22#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
23
24#define FS_POLICY_FLAGS_PAD_4 0x00
25#define FS_POLICY_FLAGS_PAD_8 0x01
26#define FS_POLICY_FLAGS_PAD_16 0x02
27#define FS_POLICY_FLAGS_PAD_32 0x03
28#define FS_POLICY_FLAGS_PAD_MASK 0x03
29#define FS_POLICY_FLAGS_VALID 0x03
30
31/* Encryption algorithms */
32#define FS_ENCRYPTION_MODE_INVALID 0
33#define FS_ENCRYPTION_MODE_AES_256_XTS 1
34#define FS_ENCRYPTION_MODE_AES_256_GCM 2
35#define FS_ENCRYPTION_MODE_AES_256_CBC 3
36#define FS_ENCRYPTION_MODE_AES_256_CTS 4
37
38/**
39 * Encryption context for inode
40 *
41 * Protector format:
42 * 1 byte: Protector format (1 = this version)
43 * 1 byte: File contents encryption mode
44 * 1 byte: File names encryption mode
45 * 1 byte: Flags
46 * 8 bytes: Master Key descriptor
47 * 16 bytes: Encryption Key derivation nonce
48 */
49struct fscrypt_context {
50 u8 format;
51 u8 contents_encryption_mode;
52 u8 filenames_encryption_mode;
53 u8 flags;
54 u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
55 u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
56} __packed;
57
58/* Encryption parameters */
59#define FS_XTS_TWEAK_SIZE 16
60#define FS_AES_128_ECB_KEY_SIZE 16
61#define FS_AES_256_GCM_KEY_SIZE 32
62#define FS_AES_256_CBC_KEY_SIZE 32
63#define FS_AES_256_CTS_KEY_SIZE 32
64#define FS_AES_256_XTS_KEY_SIZE 64
65#define FS_MAX_KEY_SIZE 64
66
67#define FS_KEY_DESC_PREFIX "fscrypt:"
68#define FS_KEY_DESC_PREFIX_SIZE 8
69
70/* This is passed in from userspace into the kernel keyring */
71struct fscrypt_key {
72 u32 mode;
73 u8 raw[FS_MAX_KEY_SIZE];
74 u32 size;
75} __packed;
76
77struct fscrypt_info {
78 u8 ci_data_mode;
79 u8 ci_filename_mode;
80 u8 ci_flags;
81 struct crypto_skcipher *ci_ctfm;
82 struct key *ci_keyring_key;
83 u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
84};
85
86#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
87#define FS_WRITE_PATH_FL 0x00000002
88
89struct fscrypt_ctx {
90 union {
91 struct {
92 struct page *bounce_page; /* Ciphertext page */
93 struct page *control_page; /* Original page */
94 } w;
95 struct {
96 struct bio *bio;
97 struct work_struct work;
98 } r;
99 struct list_head free_list; /* Free list */
100 };
101 u8 flags; /* Flags */
102 u8 mode; /* Encryption mode for tfm */
103};
104
105struct fscrypt_completion_result {
106 struct completion completion;
107 int res;
108};
109
110#define DECLARE_FS_COMPLETION_RESULT(ecr) \
111 struct fscrypt_completion_result ecr = { \
112 COMPLETION_INITIALIZER((ecr).completion), 0 }
113
114static inline int fscrypt_key_size(int mode)
115{
116 switch (mode) {
117 case FS_ENCRYPTION_MODE_AES_256_XTS:
118 return FS_AES_256_XTS_KEY_SIZE;
119 case FS_ENCRYPTION_MODE_AES_256_GCM:
120 return FS_AES_256_GCM_KEY_SIZE;
121 case FS_ENCRYPTION_MODE_AES_256_CBC:
122 return FS_AES_256_CBC_KEY_SIZE;
123 case FS_ENCRYPTION_MODE_AES_256_CTS:
124 return FS_AES_256_CTS_KEY_SIZE;
125 default:
126 BUG();
127 }
128 return 0;
129}
130
131#define FS_FNAME_NUM_SCATTER_ENTRIES 4
132#define FS_CRYPTO_BLOCK_SIZE 16
133#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
134
135/**
136 * For encrypted symlinks, the ciphertext length is stored at the beginning
137 * of the string in little-endian format.
138 */
139struct fscrypt_symlink_data {
140 __le16 len;
141 char encrypted_path[1];
142} __packed;
143
144/**
145 * This function is used to calculate the disk space required to
146 * store a filename of length l in encrypted symlink format.
147 */
148static inline u32 fscrypt_symlink_data_len(u32 l)
149{
150 if (l < FS_CRYPTO_BLOCK_SIZE)
151 l = FS_CRYPTO_BLOCK_SIZE;
152 return (l + sizeof(struct fscrypt_symlink_data) - 1);
153}
154
155struct fscrypt_str {
156 unsigned char *name;
157 u32 len;
158};
159
160struct fscrypt_name {
161 const struct qstr *usr_fname;
162 struct fscrypt_str disk_name;
163 u32 hash;
164 u32 minor_hash;
165 struct fscrypt_str crypto_buf;
166};
167
168#define FSTR_INIT(n, l) { .name = n, .len = l }
169#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
170#define fname_name(p) ((p)->disk_name.name)
171#define fname_len(p) ((p)->disk_name.len)
172
173/*
174 * crypto opertions for filesystems
175 */
176struct fscrypt_operations {
177 int (*get_context)(struct inode *, void *, size_t);
178 int (*prepare_context)(struct inode *);
179 int (*set_context)(struct inode *, const void *, size_t, void *);
180 int (*dummy_context)(struct inode *);
181 bool (*is_encrypted)(struct inode *);
182 bool (*empty_dir)(struct inode *);
183 unsigned (*max_namelen)(struct inode *);
184};
185
186static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
187{
188 if (inode->i_sb->s_cop->dummy_context &&
189 inode->i_sb->s_cop->dummy_context(inode))
190 return true;
191 return false;
192}
193
194static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
195{
196 return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
197}
198
199static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
200{
201 return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
202}
203
204static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size)
205{
206 if (size == fscrypt_key_size(mode))
207 return size;
208 return 0;
209}
210
211static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
212{
213 if (str->len == 1 && str->name[0] == '.')
214 return true;
215
216 if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
217 return true;
218
219 return false;
220}
221
222static inline struct page *fscrypt_control_page(struct page *page)
223{
224#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
225 return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
226#else
227 WARN_ON_ONCE(1);
228 return ERR_PTR(-EINVAL);
229#endif
230}
231
232static inline int fscrypt_has_encryption_key(struct inode *inode)
233{
234#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
235 return (inode->i_crypt_info != NULL);
236#else
237 return 0;
238#endif
239}
240
241static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
242{
243#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
244 spin_lock(&dentry->d_lock);
245 dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
246 spin_unlock(&dentry->d_lock);
247#endif
248}
249
250#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
251extern const struct dentry_operations fscrypt_d_ops;
252#endif
253
254static inline void fscrypt_set_d_op(struct dentry *dentry)
255{
256#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
257 d_set_d_op(dentry, &fscrypt_d_ops);
258#endif
259}
260
261#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
262/* crypto.c */
263extern struct kmem_cache *fscrypt_info_cachep;
264int fscrypt_initialize(void);
265
266extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
267extern void fscrypt_release_ctx(struct fscrypt_ctx *);
268extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
269extern int fscrypt_decrypt_page(struct page *);
270extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
271extern void fscrypt_pullback_bio_page(struct page **, bool);
272extern void fscrypt_restore_control_page(struct page *);
273extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
274 unsigned int);
275/* policy.c */
276extern int fscrypt_process_policy(struct inode *,
277 const struct fscrypt_policy *);
278extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
279extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
280extern int fscrypt_inherit_context(struct inode *, struct inode *,
281 void *, bool);
282/* keyinfo.c */
283extern int get_crypt_info(struct inode *);
284extern int fscrypt_get_encryption_info(struct inode *);
285extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
286
287/* fname.c */
288extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
289 int lookup, struct fscrypt_name *);
290extern void fscrypt_free_filename(struct fscrypt_name *);
291extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
292extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
293 struct fscrypt_str *);
294extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
295extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
296 const struct fscrypt_str *, struct fscrypt_str *);
297extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
298 struct fscrypt_str *);
299#endif
300
301/* crypto.c */
302static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
303 gfp_t f)
304{
305 return ERR_PTR(-EOPNOTSUPP);
306}
307
308static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
309{
310 return;
311}
312
313static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
314 struct page *p, gfp_t f)
315{
316 return ERR_PTR(-EOPNOTSUPP);
317}
318
319static inline int fscrypt_notsupp_decrypt_page(struct page *p)
320{
321 return -EOPNOTSUPP;
322}
323
324static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c,
325 struct bio *b)
326{
327 return;
328}
329
330static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b)
331{
332 return;
333}
334
335static inline void fscrypt_notsupp_restore_control_page(struct page *p)
336{
337 return;
338}
339
340static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
341 sector_t s, unsigned int f)
342{
343 return -EOPNOTSUPP;
344}
345
346/* policy.c */
347static inline int fscrypt_notsupp_process_policy(struct inode *i,
348 const struct fscrypt_policy *p)
349{
350 return -EOPNOTSUPP;
351}
352
353static inline int fscrypt_notsupp_get_policy(struct inode *i,
354 struct fscrypt_policy *p)
355{
356 return -EOPNOTSUPP;
357}
358
359static inline int fscrypt_notsupp_has_permitted_context(struct inode *p,
360 struct inode *i)
361{
362 return 0;
363}
364
365static inline int fscrypt_notsupp_inherit_context(struct inode *p,
366 struct inode *i, void *v, bool b)
367{
368 return -EOPNOTSUPP;
369}
370
371/* keyinfo.c */
372static inline int fscrypt_notsupp_get_encryption_info(struct inode *i)
373{
374 return -EOPNOTSUPP;
375}
376
377static inline void fscrypt_notsupp_put_encryption_info(struct inode *i,
378 struct fscrypt_info *f)
379{
380 return;
381}
382
383 /* fname.c */
384static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
385 const struct qstr *iname,
386 int lookup, struct fscrypt_name *fname)
387{
388 if (dir->i_sb->s_cop->is_encrypted(dir))
389 return -EOPNOTSUPP;
390
391 memset(fname, 0, sizeof(struct fscrypt_name));
392 fname->usr_fname = iname;
393 fname->disk_name.name = (unsigned char *)iname->name;
394 fname->disk_name.len = iname->len;
395 return 0;
396}
397
398static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname)
399{
400 return;
401}
402
403static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s)
404{
405 /* never happens */
406 WARN_ON(1);
407 return 0;
408}
409
410static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode,
411 u32 ilen, struct fscrypt_str *crypto_str)
412{
413 return -EOPNOTSUPP;
414}
415
416static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c)
417{
418 return;
419}
420
421static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode,
422 u32 hash, u32 minor_hash,
423 const struct fscrypt_str *iname,
424 struct fscrypt_str *oname)
425{
426 return -EOPNOTSUPP;
427}
428
429static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode,
430 const struct qstr *iname,
431 struct fscrypt_str *oname)
432{
433 return -EOPNOTSUPP;
434}
435#endif /* _LINUX_FSCRYPTO_H */
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 84d971ff3fba..649e9171a9b3 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -189,4 +189,109 @@ static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
189 189
190#endif 190#endif
191 191
192struct ccsr_rcpm_v1 {
193 u8 res0000[4];
194 __be32 cdozsr; /* 0x0004 Core Doze Status Register */
195 u8 res0008[4];
196 __be32 cdozcr; /* 0x000c Core Doze Control Register */
197 u8 res0010[4];
198 __be32 cnapsr; /* 0x0014 Core Nap Status Register */
199 u8 res0018[4];
200 __be32 cnapcr; /* 0x001c Core Nap Control Register */
201 u8 res0020[4];
202 __be32 cdozpsr; /* 0x0024 Core Doze Previous Status Register */
203 u8 res0028[4];
204 __be32 cnappsr; /* 0x002c Core Nap Previous Status Register */
205 u8 res0030[4];
206 __be32 cwaitsr; /* 0x0034 Core Wait Status Register */
207 u8 res0038[4];
208 __be32 cwdtdsr; /* 0x003c Core Watchdog Detect Status Register */
209 __be32 powmgtcsr; /* 0x0040 PM Control&Status Register */
210#define RCPM_POWMGTCSR_SLP 0x00020000
211 u8 res0044[12];
212 __be32 ippdexpcr; /* 0x0050 IP Powerdown Exception Control Register */
213 u8 res0054[16];
214 __be32 cpmimr; /* 0x0064 Core PM IRQ Mask Register */
215 u8 res0068[4];
216 __be32 cpmcimr; /* 0x006c Core PM Critical IRQ Mask Register */
217 u8 res0070[4];
218 __be32 cpmmcmr; /* 0x0074 Core PM Machine Check Mask Register */
219 u8 res0078[4];
220 __be32 cpmnmimr; /* 0x007c Core PM NMI Mask Register */
221 u8 res0080[4];
222 __be32 ctbenr; /* 0x0084 Core Time Base Enable Register */
223 u8 res0088[4];
224 __be32 ctbckselr; /* 0x008c Core Time Base Clock Select Register */
225 u8 res0090[4];
226 __be32 ctbhltcr; /* 0x0094 Core Time Base Halt Control Register */
227 u8 res0098[4];
228 __be32 cmcpmaskcr; /* 0x00a4 Core Machine Check Mask Register */
229};
230
231struct ccsr_rcpm_v2 {
232 u8 res_00[12];
233 __be32 tph10sr0; /* Thread PH10 Status Register */
234 u8 res_10[12];
235 __be32 tph10setr0; /* Thread PH10 Set Control Register */
236 u8 res_20[12];
237 __be32 tph10clrr0; /* Thread PH10 Clear Control Register */
238 u8 res_30[12];
239 __be32 tph10psr0; /* Thread PH10 Previous Status Register */
240 u8 res_40[12];
241 __be32 twaitsr0; /* Thread Wait Status Register */
242 u8 res_50[96];
243 __be32 pcph15sr; /* Physical Core PH15 Status Register */
244 __be32 pcph15setr; /* Physical Core PH15 Set Control Register */
245 __be32 pcph15clrr; /* Physical Core PH15 Clear Control Register */
246 __be32 pcph15psr; /* Physical Core PH15 Prev Status Register */
247 u8 res_c0[16];
248 __be32 pcph20sr; /* Physical Core PH20 Status Register */
249 __be32 pcph20setr; /* Physical Core PH20 Set Control Register */
250 __be32 pcph20clrr; /* Physical Core PH20 Clear Control Register */
251 __be32 pcph20psr; /* Physical Core PH20 Prev Status Register */
252 __be32 pcpw20sr; /* Physical Core PW20 Status Register */
253 u8 res_e0[12];
254 __be32 pcph30sr; /* Physical Core PH30 Status Register */
255 __be32 pcph30setr; /* Physical Core PH30 Set Control Register */
256 __be32 pcph30clrr; /* Physical Core PH30 Clear Control Register */
257 __be32 pcph30psr; /* Physical Core PH30 Prev Status Register */
258 u8 res_100[32];
259 __be32 ippwrgatecr; /* IP Power Gating Control Register */
260 u8 res_124[12];
261 __be32 powmgtcsr; /* Power Management Control & Status Reg */
262#define RCPM_POWMGTCSR_LPM20_RQ 0x00100000
263#define RCPM_POWMGTCSR_LPM20_ST 0x00000200
264#define RCPM_POWMGTCSR_P_LPM20_ST 0x00000100
265 u8 res_134[12];
266 __be32 ippdexpcr[4]; /* IP Powerdown Exception Control Reg */
267 u8 res_150[12];
268 __be32 tpmimr0; /* Thread PM Interrupt Mask Reg */
269 u8 res_160[12];
270 __be32 tpmcimr0; /* Thread PM Crit Interrupt Mask Reg */
271 u8 res_170[12];
272 __be32 tpmmcmr0; /* Thread PM Machine Check Interrupt Mask Reg */
273 u8 res_180[12];
274 __be32 tpmnmimr0; /* Thread PM NMI Mask Reg */
275 u8 res_190[12];
276 __be32 tmcpmaskcr0; /* Thread Machine Check Mask Control Reg */
277 __be32 pctbenr; /* Physical Core Time Base Enable Reg */
278 __be32 pctbclkselr; /* Physical Core Time Base Clock Select */
279 __be32 tbclkdivr; /* Time Base Clock Divider Register */
280 u8 res_1ac[4];
281 __be32 ttbhltcr[4]; /* Thread Time Base Halt Control Register */
282 __be32 clpcl10sr; /* Cluster PCL10 Status Register */
283 __be32 clpcl10setr; /* Cluster PCL30 Set Control Register */
284 __be32 clpcl10clrr; /* Cluster PCL30 Clear Control Register */
285 __be32 clpcl10psr; /* Cluster PCL30 Prev Status Register */
286 __be32 cddslpsetr; /* Core Domain Deep Sleep Set Register */
287 __be32 cddslpclrr; /* Core Domain Deep Sleep Clear Register */
288 __be32 cdpwroksetr; /* Core Domain Power OK Set Register */
289 __be32 cdpwrokclrr; /* Core Domain Power OK Clear Register */
290 __be32 cdpwrensr; /* Core Domain Power Enable Status Register */
291 __be32 cddslsr; /* Core Domain Deep Sleep Status Register */
292 u8 res_1e8[8];
293 __be32 dslpcntcr[8]; /* Deep Sleep Counter Cfg Register */
294 u8 res_300[3568];
295};
296
192#endif 297#endif
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 7ee1774edee5..0141f257d67b 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -16,15 +16,6 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/bug.h> 17#include <linux/bug.h>
18 18
19/*
20 * fsnotify_d_instantiate - instantiate a dentry for inode
21 */
22static inline void fsnotify_d_instantiate(struct dentry *dentry,
23 struct inode *inode)
24{
25 __fsnotify_d_instantiate(dentry, inode);
26}
27
28/* Notify this dentry's parent about a child's events. */ 19/* Notify this dentry's parent about a child's events. */
29static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) 20static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
30{ 21{
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 533c4408529a..1259e53d9296 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -290,14 +290,9 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
290/* 290/*
291 * fsnotify_d_instantiate - instantiate a dentry for inode 291 * fsnotify_d_instantiate - instantiate a dentry for inode
292 */ 292 */
293static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) 293static inline void __fsnotify_d_instantiate(struct dentry *dentry)
294{ 294{
295 if (!inode)
296 return;
297
298 spin_lock(&dentry->d_lock);
299 __fsnotify_update_dcache_flags(dentry); 295 __fsnotify_update_dcache_flags(dentry);
300 spin_unlock(&dentry->d_lock);
301} 296}
302 297
303/* called from fsnotify listeners, such as fanotify or dnotify */ 298/* called from fsnotify listeners, such as fanotify or dnotify */
@@ -396,7 +391,7 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
396static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) 391static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
397{} 392{}
398 393
399static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) 394static inline void __fsnotify_d_instantiate(struct dentry *dentry)
400{} 395{}
401 396
402static inline u32 fsnotify_get_cookie(void) 397static inline u32 fsnotify_get_cookie(void)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index c2b340e23f62..dea12a6e413b 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -713,6 +713,18 @@ static inline void __ftrace_enabled_restore(int enabled)
713#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) 713#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
714#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) 714#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
715 715
716static inline unsigned long get_lock_parent_ip(void)
717{
718 unsigned long addr = CALLER_ADDR0;
719
720 if (!in_lock_functions(addr))
721 return addr;
722 addr = CALLER_ADDR1;
723 if (!in_lock_functions(addr))
724 return addr;
725 return CALLER_ADDR2;
726}
727
716#ifdef CONFIG_IRQSOFF_TRACER 728#ifdef CONFIG_IRQSOFF_TRACER
717 extern void time_hardirqs_on(unsigned long a0, unsigned long a1); 729 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
718 extern void time_hardirqs_off(unsigned long a0, unsigned long a1); 730 extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
@@ -799,16 +811,6 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
799 */ 811 */
800#define __notrace_funcgraph notrace 812#define __notrace_funcgraph notrace
801 813
802/*
803 * We want to which function is an entrypoint of a hardirq.
804 * That will help us to put a signal on output.
805 */
806#define __irq_entry __attribute__((__section__(".irqentry.text")))
807
808/* Limits of hardirq entrypoints */
809extern char __irqentry_text_start[];
810extern char __irqentry_text_end[];
811
812#define FTRACE_NOTRACE_DEPTH 65536 814#define FTRACE_NOTRACE_DEPTH 65536
813#define FTRACE_RETFUNC_DEPTH 50 815#define FTRACE_RETFUNC_DEPTH 50
814#define FTRACE_RETSTACK_ALLOC_SIZE 32 816#define FTRACE_RETSTACK_ALLOC_SIZE 32
@@ -845,7 +847,6 @@ static inline void unpause_graph_tracing(void)
845#else /* !CONFIG_FUNCTION_GRAPH_TRACER */ 847#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
846 848
847#define __notrace_funcgraph 849#define __notrace_funcgraph
848#define __irq_entry
849#define INIT_FTRACE_GRAPH 850#define INIT_FTRACE_GRAPH
850 851
851static inline void ftrace_graph_init_task(struct task_struct *t) { } 852static inline void ftrace_graph_init_task(struct task_struct *t) { }
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index af1f2b24bbe4..570383a41853 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -9,6 +9,11 @@
9 9
10struct vm_area_struct; 10struct vm_area_struct;
11 11
12/*
13 * In case of changes, please don't forget to update
14 * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
15 */
16
12/* Plain integer GFP bitmasks. Do not use this directly. */ 17/* Plain integer GFP bitmasks. Do not use this directly. */
13#define ___GFP_DMA 0x01u 18#define ___GFP_DMA 0x01u
14#define ___GFP_HIGHMEM 0x02u 19#define ___GFP_HIGHMEM 0x02u
@@ -48,7 +53,6 @@ struct vm_area_struct;
48#define __GFP_DMA ((__force gfp_t)___GFP_DMA) 53#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
49#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) 54#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
50#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) 55#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
51#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
52#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ 56#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
53#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) 57#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
54 58
@@ -101,8 +105,6 @@ struct vm_area_struct;
101 * 105 *
102 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. 106 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
103 * This takes precedence over the __GFP_MEMALLOC flag if both are set. 107 * This takes precedence over the __GFP_MEMALLOC flag if both are set.
104 *
105 * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
106 */ 108 */
107#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) 109#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
108#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) 110#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
@@ -255,7 +257,7 @@ struct vm_area_struct;
255#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) 257#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
256#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ 258#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
257 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ 259 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
258 ~__GFP_KSWAPD_RECLAIM) 260 ~__GFP_RECLAIM)
259 261
260/* Convert GFP flags to their corresponding migrate type */ 262/* Convert GFP flags to their corresponding migrate type */
261#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) 263#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -329,22 +331,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
329 * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) 331 * 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
330 * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) 332 * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
331 * 333 *
332 * ZONES_SHIFT must be <= 2 on 32 bit platforms. 334 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
333 */ 335 */
334 336
335#if 16 * ZONES_SHIFT > BITS_PER_LONG 337#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
336#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer 338/* ZONE_DEVICE is not a valid GFP zone specifier */
339#define GFP_ZONES_SHIFT 2
340#else
341#define GFP_ZONES_SHIFT ZONES_SHIFT
342#endif
343
344#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
345#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
337#endif 346#endif
338 347
339#define GFP_ZONE_TABLE ( \ 348#define GFP_ZONE_TABLE ( \
340 (ZONE_NORMAL << 0 * ZONES_SHIFT) \ 349 (ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \
341 | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \ 350 | (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \
342 | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \ 351 | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \
343 | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \ 352 | (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \
344 | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \ 353 | (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \
345 | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \ 354 | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \
346 | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \ 355 | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
347 | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \ 356 | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
348) 357)
349 358
350/* 359/*
@@ -369,8 +378,8 @@ static inline enum zone_type gfp_zone(gfp_t flags)
369 enum zone_type z; 378 enum zone_type z;
370 int bit = (__force int) (flags & GFP_ZONEMASK); 379 int bit = (__force int) (flags & GFP_ZONEMASK);
371 380
372 z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & 381 z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
373 ((1 << ZONES_SHIFT) - 1); 382 ((1 << GFP_ZONES_SHIFT) - 1);
374 VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); 383 VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
375 return z; 384 return z;
376} 385}
@@ -515,13 +524,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
515void drain_all_pages(struct zone *zone); 524void drain_all_pages(struct zone *zone);
516void drain_local_pages(struct zone *zone); 525void drain_local_pages(struct zone *zone);
517 526
518#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
519void page_alloc_init_late(void); 527void page_alloc_init_late(void);
520#else
521static inline void page_alloc_init_late(void)
522{
523}
524#endif
525 528
526/* 529/*
527 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what 530 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 82fda487453f..bee976f82788 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -1,6 +1,7 @@
1#ifndef __LINUX_GPIO_DRIVER_H 1#ifndef __LINUX_GPIO_DRIVER_H
2#define __LINUX_GPIO_DRIVER_H 2#define __LINUX_GPIO_DRIVER_H
3 3
4#include <linux/device.h>
4#include <linux/types.h> 5#include <linux/types.h>
5#include <linux/module.h> 6#include <linux/module.h>
6#include <linux/irq.h> 7#include <linux/irq.h>
@@ -10,22 +11,21 @@
10#include <linux/pinctrl/pinctrl.h> 11#include <linux/pinctrl/pinctrl.h>
11#include <linux/kconfig.h> 12#include <linux/kconfig.h>
12 13
13struct device;
14struct gpio_desc; 14struct gpio_desc;
15struct of_phandle_args; 15struct of_phandle_args;
16struct device_node; 16struct device_node;
17struct seq_file; 17struct seq_file;
18struct gpio_device;
18 19
19#ifdef CONFIG_GPIOLIB 20#ifdef CONFIG_GPIOLIB
20 21
21/** 22/**
22 * struct gpio_chip - abstract a GPIO controller 23 * struct gpio_chip - abstract a GPIO controller
23 * @label: for diagnostics 24 * @label: a functional name for the GPIO device, such as a part
25 * number or the name of the SoC IP-block implementing it.
26 * @gpiodev: the internal state holder, opaque struct
24 * @parent: optional parent device providing the GPIOs 27 * @parent: optional parent device providing the GPIOs
25 * @cdev: class device used by sysfs interface (may be NULL)
26 * @owner: helps prevent removal of modules exporting active GPIOs 28 * @owner: helps prevent removal of modules exporting active GPIOs
27 * @data: per-instance data assigned by the driver
28 * @list: links gpio_chips together for traversal
29 * @request: optional hook for chip-specific activation, such as 29 * @request: optional hook for chip-specific activation, such as
30 * enabling module power and clock; may sleep 30 * enabling module power and clock; may sleep
31 * @free: optional hook for chip-specific deactivation, such as 31 * @free: optional hook for chip-specific deactivation, such as
@@ -52,7 +52,6 @@ struct seq_file;
52 * get rid of the static GPIO number space in the long run. 52 * get rid of the static GPIO number space in the long run.
53 * @ngpio: the number of GPIOs handled by this controller; the last GPIO 53 * @ngpio: the number of GPIOs handled by this controller; the last GPIO
54 * handled is (base + ngpio - 1). 54 * handled is (base + ngpio - 1).
55 * @desc: array of ngpio descriptors. Private.
56 * @names: if set, must be an array of strings to use as alternative 55 * @names: if set, must be an array of strings to use as alternative
57 * names for the GPIOs in this chip. Any entry in the array 56 * names for the GPIOs in this chip. Any entry in the array
58 * may be NULL if there is no alias for the GPIO, however the 57 * may be NULL if there is no alias for the GPIO, however the
@@ -107,11 +106,9 @@ struct seq_file;
107 */ 106 */
108struct gpio_chip { 107struct gpio_chip {
109 const char *label; 108 const char *label;
109 struct gpio_device *gpiodev;
110 struct device *parent; 110 struct device *parent;
111 struct device *cdev;
112 struct module *owner; 111 struct module *owner;
113 void *data;
114 struct list_head list;
115 112
116 int (*request)(struct gpio_chip *chip, 113 int (*request)(struct gpio_chip *chip,
117 unsigned offset); 114 unsigned offset);
@@ -141,7 +138,6 @@ struct gpio_chip {
141 struct gpio_chip *chip); 138 struct gpio_chip *chip);
142 int base; 139 int base;
143 u16 ngpio; 140 u16 ngpio;
144 struct gpio_desc *desc;
145 const char *const *names; 141 const char *const *names;
146 bool can_sleep; 142 bool can_sleep;
147 bool irq_not_threaded; 143 bool irq_not_threaded;
@@ -184,15 +180,6 @@ struct gpio_chip {
184 int (*of_xlate)(struct gpio_chip *gc, 180 int (*of_xlate)(struct gpio_chip *gc,
185 const struct of_phandle_args *gpiospec, u32 *flags); 181 const struct of_phandle_args *gpiospec, u32 *flags);
186#endif 182#endif
187#ifdef CONFIG_PINCTRL
188 /*
189 * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally
190 * describe the actual pin range which they serve in an SoC. This
191 * information would be used by pinctrl subsystem to configure
192 * corresponding pins for gpio usage.
193 */
194 struct list_head pin_ranges;
195#endif
196}; 183};
197 184
198extern const char *gpiochip_is_requested(struct gpio_chip *chip, 185extern const char *gpiochip_is_requested(struct gpio_chip *chip,
@@ -205,18 +192,24 @@ static inline int gpiochip_add(struct gpio_chip *chip)
205 return gpiochip_add_data(chip, NULL); 192 return gpiochip_add_data(chip, NULL);
206} 193}
207extern void gpiochip_remove(struct gpio_chip *chip); 194extern void gpiochip_remove(struct gpio_chip *chip);
195extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
196 void *data);
197extern void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip);
198
208extern struct gpio_chip *gpiochip_find(void *data, 199extern struct gpio_chip *gpiochip_find(void *data,
209 int (*match)(struct gpio_chip *chip, void *data)); 200 int (*match)(struct gpio_chip *chip, void *data));
210 201
211/* lock/unlock as IRQ */ 202/* lock/unlock as IRQ */
212int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); 203int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
213void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); 204void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
205bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
206
207/* Line status inquiry for drivers */
208bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
209bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset);
214 210
215/* get driver data */ 211/* get driver data */
216static inline void *gpiochip_get_data(struct gpio_chip *chip) 212void *gpiochip_get_data(struct gpio_chip *chip);
217{
218 return chip->data;
219}
220 213
221struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); 214struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
222 215
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 1afde47e1528..79c52fa81cac 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -32,12 +32,28 @@
32#error Wordsize not 32 or 64 32#error Wordsize not 32 or 64
33#endif 33#endif
34 34
35/*
36 * The above primes are actively bad for hashing, since they are
37 * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
38 * real problems. Besides, the "prime" part is pointless for the
39 * multiplicative hash.
40 *
41 * Although a random odd number will do, it turns out that the golden
42 * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
43 * properties.
44 *
45 * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
46 * (See Knuth vol 3, section 6.4, exercise 9.)
47 */
48#define GOLDEN_RATIO_32 0x61C88647
49#define GOLDEN_RATIO_64 0x61C8864680B583EBull
50
35static __always_inline u64 hash_64(u64 val, unsigned int bits) 51static __always_inline u64 hash_64(u64 val, unsigned int bits)
36{ 52{
37 u64 hash = val; 53 u64 hash = val;
38 54
39#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 55#if BITS_PER_LONG == 64
40 hash = hash * GOLDEN_RATIO_PRIME_64; 56 hash = hash * GOLDEN_RATIO_64;
41#else 57#else
42 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ 58 /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
43 u64 n = hash; 59 u64 n = hash;
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2ead22dd74a0..c98c6539e2c2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -220,7 +220,7 @@ static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time
220 timer->node.expires = ktime_add_safe(time, delta); 220 timer->node.expires = ktime_add_safe(time, delta);
221} 221}
222 222
223static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) 223static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
224{ 224{
225 timer->_softexpires = time; 225 timer->_softexpires = time;
226 timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); 226 timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
@@ -378,7 +378,7 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
378 378
379/* Basic timer operations: */ 379/* Basic timer operations: */
380extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, 380extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
381 unsigned long range_ns, const enum hrtimer_mode mode); 381 u64 range_ns, const enum hrtimer_mode mode);
382 382
383/** 383/**
384 * hrtimer_start - (re)start an hrtimer on the current CPU 384 * hrtimer_start - (re)start an hrtimer on the current CPU
@@ -399,7 +399,7 @@ extern int hrtimer_try_to_cancel(struct hrtimer *timer);
399static inline void hrtimer_start_expires(struct hrtimer *timer, 399static inline void hrtimer_start_expires(struct hrtimer *timer,
400 enum hrtimer_mode mode) 400 enum hrtimer_mode mode)
401{ 401{
402 unsigned long delta; 402 u64 delta;
403 ktime_t soft, hard; 403 ktime_t soft, hard;
404 soft = hrtimer_get_softexpires(timer); 404 soft = hrtimer_get_softexpires(timer);
405 hard = hrtimer_get_expires(timer); 405 hard = hrtimer_get_expires(timer);
@@ -477,10 +477,12 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
477extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, 477extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
478 struct task_struct *tsk); 478 struct task_struct *tsk);
479 479
480extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, 480extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
481 const enum hrtimer_mode mode); 481 const enum hrtimer_mode mode);
482extern int schedule_hrtimeout_range_clock(ktime_t *expires, 482extern int schedule_hrtimeout_range_clock(ktime_t *expires,
483 unsigned long delta, const enum hrtimer_mode mode, int clock); 483 u64 delta,
484 const enum hrtimer_mode mode,
485 int clock);
484extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); 486extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
485 487
486/* Soft interrupt function to run the hrtimer queues: */ 488/* Soft interrupt function to run the hrtimer queues: */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 459fd25b378e..d7b9e5346fba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -41,7 +41,8 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
41enum transparent_hugepage_flag { 41enum transparent_hugepage_flag {
42 TRANSPARENT_HUGEPAGE_FLAG, 42 TRANSPARENT_HUGEPAGE_FLAG,
43 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 43 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
44 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, 44 TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
45 TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
45 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, 46 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
46 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, 47 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
47 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, 48 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
@@ -71,12 +72,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
71 ((__vma)->vm_flags & VM_HUGEPAGE))) && \ 72 ((__vma)->vm_flags & VM_HUGEPAGE))) && \
72 !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ 73 !((__vma)->vm_flags & VM_NOHUGEPAGE) && \
73 !is_vma_temporary_stack(__vma)) 74 !is_vma_temporary_stack(__vma))
74#define transparent_hugepage_defrag(__vma) \
75 ((transparent_hugepage_flags & \
76 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
77 (transparent_hugepage_flags & \
78 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \
79 (__vma)->vm_flags & VM_HUGEPAGE))
80#define transparent_hugepage_use_zero_page() \ 75#define transparent_hugepage_use_zero_page() \
81 (transparent_hugepage_flags & \ 76 (transparent_hugepage_flags & \
82 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) 77 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
@@ -101,19 +96,21 @@ static inline int split_huge_page(struct page *page)
101void deferred_split_huge_page(struct page *page); 96void deferred_split_huge_page(struct page *page);
102 97
103void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 98void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
104 unsigned long address); 99 unsigned long address, bool freeze);
105 100
106#define split_huge_pmd(__vma, __pmd, __address) \ 101#define split_huge_pmd(__vma, __pmd, __address) \
107 do { \ 102 do { \
108 pmd_t *____pmd = (__pmd); \ 103 pmd_t *____pmd = (__pmd); \
109 if (pmd_trans_huge(*____pmd) \ 104 if (pmd_trans_huge(*____pmd) \
110 || pmd_devmap(*____pmd)) \ 105 || pmd_devmap(*____pmd)) \
111 __split_huge_pmd(__vma, __pmd, __address); \ 106 __split_huge_pmd(__vma, __pmd, __address, \
107 false); \
112 } while (0) 108 } while (0)
113 109
114#if HPAGE_PMD_ORDER >= MAX_ORDER 110
115#error "hugepages can't be allocated by the buddy allocator" 111void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
116#endif 112 bool freeze, struct page *page);
113
117extern int hugepage_madvise(struct vm_area_struct *vma, 114extern int hugepage_madvise(struct vm_area_struct *vma,
118 unsigned long *vm_flags, int advice); 115 unsigned long *vm_flags, int advice);
119extern void vma_adjust_trans_huge(struct vm_area_struct *vma, 116extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
@@ -130,7 +127,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
130 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) 127 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
131 return __pmd_trans_huge_lock(pmd, vma); 128 return __pmd_trans_huge_lock(pmd, vma);
132 else 129 else
133 return false; 130 return NULL;
134} 131}
135static inline int hpage_nr_pages(struct page *page) 132static inline int hpage_nr_pages(struct page *page)
136{ 133{
@@ -155,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
155} 152}
156 153
157struct page *get_huge_zero_page(void); 154struct page *get_huge_zero_page(void);
155void put_huge_zero_page(void);
158 156
159#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 157#else /* CONFIG_TRANSPARENT_HUGEPAGE */
160#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) 158#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -178,6 +176,10 @@ static inline int split_huge_page(struct page *page)
178static inline void deferred_split_huge_page(struct page *page) {} 176static inline void deferred_split_huge_page(struct page *page) {}
179#define split_huge_pmd(__vma, __pmd, __address) \ 177#define split_huge_pmd(__vma, __pmd, __address) \
180 do { } while (0) 178 do { } while (0)
179
180static inline void split_huge_pmd_address(struct vm_area_struct *vma,
181 unsigned long address, bool freeze, struct page *page) {}
182
181static inline int hugepage_madvise(struct vm_area_struct *vma, 183static inline int hugepage_madvise(struct vm_area_struct *vma,
182 unsigned long *vm_flags, int advice) 184 unsigned long *vm_flags, int advice)
183{ 185{
@@ -207,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page)
207 return false; 209 return false;
208} 210}
209 211
212static inline void put_huge_zero_page(void)
213{
214 BUILD_BUG();
215}
210 216
211static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, 217static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
212 unsigned long addr, pmd_t *pmd, int flags) 218 unsigned long addr, pmd_t *pmd, int flags)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 753dbad0bf94..aa0fadce9308 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -235,6 +235,7 @@ struct vmbus_channel_offer {
235#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100 235#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
236#define VMBUS_CHANNEL_PARENT_OFFER 0x200 236#define VMBUS_CHANNEL_PARENT_OFFER 0x200
237#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400 237#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
238#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
238 239
239struct vmpacket_descriptor { 240struct vmpacket_descriptor {
240 u16 type; 241 u16 type;
@@ -391,6 +392,10 @@ enum vmbus_channel_message_type {
391 CHANNELMSG_VERSION_RESPONSE = 15, 392 CHANNELMSG_VERSION_RESPONSE = 15,
392 CHANNELMSG_UNLOAD = 16, 393 CHANNELMSG_UNLOAD = 16,
393 CHANNELMSG_UNLOAD_RESPONSE = 17, 394 CHANNELMSG_UNLOAD_RESPONSE = 17,
395 CHANNELMSG_18 = 18,
396 CHANNELMSG_19 = 19,
397 CHANNELMSG_20 = 20,
398 CHANNELMSG_TL_CONNECT_REQUEST = 21,
394 CHANNELMSG_COUNT 399 CHANNELMSG_COUNT
395}; 400};
396 401
@@ -561,6 +566,13 @@ struct vmbus_channel_initiate_contact {
561 u64 monitor_page2; 566 u64 monitor_page2;
562} __packed; 567} __packed;
563 568
569/* Hyper-V socket: guest's connect()-ing to host */
570struct vmbus_channel_tl_connect_request {
571 struct vmbus_channel_message_header header;
572 uuid_le guest_endpoint_id;
573 uuid_le host_service_id;
574} __packed;
575
564struct vmbus_channel_version_response { 576struct vmbus_channel_version_response {
565 struct vmbus_channel_message_header header; 577 struct vmbus_channel_message_header header;
566 u8 version_supported; 578 u8 version_supported;
@@ -633,6 +645,32 @@ enum hv_signal_policy {
633 HV_SIGNAL_POLICY_EXPLICIT, 645 HV_SIGNAL_POLICY_EXPLICIT,
634}; 646};
635 647
648enum vmbus_device_type {
649 HV_IDE = 0,
650 HV_SCSI,
651 HV_FC,
652 HV_NIC,
653 HV_ND,
654 HV_PCIE,
655 HV_FB,
656 HV_KBD,
657 HV_MOUSE,
658 HV_KVP,
659 HV_TS,
660 HV_HB,
661 HV_SHUTDOWN,
662 HV_FCOPY,
663 HV_BACKUP,
664 HV_DM,
665 HV_UNKOWN,
666};
667
668struct vmbus_device {
669 u16 dev_type;
670 uuid_le guid;
671 bool perf_device;
672};
673
636struct vmbus_channel { 674struct vmbus_channel {
637 /* Unique channel id */ 675 /* Unique channel id */
638 int id; 676 int id;
@@ -728,6 +766,12 @@ struct vmbus_channel {
728 void (*sc_creation_callback)(struct vmbus_channel *new_sc); 766 void (*sc_creation_callback)(struct vmbus_channel *new_sc);
729 767
730 /* 768 /*
769 * Channel rescind callback. Some channels (the hvsock ones), need to
770 * register a callback which is invoked in vmbus_onoffer_rescind().
771 */
772 void (*chn_rescind_callback)(struct vmbus_channel *channel);
773
774 /*
731 * The spinlock to protect the structure. It is being used to protect 775 * The spinlock to protect the structure. It is being used to protect
732 * test-and-set access to various attributes of the structure as well 776 * test-and-set access to various attributes of the structure as well
733 * as all sc_list operations. 777 * as all sc_list operations.
@@ -767,8 +811,30 @@ struct vmbus_channel {
767 * signaling control. 811 * signaling control.
768 */ 812 */
769 enum hv_signal_policy signal_policy; 813 enum hv_signal_policy signal_policy;
814 /*
815 * On the channel send side, many of the VMBUS
816 * device drivers explicity serialize access to the
817 * outgoing ring buffer. Give more control to the
818 * VMBUS device drivers in terms how to serialize
819 * accesss to the outgoing ring buffer.
820 * The default behavior will be to aquire the
821 * ring lock to preserve the current behavior.
822 */
823 bool acquire_ring_lock;
824
770}; 825};
771 826
827static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
828{
829 c->acquire_ring_lock = state;
830}
831
832static inline bool is_hvsock_channel(const struct vmbus_channel *c)
833{
834 return !!(c->offermsg.offer.chn_flags &
835 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
836}
837
772static inline void set_channel_signal_state(struct vmbus_channel *c, 838static inline void set_channel_signal_state(struct vmbus_channel *c,
773 enum hv_signal_policy policy) 839 enum hv_signal_policy policy)
774{ 840{
@@ -790,6 +856,12 @@ static inline void *get_per_channel_state(struct vmbus_channel *c)
790 return c->per_channel_state; 856 return c->per_channel_state;
791} 857}
792 858
859static inline void set_channel_pending_send_size(struct vmbus_channel *c,
860 u32 size)
861{
862 c->outbound.ring_buffer->pending_send_sz = size;
863}
864
793void vmbus_onmessage(void *context); 865void vmbus_onmessage(void *context);
794 866
795int vmbus_request_offers(void); 867int vmbus_request_offers(void);
@@ -801,6 +873,9 @@ int vmbus_request_offers(void);
801void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, 873void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
802 void (*sc_cr_cb)(struct vmbus_channel *new_sc)); 874 void (*sc_cr_cb)(struct vmbus_channel *new_sc));
803 875
876void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
877 void (*chn_rescind_cb)(struct vmbus_channel *));
878
804/* 879/*
805 * Retrieve the (sub) channel on which to send an outgoing request. 880 * Retrieve the (sub) channel on which to send an outgoing request.
806 * When a primary channel has multiple sub-channels, we choose a 881 * When a primary channel has multiple sub-channels, we choose a
@@ -940,6 +1015,20 @@ extern void vmbus_ontimer(unsigned long data);
940struct hv_driver { 1015struct hv_driver {
941 const char *name; 1016 const char *name;
942 1017
1018 /*
1019 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
1020 * channel flag, actually doesn't mean a synthetic device because the
1021 * offer's if_type/if_instance can change for every new hvsock
1022 * connection.
1023 *
1024 * However, to facilitate the notification of new-offer/rescind-offer
1025 * from vmbus driver to hvsock driver, we can handle hvsock offer as
1026 * a special vmbus device, and hence we need the below flag to
1027 * indicate if the driver is the hvsock driver or not: we need to
1028 * specially treat the hvosck offer & driver in vmbus_match().
1029 */
1030 bool hvsock;
1031
943 /* the device type supported by this driver */ 1032 /* the device type supported by this driver */
944 uuid_le dev_type; 1033 uuid_le dev_type;
945 const struct hv_vmbus_device_id *id_table; 1034 const struct hv_vmbus_device_id *id_table;
@@ -959,6 +1048,8 @@ struct hv_device {
959 1048
960 /* the device instance id of this device */ 1049 /* the device instance id of this device */
961 uuid_le dev_instance; 1050 uuid_le dev_instance;
1051 u16 vendor_id;
1052 u16 device_id;
962 1053
963 struct device device; 1054 struct device device;
964 1055
@@ -994,6 +1085,8 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
994 const char *mod_name); 1085 const char *mod_name);
995void vmbus_driver_unregister(struct hv_driver *hv_driver); 1086void vmbus_driver_unregister(struct hv_driver *hv_driver);
996 1087
1088void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1089
997int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, 1090int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
998 resource_size_t min, resource_size_t max, 1091 resource_size_t min, resource_size_t max,
999 resource_size_t size, resource_size_t align, 1092 resource_size_t size, resource_size_t align,
@@ -1158,6 +1251,7 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
1158 1251
1159struct hv_util_service { 1252struct hv_util_service {
1160 u8 *recv_buffer; 1253 u8 *recv_buffer;
1254 void *channel;
1161 void (*util_cb)(void *); 1255 void (*util_cb)(void *);
1162 int (*util_init)(struct hv_util_service *); 1256 int (*util_init)(struct hv_util_service *);
1163 void (*util_deinit)(void); 1257 void (*util_deinit)(void);
@@ -1242,4 +1336,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1242 1336
1243extern __u32 vmbus_proto_version; 1337extern __u32 vmbus_proto_version;
1244 1338
1339int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
1340 const uuid_le *shv_host_servie_id);
1245#endif /* _HYPERV_H */ 1341#endif /* _HYPERV_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 452c0b0d2f32..3b1f6cef9513 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -163,6 +163,14 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
163/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ 163/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
164#define IEEE80211_MAX_FRAME_LEN 2352 164#define IEEE80211_MAX_FRAME_LEN 2352
165 165
166/* Maximal size of an A-MSDU */
167#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
168#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
169
170#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
171#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
172#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
173
166#define IEEE80211_MAX_SSID_LEN 32 174#define IEEE80211_MAX_SSID_LEN 32
167 175
168#define IEEE80211_MAX_MESH_ID_LEN 32 176#define IEEE80211_MAX_MESH_ID_LEN 32
@@ -843,6 +851,8 @@ enum ieee80211_vht_opmode_bits {
843}; 851};
844 852
845#define WLAN_SA_QUERY_TR_ID_LEN 2 853#define WLAN_SA_QUERY_TR_ID_LEN 2
854#define WLAN_MEMBERSHIP_LEN 8
855#define WLAN_USER_POSITION_LEN 16
846 856
847/** 857/**
848 * struct ieee80211_tpc_report_ie 858 * struct ieee80211_tpc_report_ie
@@ -991,6 +1001,11 @@ struct ieee80211_mgmt {
991 } __packed vht_opmode_notif; 1001 } __packed vht_opmode_notif;
992 struct { 1002 struct {
993 u8 action_code; 1003 u8 action_code;
1004 u8 membership[WLAN_MEMBERSHIP_LEN];
1005 u8 position[WLAN_USER_POSITION_LEN];
1006 } __packed vht_group_notif;
1007 struct {
1008 u8 action_code;
994 u8 dialog_token; 1009 u8 dialog_token;
995 u8 tpc_elem_id; 1010 u8 tpc_elem_id;
996 u8 tpc_elem_length; 1011 u8 tpc_elem_length;
@@ -1498,6 +1513,7 @@ struct ieee80211_vht_operation {
1498#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 1513#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
1499#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 1514#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
1500#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002 1515#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
1516#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
1501#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 1517#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
1502#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 1518#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
1503#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C 1519#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
@@ -2079,6 +2095,16 @@ enum ieee80211_tdls_actioncode {
2079#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5) 2095#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
2080#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) 2096#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
2081 2097
2098/* Defines the maximal number of MSDUs in an A-MSDU. */
2099#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB BIT(7)
2100#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB BIT(0)
2101
2102/*
2103 * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY
2104 * information element
2105 */
2106#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7)
2107
2082/* TDLS specific payload type in the LLC/SNAP header */ 2108/* TDLS specific payload type in the LLC/SNAP header */
2083#define WLAN_TDLS_SNAP_RFTYPE 0x2 2109#define WLAN_TDLS_SNAP_RFTYPE 0x2
2084 2110
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index a338a688ee4a..dcb89e3515db 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,10 +46,6 @@ struct br_ip_list {
46#define BR_LEARNING_SYNC BIT(9) 46#define BR_LEARNING_SYNC BIT(9)
47#define BR_PROXYARP_WIFI BIT(10) 47#define BR_PROXYARP_WIFI BIT(10)
48 48
49/* values as per ieee8021QBridgeFdbAgingTime */
50#define BR_MIN_AGEING_TIME (10 * HZ)
51#define BR_MAX_AGEING_TIME (1000000 * HZ)
52
53#define BR_DEFAULT_AGEING_TIME (300 * HZ) 49#define BR_DEFAULT_AGEING_TIME (300 * HZ)
54 50
55extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); 51extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index d5569734f672..548fd535fd02 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
28 return (struct ethhdr *)skb_mac_header(skb); 28 return (struct ethhdr *)skb_mac_header(skb);
29} 29}
30 30
31static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
32{
33 return (struct ethhdr *)skb_inner_mac_header(skb);
34}
35
31int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); 36int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
32 37
33extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); 38extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index b84e49c3a738..174f43f43aff 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -24,6 +24,7 @@ struct team_pcpu_stats {
24 struct u64_stats_sync syncp; 24 struct u64_stats_sync syncp;
25 u32 rx_dropped; 25 u32 rx_dropped;
26 u32 tx_dropped; 26 u32 tx_dropped;
27 u32 rx_nohandler;
27}; 28};
28 29
29struct team; 30struct team;
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 9c9de11549a7..12f6fba6d21a 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,11 +37,6 @@ static inline struct igmpv3_query *
37 return (struct igmpv3_query *)skb_transport_header(skb); 37 return (struct igmpv3_query *)skb_transport_header(skb);
38} 38}
39 39
40extern int sysctl_igmp_llm_reports;
41extern int sysctl_igmp_max_memberships;
42extern int sysctl_igmp_max_msf;
43extern int sysctl_igmp_qrv;
44
45struct ip_sf_socklist { 40struct ip_sf_socklist {
46 unsigned int sl_max; 41 unsigned int sl_max;
47 unsigned int sl_count; 42 unsigned int sl_count;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 2fe939c73cd2..6670c3d25c58 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -119,6 +119,8 @@ struct st_sensor_bdu {
119 * @addr: address of the register. 119 * @addr: address of the register.
120 * @mask_int1: mask to enable/disable IRQ on INT1 pin. 120 * @mask_int1: mask to enable/disable IRQ on INT1 pin.
121 * @mask_int2: mask to enable/disable IRQ on INT2 pin. 121 * @mask_int2: mask to enable/disable IRQ on INT2 pin.
122 * @addr_ihl: address to enable/disable active low on the INT lines.
123 * @mask_ihl: mask to enable/disable active low on the INT lines.
122 * struct ig1 - represents the Interrupt Generator 1 of sensors. 124 * struct ig1 - represents the Interrupt Generator 1 of sensors.
123 * @en_addr: address of the enable ig1 register. 125 * @en_addr: address of the enable ig1 register.
124 * @en_mask: mask to write the on/off value for enable. 126 * @en_mask: mask to write the on/off value for enable.
@@ -127,6 +129,8 @@ struct st_sensor_data_ready_irq {
127 u8 addr; 129 u8 addr;
128 u8 mask_int1; 130 u8 mask_int1;
129 u8 mask_int2; 131 u8 mask_int2;
132 u8 addr_ihl;
133 u8 mask_ihl;
130 struct { 134 struct {
131 u8 en_addr; 135 u8 en_addr;
132 u8 en_mask; 136 u8 en_mask;
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b5894118755f..b2b16772c651 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -180,18 +180,18 @@ struct iio_event_spec {
180 * @address: Driver specific identifier. 180 * @address: Driver specific identifier.
181 * @scan_index: Monotonic index to give ordering in scans when read 181 * @scan_index: Monotonic index to give ordering in scans when read
182 * from a buffer. 182 * from a buffer.
183 * @scan_type: Sign: 's' or 'u' to specify signed or unsigned 183 * @scan_type: sign: 's' or 'u' to specify signed or unsigned
184 * realbits: Number of valid bits of data 184 * realbits: Number of valid bits of data
185 * storage_bits: Realbits + padding 185 * storagebits: Realbits + padding
186 * shift: Shift right by this before masking out 186 * shift: Shift right by this before masking out
187 * realbits. 187 * realbits.
188 * endianness: little or big endian
189 * repeat: Number of times real/storage bits 188 * repeat: Number of times real/storage bits
190 * repeats. When the repeat element is 189 * repeats. When the repeat element is
191 * more than 1, then the type element in 190 * more than 1, then the type element in
192 * sysfs will show a repeat value. 191 * sysfs will show a repeat value.
193 * Otherwise, the number of repetitions is 192 * Otherwise, the number of repetitions is
194 * omitted. 193 * omitted.
194 * endianness: little or big endian
195 * @info_mask_separate: What information is to be exported that is specific to 195 * @info_mask_separate: What information is to be exported that is specific to
196 * this channel. 196 * this channel.
197 * @info_mask_shared_by_type: What information is to be exported that is shared 197 * @info_mask_shared_by_type: What information is to be exported that is shared
@@ -448,7 +448,7 @@ struct iio_buffer_setup_ops {
448 * @buffer: [DRIVER] any buffer present 448 * @buffer: [DRIVER] any buffer present
449 * @buffer_list: [INTERN] list of all buffers currently attached 449 * @buffer_list: [INTERN] list of all buffers currently attached
450 * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux 450 * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
451 * @mlock: [INTERN] lock used to prevent simultaneous device state 451 * @mlock: [DRIVER] lock used to prevent simultaneous device state
452 * changes 452 * changes
453 * @available_scan_masks: [DRIVER] optional array of allowed bitmasks 453 * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
454 * @masklength: [INTERN] the length of the mask established from 454 * @masklength: [INTERN] the length of the mask established from
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 120ccc53fcb7..e6516cbbe9bf 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -18,8 +18,9 @@ extern int ima_bprm_check(struct linux_binprm *bprm);
18extern int ima_file_check(struct file *file, int mask, int opened); 18extern int ima_file_check(struct file *file, int mask, int opened);
19extern void ima_file_free(struct file *file); 19extern void ima_file_free(struct file *file);
20extern int ima_file_mmap(struct file *file, unsigned long prot); 20extern int ima_file_mmap(struct file *file, unsigned long prot);
21extern int ima_module_check(struct file *file); 21extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
22extern int ima_fw_from_file(struct file *file, char *buf, size_t size); 22extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
23 enum kernel_read_file_id id);
23 24
24#else 25#else
25static inline int ima_bprm_check(struct linux_binprm *bprm) 26static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -42,12 +43,13 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
42 return 0; 43 return 0;
43} 44}
44 45
45static inline int ima_module_check(struct file *file) 46static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
46{ 47{
47 return 0; 48 return 0;
48} 49}
49 50
50static inline int ima_fw_from_file(struct file *file, char *buf, size_t size) 51static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
52 enum kernel_read_file_id id)
51{ 53{
52 return 0; 54 return 0;
53} 55}
diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h
deleted file mode 100644
index 9a715cfa1fe3..000000000000
--- a/include/linux/inet_lro.h
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * linux/include/linux/inet_lro.h
3 *
4 * Large Receive Offload (ipv4 / tcp)
5 *
6 * (C) Copyright IBM Corp. 2007
7 *
8 * Authors:
9 * Jan-Bernd Themann <themann@de.ibm.com>
10 * Christoph Raisch <raisch@de.ibm.com>
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef __INET_LRO_H_
29#define __INET_LRO_H_
30
31#include <net/ip.h>
32#include <net/tcp.h>
33
34/*
35 * LRO statistics
36 */
37
38struct net_lro_stats {
39 unsigned long aggregated;
40 unsigned long flushed;
41 unsigned long no_desc;
42};
43
44/*
45 * LRO descriptor for a tcp session
46 */
47struct net_lro_desc {
48 struct sk_buff *parent;
49 struct sk_buff *last_skb;
50 struct skb_frag_struct *next_frag;
51 struct iphdr *iph;
52 struct tcphdr *tcph;
53 __wsum data_csum;
54 __be32 tcp_rcv_tsecr;
55 __be32 tcp_rcv_tsval;
56 __be32 tcp_ack;
57 u32 tcp_next_seq;
58 u32 skb_tot_frags_len;
59 u16 ip_tot_len;
60 u16 tcp_saw_tstamp; /* timestamps enabled */
61 __be16 tcp_window;
62 int pkt_aggr_cnt; /* counts aggregated packets */
63 int vlan_packet;
64 int mss;
65 int active;
66};
67
68/*
69 * Large Receive Offload (LRO) Manager
70 *
71 * Fields must be set by driver
72 */
73
74struct net_lro_mgr {
75 struct net_device *dev;
76 struct net_lro_stats stats;
77
78 /* LRO features */
79 unsigned long features;
80#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */
81#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted
82 from received packets and eth protocol
83 is still ETH_P_8021Q */
84
85 /*
86 * Set for generated SKBs that are not added to
87 * the frag list in fragmented mode
88 */
89 u32 ip_summed;
90 u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
91 * or CHECKSUM_NONE */
92
93 int max_desc; /* Max number of LRO descriptors */
94 int max_aggr; /* Max number of LRO packets to be aggregated */
95
96 int frag_align_pad; /* Padding required to properly align layer 3
97 * headers in generated skb when using frags */
98
99 struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
100
101 /*
102 * Optimized driver functions
103 *
104 * get_skb_header: returns tcp and ip header for packet in SKB
105 */
106 int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
107 void **tcpudp_hdr, u64 *hdr_flags, void *priv);
108
109 /* hdr_flags: */
110#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
111#define LRO_TCP 2 /* tcpudp_hdr is TCP header */
112
113 /*
114 * get_frag_header: returns mac, tcp and ip header for packet in SKB
115 *
116 * @hdr_flags: Indicate what kind of LRO has to be done
117 * (IPv4/IPv6/TCP/UDP)
118 */
119 int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
120 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
121 void *priv);
122};
123
124/*
125 * Processes a SKB
126 *
127 * @lro_mgr: LRO manager to use
128 * @skb: SKB to aggregate
129 * @priv: Private data that may be used by driver functions
130 * (for example get_tcp_ip_hdr)
131 */
132
133void lro_receive_skb(struct net_lro_mgr *lro_mgr,
134 struct sk_buff *skb,
135 void *priv);
136/*
137 * Forward all aggregated SKBs held by lro_mgr to network stack
138 */
139
140void lro_flush_all(struct net_lro_mgr *lro_mgr);
141
142#endif
diff --git a/include/linux/init.h b/include/linux/init.h
index b449f378f995..aedb254abc37 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -142,6 +142,10 @@ void prepare_namespace(void);
142void __init load_default_modules(void); 142void __init load_default_modules(void);
143int __init init_rootfs(void); 143int __init init_rootfs(void);
144 144
145#ifdef CONFIG_DEBUG_RODATA
146void mark_rodata_ro(void);
147#endif
148
145extern void (*late_time_init)(void); 149extern void (*late_time_init)(void);
146 150
147extern bool initcall_debug; 151extern bool initcall_debug;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0e95fcc75b2a..9fcabeb07787 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -125,6 +125,16 @@ struct irqaction {
125 125
126extern irqreturn_t no_action(int cpl, void *dev_id); 126extern irqreturn_t no_action(int cpl, void *dev_id);
127 127
128/*
129 * If a (PCI) device interrupt is not connected we set dev->irq to
130 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
131 * can distingiush that case from other error returns.
132 *
133 * 0x80000000 is guaranteed to be outside the available range of interrupts
134 * and easy to distinguish from other possible incorrect values.
135 */
136#define IRQ_NOTCONNECTED (1U << 31)
137
128extern int __must_check 138extern int __must_check
129request_threaded_irq(unsigned int irq, irq_handler_t handler, 139request_threaded_irq(unsigned int irq, irq_handler_t handler,
130 irq_handler_t thread_fn, 140 irq_handler_t thread_fn,
@@ -673,4 +683,24 @@ extern int early_irq_init(void);
673extern int arch_probe_nr_irqs(void); 683extern int arch_probe_nr_irqs(void);
674extern int arch_early_irq_init(void); 684extern int arch_early_irq_init(void);
675 685
686#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
687/*
688 * We want to know which function is an entrypoint of a hardirq or a softirq.
689 */
690#define __irq_entry __attribute__((__section__(".irqentry.text")))
691#define __softirq_entry \
692 __attribute__((__section__(".softirqentry.text")))
693
694/* Limits of hardirq entrypoints */
695extern char __irqentry_text_start[];
696extern char __irqentry_text_end[];
697/* Limits of softirq entrypoints */
698extern char __softirqentry_text_start[];
699extern char __softirqentry_text_end[];
700
701#else
702#define __irq_entry
703#define __softirq_entry
704#endif
705
676#endif 706#endif
diff --git a/include/linux/io.h b/include/linux/io.h
index 32403b5716e5..e2c8419278c1 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -135,6 +135,7 @@ enum {
135 /* See memremap() kernel-doc for usage description... */ 135 /* See memremap() kernel-doc for usage description... */
136 MEMREMAP_WB = 1 << 0, 136 MEMREMAP_WB = 1 << 0,
137 MEMREMAP_WT = 1 << 1, 137 MEMREMAP_WT = 1 << 1,
138 MEMREMAP_WC = 1 << 2,
138}; 139};
139 140
140void *memremap(resource_size_t offset, size_t size, unsigned long flags); 141void *memremap(resource_size_t offset, size_t size, unsigned long flags);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a5c539fa5d2b..ef7a6ecd8584 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -195,9 +195,7 @@ struct iommu_ops {
195 /* Get the number of windows per domain */ 195 /* Get the number of windows per domain */
196 u32 (*domain_get_windows)(struct iommu_domain *domain); 196 u32 (*domain_get_windows)(struct iommu_domain *domain);
197 197
198#ifdef CONFIG_OF_IOMMU
199 int (*of_xlate)(struct device *dev, struct of_phandle_args *args); 198 int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
200#endif
201 199
202 unsigned long pgsize_bitmap; 200 unsigned long pgsize_bitmap;
203 void *priv; 201 void *priv;
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 24bea087e7af..0b65543dc6cf 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -20,6 +20,7 @@ struct resource {
20 resource_size_t end; 20 resource_size_t end;
21 const char *name; 21 const char *name;
22 unsigned long flags; 22 unsigned long flags;
23 unsigned long desc;
23 struct resource *parent, *sibling, *child; 24 struct resource *parent, *sibling, *child;
24}; 25};
25 26
@@ -49,12 +50,19 @@ struct resource {
49#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */ 50#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
50#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */ 51#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
51 52
53#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
54#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
55
52#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ 56#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
57
53#define IORESOURCE_DISABLED 0x10000000 58#define IORESOURCE_DISABLED 0x10000000
54#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */ 59#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
55#define IORESOURCE_AUTO 0x40000000 60#define IORESOURCE_AUTO 0x40000000
56#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ 61#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
57 62
63/* I/O resource extended types */
64#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM)
65
58/* PnP IRQ specific bits (IORESOURCE_BITS) */ 66/* PnP IRQ specific bits (IORESOURCE_BITS) */
59#define IORESOURCE_IRQ_HIGHEDGE (1<<0) 67#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
60#define IORESOURCE_IRQ_LOWEDGE (1<<1) 68#define IORESOURCE_IRQ_LOWEDGE (1<<1)
@@ -98,13 +106,27 @@ struct resource {
98 106
99/* PCI ROM control bits (IORESOURCE_BITS) */ 107/* PCI ROM control bits (IORESOURCE_BITS) */
100#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ 108#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
101#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */ 109#define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */
102#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
103#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
104 110
105/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ 111/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
106#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ 112#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
107 113
114/*
115 * I/O Resource Descriptors
116 *
117 * Descriptors are used by walk_iomem_res_desc() and region_intersects()
118 * for searching a specific resource range in the iomem table. Assign
119 * a new descriptor when a resource range supports the search interfaces.
120 * Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
121 */
122enum {
123 IORES_DESC_NONE = 0,
124 IORES_DESC_CRASH_KERNEL = 1,
125 IORES_DESC_ACPI_TABLES = 2,
126 IORES_DESC_ACPI_NV_STORAGE = 3,
127 IORES_DESC_PERSISTENT_MEMORY = 4,
128 IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
129};
108 130
109/* helpers to define resources */ 131/* helpers to define resources */
110#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \ 132#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
@@ -113,6 +135,7 @@ struct resource {
113 .end = (_start) + (_size) - 1, \ 135 .end = (_start) + (_size) - 1, \
114 .name = (_name), \ 136 .name = (_name), \
115 .flags = (_flags), \ 137 .flags = (_flags), \
138 .desc = IORES_DESC_NONE, \
116 } 139 }
117 140
118#define DEFINE_RES_IO_NAMED(_start, _size, _name) \ 141#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
@@ -149,6 +172,7 @@ extern void reserve_region_with_split(struct resource *root,
149extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); 172extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
150extern int insert_resource(struct resource *parent, struct resource *new); 173extern int insert_resource(struct resource *parent, struct resource *new);
151extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); 174extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
175extern int remove_resource(struct resource *old);
152extern void arch_remove_reservations(struct resource *avail); 176extern void arch_remove_reservations(struct resource *avail);
153extern int allocate_resource(struct resource *root, struct resource *new, 177extern int allocate_resource(struct resource *root, struct resource *new,
154 resource_size_t size, resource_size_t min, 178 resource_size_t size, resource_size_t min,
@@ -170,6 +194,10 @@ static inline unsigned long resource_type(const struct resource *res)
170{ 194{
171 return res->flags & IORESOURCE_TYPE_BITS; 195 return res->flags & IORESOURCE_TYPE_BITS;
172} 196}
197static inline unsigned long resource_ext_type(const struct resource *res)
198{
199 return res->flags & IORESOURCE_EXT_TYPE_BITS;
200}
173/* True iff r1 completely contains r2 */ 201/* True iff r1 completely contains r2 */
174static inline bool resource_contains(struct resource *r1, struct resource *r2) 202static inline bool resource_contains(struct resource *r1, struct resource *r2)
175{ 203{
@@ -239,8 +267,8 @@ extern int
239walk_system_ram_res(u64 start, u64 end, void *arg, 267walk_system_ram_res(u64 start, u64 end, void *arg,
240 int (*func)(u64, u64, void *)); 268 int (*func)(u64, u64, void *));
241extern int 269extern int
242walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg, 270walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
243 int (*func)(u64, u64, void *)); 271 void *arg, int (*func)(u64, u64, void *));
244 272
245/* True if any part of r1 overlaps r2 */ 273/* True if any part of r1 overlaps r2 */
246static inline bool resource_overlaps(struct resource *r1, struct resource *r2) 274static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 402753bccafa..7edc14fb66b6 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -50,16 +50,19 @@ struct ipv6_devconf {
50 __s32 mc_forwarding; 50 __s32 mc_forwarding;
51#endif 51#endif
52 __s32 disable_ipv6; 52 __s32 disable_ipv6;
53 __s32 drop_unicast_in_l2_multicast;
53 __s32 accept_dad; 54 __s32 accept_dad;
54 __s32 force_tllao; 55 __s32 force_tllao;
55 __s32 ndisc_notify; 56 __s32 ndisc_notify;
56 __s32 suppress_frag_ndisc; 57 __s32 suppress_frag_ndisc;
57 __s32 accept_ra_mtu; 58 __s32 accept_ra_mtu;
59 __s32 drop_unsolicited_na;
58 struct ipv6_stable_secret { 60 struct ipv6_stable_secret {
59 bool initialized; 61 bool initialized;
60 struct in6_addr secret; 62 struct in6_addr secret;
61 } stable_secret; 63 } stable_secret;
62 __s32 use_oif_addrs_only; 64 __s32 use_oif_addrs_only;
65 __s32 keep_addr_on_down;
63 void *sysctl; 66 void *sysctl;
64}; 67};
65 68
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3c1c96786248..c4de62348ff2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -133,17 +133,23 @@ struct irq_domain;
133 * Use accessor functions to deal with it 133 * Use accessor functions to deal with it
134 * @node: node index useful for balancing 134 * @node: node index useful for balancing
135 * @handler_data: per-IRQ data for the irq_chip methods 135 * @handler_data: per-IRQ data for the irq_chip methods
136 * @affinity: IRQ affinity on SMP 136 * @affinity: IRQ affinity on SMP. If this is an IPI
137 * related irq, then this is the mask of the
138 * CPUs to which an IPI can be sent.
137 * @msi_desc: MSI descriptor 139 * @msi_desc: MSI descriptor
140 * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional.
138 */ 141 */
139struct irq_common_data { 142struct irq_common_data {
140 unsigned int state_use_accessors; 143 unsigned int __private state_use_accessors;
141#ifdef CONFIG_NUMA 144#ifdef CONFIG_NUMA
142 unsigned int node; 145 unsigned int node;
143#endif 146#endif
144 void *handler_data; 147 void *handler_data;
145 struct msi_desc *msi_desc; 148 struct msi_desc *msi_desc;
146 cpumask_var_t affinity; 149 cpumask_var_t affinity;
150#ifdef CONFIG_GENERIC_IRQ_IPI
151 unsigned int ipi_offset;
152#endif
147}; 153};
148 154
149/** 155/**
@@ -208,7 +214,7 @@ enum {
208 IRQD_FORWARDED_TO_VCPU = (1 << 20), 214 IRQD_FORWARDED_TO_VCPU = (1 << 20),
209}; 215};
210 216
211#define __irqd_to_state(d) ((d)->common->state_use_accessors) 217#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
212 218
213static inline bool irqd_is_setaffinity_pending(struct irq_data *d) 219static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
214{ 220{
@@ -299,6 +305,8 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
299 __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; 305 __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
300} 306}
301 307
308#undef __irqd_to_state
309
302static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 310static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
303{ 311{
304 return d->hwirq; 312 return d->hwirq;
@@ -341,6 +349,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
341 * @irq_get_irqchip_state: return the internal state of an interrupt 349 * @irq_get_irqchip_state: return the internal state of an interrupt
342 * @irq_set_irqchip_state: set the internal state of a interrupt 350 * @irq_set_irqchip_state: set the internal state of a interrupt
343 * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine 351 * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
352 * @ipi_send_single: send a single IPI to destination cpus
353 * @ipi_send_mask: send an IPI to destination cpus in cpumask
344 * @flags: chip specific flags 354 * @flags: chip specific flags
345 */ 355 */
346struct irq_chip { 356struct irq_chip {
@@ -385,6 +395,9 @@ struct irq_chip {
385 395
386 int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info); 396 int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
387 397
398 void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
399 void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
400
388 unsigned long flags; 401 unsigned long flags;
389}; 402};
390 403
@@ -934,4 +947,12 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
934 return readl(gc->reg_base + reg_offset); 947 return readl(gc->reg_base + reg_offset);
935} 948}
936 949
950/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
951#define INVALID_HWIRQ (~0UL)
952irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
953int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
954int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
955int ipi_send_single(unsigned int virq, unsigned int cpu);
956int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
957
937#endif /* _LINUX_IRQ_H */ 958#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index ce824db48d64..80f89e4a29ac 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -261,9 +261,6 @@ extern void gic_write_compare(cycle_t cnt);
261extern void gic_write_cpu_compare(cycle_t cnt, int cpu); 261extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
262extern void gic_start_count(void); 262extern void gic_start_count(void);
263extern void gic_stop_count(void); 263extern void gic_stop_count(void);
264extern void gic_send_ipi(unsigned int intr);
265extern unsigned int plat_ipi_call_int_xlate(unsigned int);
266extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
267extern int gic_get_c0_compare_int(void); 264extern int gic_get_c0_compare_int(void);
268extern int gic_get_c0_perfcount_int(void); 265extern int gic_get_c0_perfcount_int(void);
269extern int gic_get_c0_fdc_int(void); 266extern int gic_get_c0_fdc_int(void);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 04579d9fbce4..2aed04396210 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -74,6 +74,8 @@ enum irq_domain_bus_token {
74 DOMAIN_BUS_PCI_MSI, 74 DOMAIN_BUS_PCI_MSI,
75 DOMAIN_BUS_PLATFORM_MSI, 75 DOMAIN_BUS_PLATFORM_MSI,
76 DOMAIN_BUS_NEXUS, 76 DOMAIN_BUS_NEXUS,
77 DOMAIN_BUS_IPI,
78 DOMAIN_BUS_FSL_MC_MSI,
77}; 79};
78 80
79/** 81/**
@@ -172,6 +174,12 @@ enum {
172 /* Core calls alloc/free recursive through the domain hierarchy. */ 174 /* Core calls alloc/free recursive through the domain hierarchy. */
173 IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), 175 IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
174 176
177 /* Irq domain is an IPI domain with virq per cpu */
178 IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
179
180 /* Irq domain is an IPI domain with single virq */
181 IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
182
175 /* 183 /*
176 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved 184 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
177 * for implementation specific purposes and ignored by the 185 * for implementation specific purposes and ignored by the
@@ -206,6 +214,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
206extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, 214extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
207 enum irq_domain_bus_token bus_token); 215 enum irq_domain_bus_token bus_token);
208extern void irq_set_default_host(struct irq_domain *host); 216extern void irq_set_default_host(struct irq_domain *host);
217extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
218 irq_hw_number_t hwirq, int node);
209 219
210static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) 220static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
211{ 221{
@@ -335,6 +345,11 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
335 const u32 *intspec, unsigned int intsize, 345 const u32 *intspec, unsigned int intsize,
336 irq_hw_number_t *out_hwirq, unsigned int *out_type); 346 irq_hw_number_t *out_hwirq, unsigned int *out_type);
337 347
348/* IPI functions */
349unsigned int irq_reserve_ipi(struct irq_domain *domain,
350 const struct cpumask *dest);
351void irq_destroy_ipi(unsigned int irq);
352
338/* V2 interfaces to support hierarchy IRQ domains. */ 353/* V2 interfaces to support hierarchy IRQ domains. */
339extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, 354extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
340 unsigned int virq); 355 unsigned int virq);
@@ -400,6 +415,22 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
400{ 415{
401 return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; 416 return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
402} 417}
418
419static inline bool irq_domain_is_ipi(struct irq_domain *domain)
420{
421 return domain->flags &
422 (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
423}
424
425static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
426{
427 return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU;
428}
429
430static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
431{
432 return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
433}
403#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 434#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
404static inline void irq_domain_activate_irq(struct irq_data *data) { } 435static inline void irq_domain_activate_irq(struct irq_data *data) { }
405static inline void irq_domain_deactivate_irq(struct irq_data *data) { } 436static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -413,6 +444,21 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
413{ 444{
414 return false; 445 return false;
415} 446}
447
448static inline bool irq_domain_is_ipi(struct irq_domain *domain)
449{
450 return false;
451}
452
453static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
454{
455 return false;
456}
457
458static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
459{
460 return false;
461}
416#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 462#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
417 463
418#else /* CONFIG_IRQ_DOMAIN */ 464#else /* CONFIG_IRQ_DOMAIN */
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index 2a8b1659bf35..548d55395488 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -23,6 +23,7 @@ enum iscsi_boot_eth_properties_enum {
23 ISCSI_BOOT_ETH_INDEX, 23 ISCSI_BOOT_ETH_INDEX,
24 ISCSI_BOOT_ETH_FLAGS, 24 ISCSI_BOOT_ETH_FLAGS,
25 ISCSI_BOOT_ETH_IP_ADDR, 25 ISCSI_BOOT_ETH_IP_ADDR,
26 ISCSI_BOOT_ETH_PREFIX_LEN,
26 ISCSI_BOOT_ETH_SUBNET_MASK, 27 ISCSI_BOOT_ETH_SUBNET_MASK,
27 ISCSI_BOOT_ETH_ORIGIN, 28 ISCSI_BOOT_ETH_ORIGIN,
28 ISCSI_BOOT_ETH_GATEWAY, 29 ISCSI_BOOT_ETH_GATEWAY,
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
index 1e9a0f2a8626..df97c8444f5d 100644
--- a/include/linux/isdn.h
+++ b/include/linux/isdn.h
@@ -319,6 +319,7 @@ typedef struct modem_info {
319 int online; /* 1 = B-Channel is up, drop data */ 319 int online; /* 1 = B-Channel is up, drop data */
320 /* 2 = B-Channel is up, deliver d.*/ 320 /* 2 = B-Channel is up, deliver d.*/
321 int dialing; /* Dial in progress or ATA */ 321 int dialing; /* Dial in progress or ATA */
322 int closing;
322 int rcvsched; /* Receive needs schedule */ 323 int rcvsched; /* Receive needs schedule */
323 int isdn_driver; /* Index to isdn-driver */ 324 int isdn_driver; /* Index to isdn-driver */
324 int isdn_channel; /* Index to isdn-channel */ 325 int isdn_channel; /* Index to isdn-channel */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 65407f6c9120..fd1083c46c61 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -200,7 +200,7 @@ typedef struct journal_block_tag_s
200 __be32 t_blocknr_high; /* most-significant high 32bits. */ 200 __be32 t_blocknr_high; /* most-significant high 32bits. */
201} journal_block_tag_t; 201} journal_block_tag_t;
202 202
203/* Tail of descriptor block, for checksumming */ 203/* Tail of descriptor or revoke block, for checksumming */
204struct jbd2_journal_block_tail { 204struct jbd2_journal_block_tail {
205 __be32 t_checksum; /* crc32c(uuid+descr_block) */ 205 __be32 t_checksum; /* crc32c(uuid+descr_block) */
206}; 206};
@@ -215,11 +215,6 @@ typedef struct jbd2_journal_revoke_header_s
215 __be32 r_count; /* Count of bytes used in the block */ 215 __be32 r_count; /* Count of bytes used in the block */
216} jbd2_journal_revoke_header_t; 216} jbd2_journal_revoke_header_t;
217 217
218/* Tail of revoke block, for checksumming */
219struct jbd2_journal_revoke_tail {
220 __be32 r_checksum; /* crc32c(uuid+revoke_block) */
221};
222
223/* Definitions for the journal tag flags word: */ 218/* Definitions for the journal tag flags word: */
224#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */ 219#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */
225#define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */ 220#define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */
@@ -1137,7 +1132,8 @@ static inline void jbd2_unfile_log_bh(struct buffer_head *bh)
1137} 1132}
1138 1133
1139/* Log buffer allocation */ 1134/* Log buffer allocation */
1140struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal); 1135struct buffer_head *jbd2_journal_get_descriptor_buffer(transaction_t *, int);
1136void jbd2_descriptor_block_csum_set(journal_t *, struct buffer_head *);
1141int jbd2_journal_next_log_block(journal_t *, unsigned long long *); 1137int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
1142int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, 1138int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
1143 unsigned long *block); 1139 unsigned long *block);
@@ -1327,10 +1323,8 @@ extern int jbd2_journal_init_revoke_caches(void);
1327extern void jbd2_journal_destroy_revoke(journal_t *); 1323extern void jbd2_journal_destroy_revoke(journal_t *);
1328extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); 1324extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
1329extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *); 1325extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
1330extern void jbd2_journal_write_revoke_records(journal_t *journal, 1326extern void jbd2_journal_write_revoke_records(transaction_t *transaction,
1331 transaction_t *transaction, 1327 struct list_head *log_bufs);
1332 struct list_head *log_bufs,
1333 int write_op);
1334 1328
1335/* Recovery revoke support */ 1329/* Recovery revoke support */
1336extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t); 1330extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 0fdc798e3ff7..737371b56044 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -48,19 +48,28 @@ void kasan_unpoison_task_stack(struct task_struct *task);
48void kasan_alloc_pages(struct page *page, unsigned int order); 48void kasan_alloc_pages(struct page *page, unsigned int order);
49void kasan_free_pages(struct page *page, unsigned int order); 49void kasan_free_pages(struct page *page, unsigned int order);
50 50
51void kasan_cache_create(struct kmem_cache *cache, size_t *size,
52 unsigned long *flags);
53
51void kasan_poison_slab(struct page *page); 54void kasan_poison_slab(struct page *page);
52void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); 55void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
53void kasan_poison_object_data(struct kmem_cache *cache, void *object); 56void kasan_poison_object_data(struct kmem_cache *cache, void *object);
54 57
55void kasan_kmalloc_large(const void *ptr, size_t size); 58void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
56void kasan_kfree_large(const void *ptr); 59void kasan_kfree_large(const void *ptr);
57void kasan_kfree(void *ptr); 60void kasan_kfree(void *ptr);
58void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size); 61void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
59void kasan_krealloc(const void *object, size_t new_size); 62 gfp_t flags);
63void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
60 64
61void kasan_slab_alloc(struct kmem_cache *s, void *object); 65void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
62void kasan_slab_free(struct kmem_cache *s, void *object); 66void kasan_slab_free(struct kmem_cache *s, void *object);
63 67
68struct kasan_cache {
69 int alloc_meta_offset;
70 int free_meta_offset;
71};
72
64int kasan_module_alloc(void *addr, size_t size); 73int kasan_module_alloc(void *addr, size_t size);
65void kasan_free_shadow(const struct vm_struct *vm); 74void kasan_free_shadow(const struct vm_struct *vm);
66 75
@@ -76,20 +85,26 @@ static inline void kasan_disable_current(void) {}
76static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} 85static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
77static inline void kasan_free_pages(struct page *page, unsigned int order) {} 86static inline void kasan_free_pages(struct page *page, unsigned int order) {}
78 87
88static inline void kasan_cache_create(struct kmem_cache *cache,
89 size_t *size,
90 unsigned long *flags) {}
91
79static inline void kasan_poison_slab(struct page *page) {} 92static inline void kasan_poison_slab(struct page *page) {}
80static inline void kasan_unpoison_object_data(struct kmem_cache *cache, 93static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
81 void *object) {} 94 void *object) {}
82static inline void kasan_poison_object_data(struct kmem_cache *cache, 95static inline void kasan_poison_object_data(struct kmem_cache *cache,
83 void *object) {} 96 void *object) {}
84 97
85static inline void kasan_kmalloc_large(void *ptr, size_t size) {} 98static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
86static inline void kasan_kfree_large(const void *ptr) {} 99static inline void kasan_kfree_large(const void *ptr) {}
87static inline void kasan_kfree(void *ptr) {} 100static inline void kasan_kfree(void *ptr) {}
88static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, 101static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
89 size_t size) {} 102 size_t size, gfp_t flags) {}
90static inline void kasan_krealloc(const void *object, size_t new_size) {} 103static inline void kasan_krealloc(const void *object, size_t new_size,
104 gfp_t flags) {}
91 105
92static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} 106static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
107 gfp_t flags) {}
93static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} 108static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
94 109
95static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } 110static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
new file mode 100644
index 000000000000..2883ac98c280
--- /dev/null
+++ b/include/linux/kcov.h
@@ -0,0 +1,29 @@
1#ifndef _LINUX_KCOV_H
2#define _LINUX_KCOV_H
3
4#include <uapi/linux/kcov.h>
5
6struct task_struct;
7
8#ifdef CONFIG_KCOV
9
10void kcov_task_init(struct task_struct *t);
11void kcov_task_exit(struct task_struct *t);
12
13enum kcov_mode {
14 /* Coverage collection is not enabled yet. */
15 KCOV_MODE_DISABLED = 0,
16 /*
17 * Tracing coverage collection mode.
18 * Covered PCs are collected in a per-task buffer.
19 */
20 KCOV_MODE_TRACE = 1,
21};
22
23#else
24
25static inline void kcov_task_init(struct task_struct *t) {}
26static inline void kcov_task_exit(struct task_struct *t) {}
27
28#endif /* CONFIG_KCOV */
29#endif /* _LINUX_KCOV_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f31638c6e873..2f7775e229b0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -64,7 +64,7 @@
64#define round_down(x, y) ((x) & ~__round_mask(x, y)) 64#define round_down(x, y) ((x) & ~__round_mask(x, y))
65 65
66#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) 66#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
67#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) 67#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
68#define DIV_ROUND_UP_ULL(ll,d) \ 68#define DIV_ROUND_UP_ULL(ll,d) \
69 ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; }) 69 ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
70 70
@@ -255,7 +255,7 @@ extern long (*panic_blink)(int state);
255__printf(1, 2) 255__printf(1, 2)
256void panic(const char *fmt, ...) 256void panic(const char *fmt, ...)
257 __noreturn __cold; 257 __noreturn __cold;
258void nmi_panic_self_stop(struct pt_regs *); 258void nmi_panic(struct pt_regs *regs, const char *msg);
259extern void oops_enter(void); 259extern void oops_enter(void);
260extern void oops_exit(void); 260extern void oops_exit(void);
261void print_oops_end_marker(void); 261void print_oops_end_marker(void);
@@ -357,6 +357,7 @@ int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
357int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); 357int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
358int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); 358int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
359int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); 359int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
360int __must_check kstrtobool(const char *s, bool *res);
360 361
361int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); 362int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
362int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); 363int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
@@ -368,6 +369,7 @@ int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigne
368int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); 369int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
369int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); 370int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
370int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); 371int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
372int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
371 373
372static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) 374static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
373{ 375{
@@ -455,25 +457,6 @@ extern atomic_t panic_cpu;
455#define PANIC_CPU_INVALID -1 457#define PANIC_CPU_INVALID -1
456 458
457/* 459/*
458 * A variant of panic() called from NMI context. We return if we've already
459 * panicked on this CPU. If another CPU already panicked, loop in
460 * nmi_panic_self_stop() which can provide architecture dependent code such
461 * as saving register state for crash dump.
462 */
463#define nmi_panic(regs, fmt, ...) \
464do { \
465 int old_cpu, cpu; \
466 \
467 cpu = raw_smp_processor_id(); \
468 old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); \
469 \
470 if (old_cpu == PANIC_CPU_INVALID) \
471 panic(fmt, ##__VA_ARGS__); \
472 else if (old_cpu != cpu) \
473 nmi_panic_self_stop(regs); \
474} while (0)
475
476/*
477 * Only to be used by arch init code. If the user over-wrote the default 460 * Only to be used by arch init code. If the user over-wrote the default
478 * CONFIG_PANIC_TIMEOUT, honor it. 461 * CONFIG_PANIC_TIMEOUT, honor it.
479 */ 462 */
@@ -635,7 +618,7 @@ do { \
635 618
636#define do_trace_printk(fmt, args...) \ 619#define do_trace_printk(fmt, args...) \
637do { \ 620do { \
638 static const char *trace_printk_fmt \ 621 static const char *trace_printk_fmt __used \
639 __attribute__((section("__trace_printk_fmt"))) = \ 622 __attribute__((section("__trace_printk_fmt"))) = \
640 __builtin_constant_p(fmt) ? fmt : NULL; \ 623 __builtin_constant_p(fmt) ? fmt : NULL; \
641 \ 624 \
@@ -679,7 +662,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
679 */ 662 */
680 663
681#define trace_puts(str) ({ \ 664#define trace_puts(str) ({ \
682 static const char *trace_printk_fmt \ 665 static const char *trace_printk_fmt __used \
683 __attribute__((section("__trace_printk_fmt"))) = \ 666 __attribute__((section("__trace_printk_fmt"))) = \
684 __builtin_constant_p(str) ? str : NULL; \ 667 __builtin_constant_p(str) ? str : NULL; \
685 \ 668 \
@@ -701,7 +684,7 @@ extern void trace_dump_stack(int skip);
701#define ftrace_vprintk(fmt, vargs) \ 684#define ftrace_vprintk(fmt, vargs) \
702do { \ 685do { \
703 if (__builtin_constant_p(fmt)) { \ 686 if (__builtin_constant_p(fmt)) { \
704 static const char *trace_printk_fmt \ 687 static const char *trace_printk_fmt __used \
705 __attribute__((section("__trace_printk_fmt"))) = \ 688 __attribute__((section("__trace_printk_fmt"))) = \
706 __builtin_constant_p(fmt) ? fmt : NULL; \ 689 __builtin_constant_p(fmt) ? fmt : NULL; \
707 \ 690 \
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index af51df35d749..30f089ebe0a4 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -152,6 +152,8 @@ struct kernfs_syscall_ops {
152 int (*rmdir)(struct kernfs_node *kn); 152 int (*rmdir)(struct kernfs_node *kn);
153 int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent, 153 int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
154 const char *new_name); 154 const char *new_name);
155 int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
156 struct kernfs_root *root);
155}; 157};
156 158
157struct kernfs_root { 159struct kernfs_root {
@@ -267,8 +269,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
267 269
268int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen); 270int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
269size_t kernfs_path_len(struct kernfs_node *kn); 271size_t kernfs_path_len(struct kernfs_node *kn);
270char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, 272int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
271 size_t buflen); 273 char *buf, size_t buflen);
274char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen);
272void pr_cont_kernfs_name(struct kernfs_node *kn); 275void pr_cont_kernfs_name(struct kernfs_node *kn);
273void pr_cont_kernfs_path(struct kernfs_node *kn); 276void pr_cont_kernfs_path(struct kernfs_node *kn);
274struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn); 277struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
@@ -283,6 +286,8 @@ struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry);
283struct kernfs_root *kernfs_root_from_sb(struct super_block *sb); 286struct kernfs_root *kernfs_root_from_sb(struct super_block *sb);
284struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn); 287struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn);
285 288
289struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
290 struct super_block *sb);
286struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, 291struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
287 unsigned int flags, void *priv); 292 unsigned int flags, void *priv);
288void kernfs_destroy_root(struct kernfs_root *root); 293void kernfs_destroy_root(struct kernfs_root *root);
@@ -338,8 +343,8 @@ static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
338static inline size_t kernfs_path_len(struct kernfs_node *kn) 343static inline size_t kernfs_path_len(struct kernfs_node *kn)
339{ return 0; } 344{ return 0; }
340 345
341static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, 346static inline char *kernfs_path(struct kernfs_node *kn, char *buf,
342 size_t buflen) 347 size_t buflen)
343{ return NULL; } 348{ return NULL; }
344 349
345static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { } 350static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
diff --git a/include/linux/key.h b/include/linux/key.h
index 7321ab8ef949..5f5b1129dc92 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -219,6 +219,7 @@ extern struct key *key_alloc(struct key_type *type,
219#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ 219#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
220#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ 220#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
221#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */ 221#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
222#define KEY_ALLOC_BUILT_IN 0x0008 /* Key is built into kernel */
222 223
223extern void key_revoke(struct key *key); 224extern void key_revoke(struct key *key);
224extern void key_invalidate(struct key *key); 225extern void key_invalidate(struct key *key);
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 473b43678ad1..41eb6fdf87a8 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -401,7 +401,7 @@ __kfifo_int_must_check_helper( \
401 ((typeof(__tmp->type))__kfifo->data) : \ 401 ((typeof(__tmp->type))__kfifo->data) : \
402 (__tmp->buf) \ 402 (__tmp->buf) \
403 )[__kfifo->in & __tmp->kfifo.mask] = \ 403 )[__kfifo->in & __tmp->kfifo.mask] = \
404 (typeof(*__tmp->type))__val; \ 404 *(typeof(__tmp->type))&__val; \
405 smp_wmb(); \ 405 smp_wmb(); \
406 __kfifo->in++; \ 406 __kfifo->in++; \
407 } \ 407 } \
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 861f690aa791..5276fe0916fc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -25,6 +25,7 @@
25#include <linux/irqflags.h> 25#include <linux/irqflags.h>
26#include <linux/context_tracking.h> 26#include <linux/context_tracking.h>
27#include <linux/irqbypass.h> 27#include <linux/irqbypass.h>
28#include <linux/swait.h>
28#include <asm/signal.h> 29#include <asm/signal.h>
29 30
30#include <linux/kvm.h> 31#include <linux/kvm.h>
@@ -218,7 +219,7 @@ struct kvm_vcpu {
218 int fpu_active; 219 int fpu_active;
219 int guest_fpu_loaded, guest_xcr0_loaded; 220 int guest_fpu_loaded, guest_xcr0_loaded;
220 unsigned char fpu_counter; 221 unsigned char fpu_counter;
221 wait_queue_head_t wq; 222 struct swait_queue_head wq;
222 struct pid *pid; 223 struct pid *pid;
223 int sigset_active; 224 int sigset_active;
224 sigset_t sigset; 225 sigset_t sigset;
@@ -782,7 +783,7 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
782} 783}
783#endif 784#endif
784 785
785static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) 786static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
786{ 787{
787#ifdef __KVM_HAVE_ARCH_WQP 788#ifdef __KVM_HAVE_ARCH_WQP
788 return vcpu->arch.wqp; 789 return vcpu->arch.wqp;
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index e23121f9d82a..59ccab297ae0 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -37,6 +37,9 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
37 37
38void clear_all_latency_tracing(struct task_struct *p); 38void clear_all_latency_tracing(struct task_struct *p);
39 39
40extern int sysctl_latencytop(struct ctl_table *table, int write,
41 void __user *buffer, size_t *lenp, loff_t *ppos);
42
40#else 43#else
41 44
42static inline void 45static inline void
diff --git a/include/linux/leds.h b/include/linux/leds.h
index bc1476fda96e..f203a8f89d30 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -39,6 +39,7 @@ struct led_classdev {
39 39
40 /* Lower 16 bits reflect status */ 40 /* Lower 16 bits reflect status */
41#define LED_SUSPENDED (1 << 0) 41#define LED_SUSPENDED (1 << 0)
42#define LED_UNREGISTERING (1 << 1)
42 /* Upper 16 bits reflect control information */ 43 /* Upper 16 bits reflect control information */
43#define LED_CORE_SUSPENDRESUME (1 << 16) 44#define LED_CORE_SUSPENDRESUME (1 << 16)
44#define LED_BLINK_ONESHOT (1 << 17) 45#define LED_BLINK_ONESHOT (1 << 17)
@@ -48,9 +49,12 @@ struct led_classdev {
48#define LED_BLINK_DISABLE (1 << 21) 49#define LED_BLINK_DISABLE (1 << 21)
49#define LED_SYSFS_DISABLE (1 << 22) 50#define LED_SYSFS_DISABLE (1 << 22)
50#define LED_DEV_CAP_FLASH (1 << 23) 51#define LED_DEV_CAP_FLASH (1 << 23)
52#define LED_HW_PLUGGABLE (1 << 24)
51 53
52 /* Set LED brightness level */ 54 /* Set LED brightness level
53 /* Must not sleep, use a workqueue if needed */ 55 * Must not sleep. Use brightness_set_blocking for drivers
56 * that can sleep while setting brightness.
57 */
54 void (*brightness_set)(struct led_classdev *led_cdev, 58 void (*brightness_set)(struct led_classdev *led_cdev,
55 enum led_brightness brightness); 59 enum led_brightness brightness);
56 /* 60 /*
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 141ffdd59960..833867b9ddc2 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -48,7 +48,7 @@ struct nvdimm;
48struct nvdimm_bus_descriptor; 48struct nvdimm_bus_descriptor;
49typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, 49typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
50 struct nvdimm *nvdimm, unsigned int cmd, void *buf, 50 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
51 unsigned int buf_len); 51 unsigned int buf_len, int *cmd_rc);
52 52
53struct nd_namespace_label; 53struct nd_namespace_label;
54struct nvdimm_drvdata; 54struct nvdimm_drvdata;
@@ -71,6 +71,9 @@ struct nvdimm_bus_descriptor {
71 unsigned long dsm_mask; 71 unsigned long dsm_mask;
72 char *provider_name; 72 char *provider_name;
73 ndctl_fn ndctl; 73 ndctl_fn ndctl;
74 int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
75 int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
76 struct nvdimm *nvdimm, unsigned int cmd);
74}; 77};
75 78
76struct nd_cmd_desc { 79struct nd_cmd_desc {
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2190419bdf0a..cdcb2ccbefa8 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -92,9 +92,9 @@ enum {
92 NVM_ADDRMODE_CHANNEL = 1, 92 NVM_ADDRMODE_CHANNEL = 1,
93 93
94 /* Plane programming mode for LUN */ 94 /* Plane programming mode for LUN */
95 NVM_PLANE_SINGLE = 0, 95 NVM_PLANE_SINGLE = 1,
96 NVM_PLANE_DOUBLE = 1, 96 NVM_PLANE_DOUBLE = 2,
97 NVM_PLANE_QUAD = 2, 97 NVM_PLANE_QUAD = 4,
98 98
99 /* Status codes */ 99 /* Status codes */
100 NVM_RSP_SUCCESS = 0x0, 100 NVM_RSP_SUCCESS = 0x0,
@@ -242,6 +242,7 @@ struct nvm_rq {
242 uint16_t nr_pages; 242 uint16_t nr_pages;
243 uint16_t flags; 243 uint16_t flags;
244 244
245 u64 ppa_status; /* ppa media status */
245 int error; 246 int error;
246}; 247};
247 248
@@ -341,11 +342,12 @@ struct nvm_dev {
341 int lps_per_blk; 342 int lps_per_blk;
342 int *lptbl; 343 int *lptbl;
343 344
344 unsigned long total_pages;
345 unsigned long total_blocks; 345 unsigned long total_blocks;
346 unsigned long total_secs;
346 int nr_luns; 347 int nr_luns;
347 unsigned max_pages_per_blk; 348 unsigned max_pages_per_blk;
348 349
350 unsigned long *lun_map;
349 void *ppalist_pool; 351 void *ppalist_pool;
350 352
351 struct nvm_id identity; 353 struct nvm_id identity;
@@ -355,6 +357,7 @@ struct nvm_dev {
355 char name[DISK_NAME_LEN]; 357 char name[DISK_NAME_LEN];
356 358
357 struct mutex mlock; 359 struct mutex mlock;
360 spinlock_t lock;
358}; 361};
359 362
360static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, 363static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -465,8 +468,13 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
465typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, 468typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
466 unsigned long); 469 unsigned long);
467typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); 470typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
471typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
472typedef void (nvmm_release_lun)(struct nvm_dev *, int);
468typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); 473typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
469 474
475typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
476typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
477
470struct nvmm_type { 478struct nvmm_type {
471 const char *name; 479 const char *name;
472 unsigned int version[3]; 480 unsigned int version[3];
@@ -488,9 +496,15 @@ struct nvmm_type {
488 496
489 /* Configuration management */ 497 /* Configuration management */
490 nvmm_get_lun_fn *get_lun; 498 nvmm_get_lun_fn *get_lun;
499 nvmm_reserve_lun *reserve_lun;
500 nvmm_release_lun *release_lun;
491 501
492 /* Statistics */ 502 /* Statistics */
493 nvmm_lun_info_print_fn *lun_info_print; 503 nvmm_lun_info_print_fn *lun_info_print;
504
505 nvmm_get_area_fn *get_area;
506 nvmm_put_area_fn *put_area;
507
494 struct list_head list; 508 struct list_head list;
495}; 509};
496 510
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ee7229a6c06a..cb483305e1f5 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -48,7 +48,7 @@ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
48 48
49#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member) 49#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)
50 50
51static inline int hlist_bl_unhashed(const struct hlist_bl_node *h) 51static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h)
52{ 52{
53 return !h->pprev; 53 return !h->pprev;
54} 54}
@@ -68,7 +68,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h,
68 h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); 68 h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
69} 69}
70 70
71static inline int hlist_bl_empty(const struct hlist_bl_head *h) 71static inline bool hlist_bl_empty(const struct hlist_bl_head *h)
72{ 72{
73 return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK); 73 return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK);
74} 74}
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index a8828652f794..bd830d590465 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -134,6 +134,15 @@ int klp_unregister_patch(struct klp_patch *);
134int klp_enable_patch(struct klp_patch *); 134int klp_enable_patch(struct klp_patch *);
135int klp_disable_patch(struct klp_patch *); 135int klp_disable_patch(struct klp_patch *);
136 136
137/* Called from the module loader during module coming/going states */
138int klp_module_coming(struct module *mod);
139void klp_module_going(struct module *mod);
140
141#else /* !CONFIG_LIVEPATCH */
142
143static inline int klp_module_coming(struct module *mod) { return 0; }
144static inline void klp_module_going(struct module *mod) { }
145
137#endif /* CONFIG_LIVEPATCH */ 146#endif /* CONFIG_LIVEPATCH */
138 147
139#endif /* _LINUX_LIVEPATCH_H_ */ 148#endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4dca42fd32f5..d10ef06971b5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -196,9 +196,11 @@ struct lock_list {
196 * We record lock dependency chains, so that we can cache them: 196 * We record lock dependency chains, so that we can cache them:
197 */ 197 */
198struct lock_chain { 198struct lock_chain {
199 u8 irq_context; 199 /* see BUILD_BUG_ON()s in lookup_chain_cache() */
200 u8 depth; 200 unsigned int irq_context : 2,
201 u16 base; 201 depth : 6,
202 base : 24;
203 /* 4 byte hole */
202 struct hlist_node entry; 204 struct hlist_node entry;
203 u64 chain_key; 205 u64 chain_key;
204}; 206};
@@ -261,7 +263,6 @@ struct held_lock {
261/* 263/*
262 * Initialization, self-test and debugging-output methods: 264 * Initialization, self-test and debugging-output methods:
263 */ 265 */
264extern void lockdep_init(void);
265extern void lockdep_info(void); 266extern void lockdep_info(void);
266extern void lockdep_reset(void); 267extern void lockdep_reset(void);
267extern void lockdep_reset_lock(struct lockdep_map *lock); 268extern void lockdep_reset_lock(struct lockdep_map *lock);
@@ -392,7 +393,6 @@ static inline void lockdep_on(void)
392# define lockdep_set_current_reclaim_state(g) do { } while (0) 393# define lockdep_set_current_reclaim_state(g) do { } while (0)
393# define lockdep_clear_current_reclaim_state() do { } while (0) 394# define lockdep_clear_current_reclaim_state() do { } while (0)
394# define lockdep_trace_alloc(g) do { } while (0) 395# define lockdep_trace_alloc(g) do { } while (0)
395# define lockdep_init() do { } while (0)
396# define lockdep_info() do { } while (0) 396# define lockdep_info() do { } while (0)
397# define lockdep_init_map(lock, name, key, sub) \ 397# define lockdep_init_map(lock, name, key, sub) \
398 do { (void)(name); (void)(key); } while (0) 398 do { (void)(name); (void)(key); } while (0)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 71969de4058c..cdee11cbcdf1 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -541,25 +541,24 @@
541 * @inode points to the inode to use as a reference. 541 * @inode points to the inode to use as a reference.
542 * The current task must be the one that nominated @inode. 542 * The current task must be the one that nominated @inode.
543 * Return 0 if successful. 543 * Return 0 if successful.
544 * @kernel_fw_from_file:
545 * Load firmware from userspace (not called for built-in firmware).
546 * @file contains the file structure pointing to the file containing
547 * the firmware to load. This argument will be NULL if the firmware
548 * was loaded via the uevent-triggered blob-based interface exposed
549 * by CONFIG_FW_LOADER_USER_HELPER.
550 * @buf pointer to buffer containing firmware contents.
551 * @size length of the firmware contents.
552 * Return 0 if permission is granted.
553 * @kernel_module_request: 544 * @kernel_module_request:
554 * Ability to trigger the kernel to automatically upcall to userspace for 545 * Ability to trigger the kernel to automatically upcall to userspace for
555 * userspace to load a kernel module with the given name. 546 * userspace to load a kernel module with the given name.
556 * @kmod_name name of the module requested by the kernel 547 * @kmod_name name of the module requested by the kernel
557 * Return 0 if successful. 548 * Return 0 if successful.
558 * @kernel_module_from_file: 549 * @kernel_read_file:
559 * Load a kernel module from userspace. 550 * Read a file specified by userspace.
560 * @file contains the file structure pointing to the file containing 551 * @file contains the file structure pointing to the file being read
561 * the kernel module to load. If the module is being loaded from a blob, 552 * by the kernel.
562 * this argument will be NULL. 553 * @id kernel read file identifier
554 * Return 0 if permission is granted.
555 * @kernel_post_read_file:
556 * Read a file specified by userspace.
557 * @file contains the file structure pointing to the file being read
558 * by the kernel.
559 * @buf pointer to buffer containing the file contents.
560 * @size length of the file contents.
561 * @id kernel read file identifier
563 * Return 0 if permission is granted. 562 * Return 0 if permission is granted.
564 * @task_fix_setuid: 563 * @task_fix_setuid:
565 * Update the module's state after setting one or more of the user 564 * Update the module's state after setting one or more of the user
@@ -1454,9 +1453,11 @@ union security_list_options {
1454 void (*cred_transfer)(struct cred *new, const struct cred *old); 1453 void (*cred_transfer)(struct cred *new, const struct cred *old);
1455 int (*kernel_act_as)(struct cred *new, u32 secid); 1454 int (*kernel_act_as)(struct cred *new, u32 secid);
1456 int (*kernel_create_files_as)(struct cred *new, struct inode *inode); 1455 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1457 int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size);
1458 int (*kernel_module_request)(char *kmod_name); 1456 int (*kernel_module_request)(char *kmod_name);
1459 int (*kernel_module_from_file)(struct file *file); 1457 int (*kernel_module_from_file)(struct file *file);
1458 int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
1459 int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
1460 enum kernel_read_file_id id);
1460 int (*task_fix_setuid)(struct cred *new, const struct cred *old, 1461 int (*task_fix_setuid)(struct cred *new, const struct cred *old,
1461 int flags); 1462 int flags);
1462 int (*task_setpgid)(struct task_struct *p, pid_t pgid); 1463 int (*task_setpgid)(struct task_struct *p, pid_t pgid);
@@ -1715,9 +1716,9 @@ struct security_hook_heads {
1715 struct list_head cred_transfer; 1716 struct list_head cred_transfer;
1716 struct list_head kernel_act_as; 1717 struct list_head kernel_act_as;
1717 struct list_head kernel_create_files_as; 1718 struct list_head kernel_create_files_as;
1718 struct list_head kernel_fw_from_file; 1719 struct list_head kernel_read_file;
1720 struct list_head kernel_post_read_file;
1719 struct list_head kernel_module_request; 1721 struct list_head kernel_module_request;
1720 struct list_head kernel_module_from_file;
1721 struct list_head task_fix_setuid; 1722 struct list_head task_fix_setuid;
1722 struct list_head task_setpgid; 1723 struct list_head task_setpgid;
1723 struct list_head task_getpgid; 1724 struct list_head task_getpgid;
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 246a3529ecf6..ac02c54520e9 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -596,7 +596,7 @@ static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
596} 596}
597 597
598extern void set_channel_address(struct mISDNchannel *, u_int, u_int); 598extern void set_channel_address(struct mISDNchannel *, u_int, u_int);
599extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *); 599extern void mISDN_clock_update(struct mISDNclock *, int, ktime_t *);
600extern unsigned short mISDN_clock_get(void); 600extern unsigned short mISDN_clock_get(void);
601extern const char *mISDNDevName4ch(struct mISDNchannel *); 601extern const char *mISDNDevName4ch(struct mISDNchannel *);
602 602
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 6a392e7a723a..86c9a8b480c5 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -1,55 +1,52 @@
1/* 1#ifndef _LINUX_MBCACHE_H
2 File: linux/mbcache.h 2#define _LINUX_MBCACHE_H
3 3
4 (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org> 4#include <linux/hash.h>
5*/ 5#include <linux/list_bl.h>
6struct mb_cache_entry { 6#include <linux/list.h>
7 struct list_head e_lru_list; 7#include <linux/atomic.h>
8 struct mb_cache *e_cache; 8#include <linux/fs.h>
9 unsigned short e_used;
10 unsigned short e_queued;
11 atomic_t e_refcnt;
12 struct block_device *e_bdev;
13 sector_t e_block;
14 struct hlist_bl_node e_block_list;
15 struct {
16 struct hlist_bl_node o_list;
17 unsigned int o_key;
18 } e_index;
19 struct hlist_bl_head *e_block_hash_p;
20 struct hlist_bl_head *e_index_hash_p;
21};
22 9
23struct mb_cache { 10struct mb_cache;
24 struct list_head c_cache_list;
25 const char *c_name;
26 atomic_t c_entry_count;
27 int c_max_entries;
28 int c_bucket_bits;
29 struct kmem_cache *c_entry_cache;
30 struct hlist_bl_head *c_block_hash;
31 struct hlist_bl_head *c_index_hash;
32};
33 11
34/* Functions on caches */ 12struct mb_cache_entry {
13 /* List of entries in cache - protected by cache->c_list_lock */
14 struct list_head e_list;
15 /* Hash table list - protected by hash chain bitlock */
16 struct hlist_bl_node e_hash_list;
17 atomic_t e_refcnt;
18 /* Key in hash - stable during lifetime of the entry */
19 u32 e_key;
20 u32 e_referenced:1;
21 u32 e_reusable:1;
22 /* Block number of hashed block - stable during lifetime of the entry */
23 sector_t e_block;
24};
35 25
36struct mb_cache *mb_cache_create(const char *, int); 26struct mb_cache *mb_cache_create(int bucket_bits);
37void mb_cache_shrink(struct block_device *); 27void mb_cache_destroy(struct mb_cache *cache);
38void mb_cache_destroy(struct mb_cache *);
39 28
40/* Functions on cache entries */ 29int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
30 sector_t block, bool reusable);
31void __mb_cache_entry_free(struct mb_cache_entry *entry);
32static inline int mb_cache_entry_put(struct mb_cache *cache,
33 struct mb_cache_entry *entry)
34{
35 if (!atomic_dec_and_test(&entry->e_refcnt))
36 return 0;
37 __mb_cache_entry_free(entry);
38 return 1;
39}
41 40
42struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t); 41void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
43int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *, 42 sector_t block);
44 sector_t, unsigned int); 43struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
45void mb_cache_entry_release(struct mb_cache_entry *); 44 sector_t block);
46void mb_cache_entry_free(struct mb_cache_entry *);
47struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *,
48 struct block_device *,
49 sector_t);
50struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, 45struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
51 struct block_device *, 46 u32 key);
52 unsigned int); 47struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
53struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *, 48 struct mb_cache_entry *entry);
54 struct block_device *, 49void mb_cache_entry_touch(struct mb_cache *cache,
55 unsigned int); 50 struct mb_cache_entry *entry);
51
52#endif /* _LINUX_MBCACHE_H */
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 1f7bc630d225..ea34a867caa0 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -69,6 +69,9 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo
69int mvebu_mbus_save_cpu_target(u32 *store_addr); 69int mvebu_mbus_save_cpu_target(u32 *store_addr);
70void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); 70void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
71void mvebu_mbus_get_pcie_io_aperture(struct resource *res); 71void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
72int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr);
73int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
74 u8 *attr);
72int mvebu_mbus_add_window_remap_by_id(unsigned int target, 75int mvebu_mbus_add_window_remap_by_id(unsigned int target,
73 unsigned int attribute, 76 unsigned int attribute,
74 phys_addr_t base, size_t size, 77 phys_addr_t base, size_t size,
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 792c8981e633..1191d79aa495 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -28,6 +28,7 @@
28#include <linux/eventfd.h> 28#include <linux/eventfd.h>
29#include <linux/mmzone.h> 29#include <linux/mmzone.h>
30#include <linux/writeback.h> 30#include <linux/writeback.h>
31#include <linux/page-flags.h>
31 32
32struct mem_cgroup; 33struct mem_cgroup;
33struct page; 34struct page;
@@ -51,7 +52,10 @@ enum mem_cgroup_stat_index {
51 MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ 52 MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
52 MEM_CGROUP_STAT_NSTATS, 53 MEM_CGROUP_STAT_NSTATS,
53 /* default hierarchy stats */ 54 /* default hierarchy stats */
54 MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS, 55 MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS,
56 MEMCG_SLAB_RECLAIMABLE,
57 MEMCG_SLAB_UNRECLAIMABLE,
58 MEMCG_SOCK,
55 MEMCG_NR_STAT, 59 MEMCG_NR_STAT,
56}; 60};
57 61
@@ -89,6 +93,10 @@ enum mem_cgroup_events_target {
89}; 93};
90 94
91#ifdef CONFIG_MEMCG 95#ifdef CONFIG_MEMCG
96
97#define MEM_CGROUP_ID_SHIFT 16
98#define MEM_CGROUP_ID_MAX USHRT_MAX
99
92struct mem_cgroup_stat_cpu { 100struct mem_cgroup_stat_cpu {
93 long count[MEMCG_NR_STAT]; 101 long count[MEMCG_NR_STAT];
94 unsigned long events[MEMCG_NR_EVENTS]; 102 unsigned long events[MEMCG_NR_EVENTS];
@@ -265,6 +273,11 @@ struct mem_cgroup {
265 273
266extern struct mem_cgroup *root_mem_cgroup; 274extern struct mem_cgroup *root_mem_cgroup;
267 275
276static inline bool mem_cgroup_disabled(void)
277{
278 return !cgroup_subsys_enabled(memory_cgrp_subsys);
279}
280
268/** 281/**
269 * mem_cgroup_events - count memory events against a cgroup 282 * mem_cgroup_events - count memory events against a cgroup
270 * @memcg: the memory cgroup 283 * @memcg: the memory cgroup
@@ -291,7 +304,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
291void mem_cgroup_uncharge(struct page *page); 304void mem_cgroup_uncharge(struct page *page);
292void mem_cgroup_uncharge_list(struct list_head *page_list); 305void mem_cgroup_uncharge_list(struct list_head *page_list);
293 306
294void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage); 307void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
295 308
296struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); 309struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
297struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); 310struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
@@ -312,6 +325,28 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
312 struct mem_cgroup_reclaim_cookie *); 325 struct mem_cgroup_reclaim_cookie *);
313void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); 326void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
314 327
328static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
329{
330 if (mem_cgroup_disabled())
331 return 0;
332
333 return memcg->css.id;
334}
335
336/**
337 * mem_cgroup_from_id - look up a memcg from an id
338 * @id: the id to look up
339 *
340 * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
341 */
342static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
343{
344 struct cgroup_subsys_state *css;
345
346 css = css_from_id(id, &memory_cgrp_subsys);
347 return mem_cgroup_from_css(css);
348}
349
315/** 350/**
316 * parent_mem_cgroup - find the accounting parent of a memcg 351 * parent_mem_cgroup - find the accounting parent of a memcg
317 * @memcg: memcg whose parent to find 352 * @memcg: memcg whose parent to find
@@ -353,11 +388,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
353struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); 388struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
354ino_t page_cgroup_ino(struct page *page); 389ino_t page_cgroup_ino(struct page *page);
355 390
356static inline bool mem_cgroup_disabled(void)
357{
358 return !cgroup_subsys_enabled(memory_cgrp_subsys);
359}
360
361static inline bool mem_cgroup_online(struct mem_cgroup *memcg) 391static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
362{ 392{
363 if (mem_cgroup_disabled()) 393 if (mem_cgroup_disabled())
@@ -373,6 +403,9 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
373void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 403void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
374 int nr_pages); 404 int nr_pages);
375 405
406unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
407 int nid, unsigned int lru_mask);
408
376static inline 409static inline
377unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) 410unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
378{ 411{
@@ -429,36 +462,43 @@ bool mem_cgroup_oom_synchronize(bool wait);
429extern int do_swap_account; 462extern int do_swap_account;
430#endif 463#endif
431 464
432struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page); 465void lock_page_memcg(struct page *page);
433void mem_cgroup_end_page_stat(struct mem_cgroup *memcg); 466void unlock_page_memcg(struct page *page);
434 467
435/** 468/**
436 * mem_cgroup_update_page_stat - update page state statistics 469 * mem_cgroup_update_page_stat - update page state statistics
437 * @memcg: memcg to account against 470 * @page: the page
438 * @idx: page state item to account 471 * @idx: page state item to account
439 * @val: number of pages (positive or negative) 472 * @val: number of pages (positive or negative)
440 * 473 *
441 * See mem_cgroup_begin_page_stat() for locking requirements. 474 * The @page must be locked or the caller must use lock_page_memcg()
475 * to prevent double accounting when the page is concurrently being
476 * moved to another memcg:
477 *
478 * lock_page(page) or lock_page_memcg(page)
479 * if (TestClearPageState(page))
480 * mem_cgroup_update_page_stat(page, state, -1);
481 * unlock_page(page) or unlock_page_memcg(page)
442 */ 482 */
443static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, 483static inline void mem_cgroup_update_page_stat(struct page *page,
444 enum mem_cgroup_stat_index idx, int val) 484 enum mem_cgroup_stat_index idx, int val)
445{ 485{
446 VM_BUG_ON(!rcu_read_lock_held()); 486 VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
447 487
448 if (memcg) 488 if (page->mem_cgroup)
449 this_cpu_add(memcg->stat->count[idx], val); 489 this_cpu_add(page->mem_cgroup->stat->count[idx], val);
450} 490}
451 491
452static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, 492static inline void mem_cgroup_inc_page_stat(struct page *page,
453 enum mem_cgroup_stat_index idx) 493 enum mem_cgroup_stat_index idx)
454{ 494{
455 mem_cgroup_update_page_stat(memcg, idx, 1); 495 mem_cgroup_update_page_stat(page, idx, 1);
456} 496}
457 497
458static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg, 498static inline void mem_cgroup_dec_page_stat(struct page *page,
459 enum mem_cgroup_stat_index idx) 499 enum mem_cgroup_stat_index idx)
460{ 500{
461 mem_cgroup_update_page_stat(memcg, idx, -1); 501 mem_cgroup_update_page_stat(page, idx, -1);
462} 502}
463 503
464unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 504unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -496,8 +536,17 @@ void mem_cgroup_split_huge_fixup(struct page *head);
496#endif 536#endif
497 537
498#else /* CONFIG_MEMCG */ 538#else /* CONFIG_MEMCG */
539
540#define MEM_CGROUP_ID_SHIFT 0
541#define MEM_CGROUP_ID_MAX 0
542
499struct mem_cgroup; 543struct mem_cgroup;
500 544
545static inline bool mem_cgroup_disabled(void)
546{
547 return true;
548}
549
501static inline void mem_cgroup_events(struct mem_cgroup *memcg, 550static inline void mem_cgroup_events(struct mem_cgroup *memcg,
502 enum mem_cgroup_events_index idx, 551 enum mem_cgroup_events_index idx,
503 unsigned int nr) 552 unsigned int nr)
@@ -539,7 +588,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
539{ 588{
540} 589}
541 590
542static inline void mem_cgroup_replace_page(struct page *old, struct page *new) 591static inline void mem_cgroup_migrate(struct page *old, struct page *new)
543{ 592{
544} 593}
545 594
@@ -580,9 +629,16 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
580{ 629{
581} 630}
582 631
583static inline bool mem_cgroup_disabled(void) 632static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
584{ 633{
585 return true; 634 return 0;
635}
636
637static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
638{
639 WARN_ON_ONCE(id);
640 /* XXX: This should always return root_mem_cgroup */
641 return NULL;
586} 642}
587 643
588static inline bool mem_cgroup_online(struct mem_cgroup *memcg) 644static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
@@ -608,17 +664,23 @@ mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
608{ 664{
609} 665}
610 666
667static inline unsigned long
668mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
669 int nid, unsigned int lru_mask)
670{
671 return 0;
672}
673
611static inline void 674static inline void
612mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 675mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
613{ 676{
614} 677}
615 678
616static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page) 679static inline void lock_page_memcg(struct page *page)
617{ 680{
618 return NULL;
619} 681}
620 682
621static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) 683static inline void unlock_page_memcg(struct page *page)
622{ 684{
623} 685}
624 686
@@ -644,12 +706,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
644 return false; 706 return false;
645} 707}
646 708
647static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, 709static inline void mem_cgroup_inc_page_stat(struct page *page,
648 enum mem_cgroup_stat_index idx) 710 enum mem_cgroup_stat_index idx)
649{ 711{
650} 712}
651 713
652static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg, 714static inline void mem_cgroup_dec_page_stat(struct page *page,
653 enum mem_cgroup_stat_index idx) 715 enum mem_cgroup_stat_index idx)
654{ 716{
655} 717}
@@ -743,11 +805,6 @@ static inline bool memcg_kmem_enabled(void)
743 return static_branch_unlikely(&memcg_kmem_enabled_key); 805 return static_branch_unlikely(&memcg_kmem_enabled_key);
744} 806}
745 807
746static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
747{
748 return memcg->kmem_state == KMEM_ONLINE;
749}
750
751/* 808/*
752 * In general, we'll do everything in our power to not incur in any overhead 809 * In general, we'll do everything in our power to not incur in any overhead
753 * for non-memcg users for the kmem functions. Not even a function call, if we 810 * for non-memcg users for the kmem functions. Not even a function call, if we
@@ -765,7 +822,7 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
765void __memcg_kmem_uncharge(struct page *page, int order); 822void __memcg_kmem_uncharge(struct page *page, int order);
766 823
767/* 824/*
768 * helper for acessing a memcg's index. It will be used as an index in the 825 * helper for accessing a memcg's index. It will be used as an index in the
769 * child cache array in kmem_cache, and also to derive its name. This function 826 * child cache array in kmem_cache, and also to derive its name. This function
770 * will return -1 when this is not a kmem-limited memcg. 827 * will return -1 when this is not a kmem-limited memcg.
771 */ 828 */
@@ -834,6 +891,20 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
834 if (memcg_kmem_enabled()) 891 if (memcg_kmem_enabled())
835 __memcg_kmem_put_cache(cachep); 892 __memcg_kmem_put_cache(cachep);
836} 893}
894
895/**
896 * memcg_kmem_update_page_stat - update kmem page state statistics
897 * @page: the page
898 * @idx: page state item to account
899 * @val: number of pages (positive or negative)
900 */
901static inline void memcg_kmem_update_page_stat(struct page *page,
902 enum mem_cgroup_stat_index idx, int val)
903{
904 if (memcg_kmem_enabled() && page->mem_cgroup)
905 this_cpu_add(page->mem_cgroup->stat->count[idx], val);
906}
907
837#else 908#else
838#define for_each_memcg_cache_index(_idx) \ 909#define for_each_memcg_cache_index(_idx) \
839 for (; NULL; ) 910 for (; NULL; )
@@ -843,11 +914,6 @@ static inline bool memcg_kmem_enabled(void)
843 return false; 914 return false;
844} 915}
845 916
846static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
847{
848 return false;
849}
850
851static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 917static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
852{ 918{
853 return 0; 919 return 0;
@@ -879,6 +945,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
879static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) 945static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
880{ 946{
881} 947}
948
949static inline void memcg_kmem_update_page_stat(struct page *page,
950 enum mem_cgroup_stat_index idx, int val)
951{
952}
882#endif /* CONFIG_MEMCG && !CONFIG_SLOB */ 953#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
883 954
884#endif /* _LINUX_MEMCONTROL_H */ 955#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 8b8d8d12348e..093607f90b91 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -109,6 +109,9 @@ extern void unregister_memory_notifier(struct notifier_block *nb);
109extern int register_memory_isolate_notifier(struct notifier_block *nb); 109extern int register_memory_isolate_notifier(struct notifier_block *nb);
110extern void unregister_memory_isolate_notifier(struct notifier_block *nb); 110extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
111extern int register_new_memory(int, struct mem_section *); 111extern int register_new_memory(int, struct mem_section *);
112extern int memory_block_change_state(struct memory_block *mem,
113 unsigned long to_state,
114 unsigned long from_state_req);
112#ifdef CONFIG_MEMORY_HOTREMOVE 115#ifdef CONFIG_MEMORY_HOTREMOVE
113extern int unregister_memory_section(struct mem_section *); 116extern int unregister_memory_section(struct mem_section *);
114#endif 117#endif
@@ -137,17 +140,6 @@ extern struct memory_block *find_memory_block(struct mem_section *);
137#endif 140#endif
138 141
139/* 142/*
140 * 'struct memory_accessor' is a generic interface to provide
141 * in-kernel access to persistent memory such as i2c or SPI EEPROMs
142 */
143struct memory_accessor {
144 ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset,
145 size_t count);
146 ssize_t (*write)(struct memory_accessor *, const char *buf,
147 off_t offset, size_t count);
148};
149
150/*
151 * Kernel text modification mutex, used for code patching. Users of this lock 143 * Kernel text modification mutex, used for code patching. Users of this lock
152 * can sleep. 144 * can sleep.
153 */ 145 */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 43405992d027..adbef586e696 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -99,6 +99,8 @@ extern void __online_page_free(struct page *page);
99 99
100extern int try_online_node(int nid); 100extern int try_online_node(int nid);
101 101
102extern bool memhp_auto_online;
103
102#ifdef CONFIG_MEMORY_HOTREMOVE 104#ifdef CONFIG_MEMORY_HOTREMOVE
103extern bool is_pageblock_removable_nolock(struct page *page); 105extern bool is_pageblock_removable_nolock(struct page *page);
104extern int arch_remove_memory(u64 start, u64 size); 106extern int arch_remove_memory(u64 start, u64 size);
@@ -196,6 +198,9 @@ void put_online_mems(void);
196void mem_hotplug_begin(void); 198void mem_hotplug_begin(void);
197void mem_hotplug_done(void); 199void mem_hotplug_done(void);
198 200
201extern void set_zone_contiguous(struct zone *zone);
202extern void clear_zone_contiguous(struct zone *zone);
203
199#else /* ! CONFIG_MEMORY_HOTPLUG */ 204#else /* ! CONFIG_MEMORY_HOTPLUG */
200/* 205/*
201 * Stub functions for when hotplug is off 206 * Stub functions for when hotplug is off
@@ -267,7 +272,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
267extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, 272extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
268 void *arg, int (*func)(struct memory_block *, void *)); 273 void *arg, int (*func)(struct memory_block *, void *));
269extern int add_memory(int nid, u64 start, u64 size); 274extern int add_memory(int nid, u64 start, u64 size);
270extern int add_memory_resource(int nid, struct resource *resource); 275extern int add_memory_resource(int nid, struct resource *resource, bool online);
271extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, 276extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
272 bool for_device); 277 bool for_device);
273extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); 278extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h
index 38452ce1e892..34cc85864be5 100644
--- a/include/linux/mfd/as3711.h
+++ b/include/linux/mfd/as3711.h
@@ -51,7 +51,8 @@
51#define AS3711_ASIC_ID_1 0x90 51#define AS3711_ASIC_ID_1 0x90
52#define AS3711_ASIC_ID_2 0x91 52#define AS3711_ASIC_ID_2 0x91
53 53
54#define AS3711_MAX_REGS 0x92 54#define AS3711_MAX_REG AS3711_ASIC_ID_2
55#define AS3711_NUM_REGS (AS3711_MAX_REG + 1)
55 56
56/* Regulators */ 57/* Regulators */
57enum { 58enum {
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index b24c771cebd5..d82e7d51372b 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -18,6 +18,7 @@ enum {
18 AXP202_ID, 18 AXP202_ID,
19 AXP209_ID, 19 AXP209_ID,
20 AXP221_ID, 20 AXP221_ID,
21 AXP223_ID,
21 AXP288_ID, 22 AXP288_ID,
22 NR_AXP20X_VARIANTS, 23 NR_AXP20X_VARIANTS,
23}; 24};
@@ -396,7 +397,7 @@ enum axp288_irqs {
396 397
397struct axp20x_dev { 398struct axp20x_dev {
398 struct device *dev; 399 struct device *dev;
399 struct i2c_client *i2c_client; 400 int irq;
400 struct regmap *regmap; 401 struct regmap *regmap;
401 struct regmap_irq_chip_data *regmap_irqc; 402 struct regmap_irq_chip_data *regmap_irqc;
402 long variant; 403 long variant;
@@ -462,4 +463,35 @@ static inline int axp20x_read_variable_width(struct regmap *regmap,
462 return result; 463 return result;
463} 464}
464 465
466/**
467 * axp20x_match_device(): Setup axp20x variant related fields
468 *
469 * @axp20x: axp20x device to setup (.dev field must be set)
470 * @dev: device associated with this axp20x device
471 *
472 * This lets the axp20x core configure the mfd cells and register maps
473 * for later use.
474 */
475int axp20x_match_device(struct axp20x_dev *axp20x);
476
477/**
478 * axp20x_device_probe(): Probe a configured axp20x device
479 *
480 * @axp20x: axp20x device to probe (must be configured)
481 *
482 * This function lets the axp20x core register the axp20x mfd devices
483 * and irqchip. The axp20x device passed in must be fully configured
484 * with axp20x_match_device, its irq set, and regmap created.
485 */
486int axp20x_device_probe(struct axp20x_dev *axp20x);
487
488/**
489 * axp20x_device_probe(): Remove a axp20x device
490 *
491 * @axp20x: axp20x device to remove
492 *
493 * This tells the axp20x core to remove the associated mfd devices
494 */
495int axp20x_device_remove(struct axp20x_dev *axp20x);
496
465#endif /* __LINUX_MFD_AXP20X_H */ 497#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 494682ce4bf3..a677c2bd485c 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -245,7 +245,7 @@ int cros_ec_remove(struct cros_ec_device *ec_dev);
245int cros_ec_register(struct cros_ec_device *ec_dev); 245int cros_ec_register(struct cros_ec_device *ec_dev);
246 246
247/** 247/**
248 * cros_ec_register - Query the protocol version supported by the ChromeOS EC 248 * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
249 * 249 *
250 * @ec_dev: Device to register 250 * @ec_dev: Device to register
251 * @return 0 if ok, -ve on error 251 * @return 0 if ok, -ve on error
diff --git a/include/linux/mfd/imx25-tsadc.h b/include/linux/mfd/imx25-tsadc.h
new file mode 100644
index 000000000000..7fe4b8c3baac
--- /dev/null
+++ b/include/linux/mfd/imx25-tsadc.h
@@ -0,0 +1,140 @@
1#ifndef _LINUX_INCLUDE_MFD_IMX25_TSADC_H_
2#define _LINUX_INCLUDE_MFD_IMX25_TSADC_H_
3
4struct regmap;
5struct clk;
6
7struct mx25_tsadc {
8 struct regmap *regs;
9 struct irq_domain *domain;
10 struct clk *clk;
11};
12
13#define MX25_TSC_TGCR 0x00
14#define MX25_TSC_TGSR 0x04
15#define MX25_TSC_TICR 0x08
16
17/* The same register layout for TC and GC queue */
18#define MX25_ADCQ_FIFO 0x00
19#define MX25_ADCQ_CR 0x04
20#define MX25_ADCQ_SR 0x08
21#define MX25_ADCQ_MR 0x0c
22#define MX25_ADCQ_ITEM_7_0 0x20
23#define MX25_ADCQ_ITEM_15_8 0x24
24#define MX25_ADCQ_CFG(n) (0x40 + ((n) * 0x4))
25
26#define MX25_ADCQ_MR_MASK 0xffffffff
27
28/* TGCR */
29#define MX25_TGCR_PDBTIME(x) ((x) << 25)
30#define MX25_TGCR_PDBTIME_MASK GENMASK(31, 25)
31#define MX25_TGCR_PDBEN BIT(24)
32#define MX25_TGCR_PDEN BIT(23)
33#define MX25_TGCR_ADCCLKCFG(x) ((x) << 16)
34#define MX25_TGCR_GET_ADCCLK(x) (((x) >> 16) & 0x1f)
35#define MX25_TGCR_INTREFEN BIT(10)
36#define MX25_TGCR_POWERMODE_MASK GENMASK(9, 8)
37#define MX25_TGCR_POWERMODE_SAVE (1 << 8)
38#define MX25_TGCR_POWERMODE_ON (2 << 8)
39#define MX25_TGCR_STLC BIT(5)
40#define MX25_TGCR_SLPC BIT(4)
41#define MX25_TGCR_FUNC_RST BIT(2)
42#define MX25_TGCR_TSC_RST BIT(1)
43#define MX25_TGCR_CLK_EN BIT(0)
44
45/* TGSR */
46#define MX25_TGSR_SLP_INT BIT(2)
47#define MX25_TGSR_GCQ_INT BIT(1)
48#define MX25_TGSR_TCQ_INT BIT(0)
49
50/* ADCQ_ITEM_* */
51#define _MX25_ADCQ_ITEM(item, x) ((x) << ((item) * 4))
52#define MX25_ADCQ_ITEM(item, x) ((item) >= 8 ? \
53 _MX25_ADCQ_ITEM((item) - 8, (x)) : _MX25_ADCQ_ITEM((item), (x)))
54
55/* ADCQ_FIFO (TCQFIFO and GCQFIFO) */
56#define MX25_ADCQ_FIFO_DATA(x) (((x) >> 4) & 0xfff)
57#define MX25_ADCQ_FIFO_ID(x) ((x) & 0xf)
58
59/* ADCQ_CR (TCQR and GCQR) */
60#define MX25_ADCQ_CR_PDCFG_LEVEL BIT(19)
61#define MX25_ADCQ_CR_PDMSK BIT(18)
62#define MX25_ADCQ_CR_FRST BIT(17)
63#define MX25_ADCQ_CR_QRST BIT(16)
64#define MX25_ADCQ_CR_RWAIT_MASK GENMASK(15, 12)
65#define MX25_ADCQ_CR_RWAIT(x) ((x) << 12)
66#define MX25_ADCQ_CR_WMRK_MASK GENMASK(11, 8)
67#define MX25_ADCQ_CR_WMRK(x) ((x) << 8)
68#define MX25_ADCQ_CR_LITEMID_MASK (0xf << 4)
69#define MX25_ADCQ_CR_LITEMID(x) ((x) << 4)
70#define MX25_ADCQ_CR_RPT BIT(3)
71#define MX25_ADCQ_CR_FQS BIT(2)
72#define MX25_ADCQ_CR_QSM_MASK GENMASK(1, 0)
73#define MX25_ADCQ_CR_QSM_PD 0x1
74#define MX25_ADCQ_CR_QSM_FQS 0x2
75#define MX25_ADCQ_CR_QSM_FQS_PD 0x3
76
77/* ADCQ_SR (TCQSR and GCQSR) */
78#define MX25_ADCQ_SR_FDRY BIT(15)
79#define MX25_ADCQ_SR_FULL BIT(14)
80#define MX25_ADCQ_SR_EMPT BIT(13)
81#define MX25_ADCQ_SR_FDN(x) (((x) >> 8) & 0x1f)
82#define MX25_ADCQ_SR_FRR BIT(6)
83#define MX25_ADCQ_SR_FUR BIT(5)
84#define MX25_ADCQ_SR_FOR BIT(4)
85#define MX25_ADCQ_SR_EOQ BIT(1)
86#define MX25_ADCQ_SR_PD BIT(0)
87
88/* ADCQ_MR (TCQMR and GCQMR) */
89#define MX25_ADCQ_MR_FDRY_DMA BIT(31)
90#define MX25_ADCQ_MR_FER_DMA BIT(22)
91#define MX25_ADCQ_MR_FUR_DMA BIT(21)
92#define MX25_ADCQ_MR_FOR_DMA BIT(20)
93#define MX25_ADCQ_MR_EOQ_DMA BIT(17)
94#define MX25_ADCQ_MR_PD_DMA BIT(16)
95#define MX25_ADCQ_MR_FDRY_IRQ BIT(15)
96#define MX25_ADCQ_MR_FER_IRQ BIT(6)
97#define MX25_ADCQ_MR_FUR_IRQ BIT(5)
98#define MX25_ADCQ_MR_FOR_IRQ BIT(4)
99#define MX25_ADCQ_MR_EOQ_IRQ BIT(1)
100#define MX25_ADCQ_MR_PD_IRQ BIT(0)
101
102/* ADCQ_CFG (TICR, TCC0-7,GCC0-7) */
103#define MX25_ADCQ_CFG_SETTLING_TIME(x) ((x) << 24)
104#define MX25_ADCQ_CFG_IGS (1 << 20)
105#define MX25_ADCQ_CFG_NOS_MASK GENMASK(19, 16)
106#define MX25_ADCQ_CFG_NOS(x) (((x) - 1) << 16)
107#define MX25_ADCQ_CFG_WIPER (1 << 15)
108#define MX25_ADCQ_CFG_YNLR (1 << 14)
109#define MX25_ADCQ_CFG_YPLL_HIGH (0 << 12)
110#define MX25_ADCQ_CFG_YPLL_OFF (1 << 12)
111#define MX25_ADCQ_CFG_YPLL_LOW (3 << 12)
112#define MX25_ADCQ_CFG_XNUR_HIGH (0 << 10)
113#define MX25_ADCQ_CFG_XNUR_OFF (1 << 10)
114#define MX25_ADCQ_CFG_XNUR_LOW (3 << 10)
115#define MX25_ADCQ_CFG_XPUL_HIGH (0 << 9)
116#define MX25_ADCQ_CFG_XPUL_OFF (1 << 9)
117#define MX25_ADCQ_CFG_REFP(sel) ((sel) << 7)
118#define MX25_ADCQ_CFG_REFP_YP MX25_ADCQ_CFG_REFP(0)
119#define MX25_ADCQ_CFG_REFP_XP MX25_ADCQ_CFG_REFP(1)
120#define MX25_ADCQ_CFG_REFP_EXT MX25_ADCQ_CFG_REFP(2)
121#define MX25_ADCQ_CFG_REFP_INT MX25_ADCQ_CFG_REFP(3)
122#define MX25_ADCQ_CFG_REFP_MASK GENMASK(8, 7)
123#define MX25_ADCQ_CFG_IN(sel) ((sel) << 4)
124#define MX25_ADCQ_CFG_IN_XP MX25_ADCQ_CFG_IN(0)
125#define MX25_ADCQ_CFG_IN_YP MX25_ADCQ_CFG_IN(1)
126#define MX25_ADCQ_CFG_IN_XN MX25_ADCQ_CFG_IN(2)
127#define MX25_ADCQ_CFG_IN_YN MX25_ADCQ_CFG_IN(3)
128#define MX25_ADCQ_CFG_IN_WIPER MX25_ADCQ_CFG_IN(4)
129#define MX25_ADCQ_CFG_IN_AUX0 MX25_ADCQ_CFG_IN(5)
130#define MX25_ADCQ_CFG_IN_AUX1 MX25_ADCQ_CFG_IN(6)
131#define MX25_ADCQ_CFG_IN_AUX2 MX25_ADCQ_CFG_IN(7)
132#define MX25_ADCQ_CFG_REFN(sel) ((sel) << 2)
133#define MX25_ADCQ_CFG_REFN_XN MX25_ADCQ_CFG_REFN(0)
134#define MX25_ADCQ_CFG_REFN_YN MX25_ADCQ_CFG_REFN(1)
135#define MX25_ADCQ_CFG_REFN_NGND MX25_ADCQ_CFG_REFN(2)
136#define MX25_ADCQ_CFG_REFN_NGND2 MX25_ADCQ_CFG_REFN(3)
137#define MX25_ADCQ_CFG_REFN_MASK GENMASK(3, 2)
138#define MX25_ADCQ_CFG_PENIACK (1 << 1)
139
140#endif /* _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ */
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index f5043490d67c..643dae777b43 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -437,14 +437,11 @@ enum max77686_irq {
437struct max77686_dev { 437struct max77686_dev {
438 struct device *dev; 438 struct device *dev;
439 struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */ 439 struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */
440 struct i2c_client *rtc; /* slave addr 0x0c */
441 440
442 unsigned long type; 441 unsigned long type;
443 442
444 struct regmap *regmap; /* regmap for mfd */ 443 struct regmap *regmap; /* regmap for mfd */
445 struct regmap *rtc_regmap; /* regmap for rtc */
446 struct regmap_irq_chip_data *irq_data; 444 struct regmap_irq_chip_data *irq_data;
447 struct regmap_irq_chip_data *rtc_irq_data;
448 445
449 int irq; 446 int irq;
450 struct mutex irqlock; 447 struct mutex irqlock;
diff --git a/include/linux/mfd/mt6323/core.h b/include/linux/mfd/mt6323/core.h
new file mode 100644
index 000000000000..06d0ec3b1f8f
--- /dev/null
+++ b/include/linux/mfd/mt6323/core.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __MFD_MT6323_CORE_H__
10#define __MFD_MT6323_CORE_H__
11
12enum MT6323_IRQ_STATUS_numbers {
13 MT6323_IRQ_STATUS_SPKL_AB = 0,
14 MT6323_IRQ_STATUS_SPKL,
15 MT6323_IRQ_STATUS_BAT_L,
16 MT6323_IRQ_STATUS_BAT_H,
17 MT6323_IRQ_STATUS_WATCHDOG,
18 MT6323_IRQ_STATUS_PWRKEY,
19 MT6323_IRQ_STATUS_THR_L,
20 MT6323_IRQ_STATUS_THR_H,
21 MT6323_IRQ_STATUS_VBATON_UNDET,
22 MT6323_IRQ_STATUS_BVALID_DET,
23 MT6323_IRQ_STATUS_CHRDET,
24 MT6323_IRQ_STATUS_OV,
25 MT6323_IRQ_STATUS_LDO = 16,
26 MT6323_IRQ_STATUS_FCHRKEY,
27 MT6323_IRQ_STATUS_ACCDET,
28 MT6323_IRQ_STATUS_AUDIO,
29 MT6323_IRQ_STATUS_RTC,
30 MT6323_IRQ_STATUS_VPROC,
31 MT6323_IRQ_STATUS_VSYS,
32 MT6323_IRQ_STATUS_VPA,
33 MT6323_IRQ_STATUS_NR,
34};
35
36#endif /* __MFD_MT6323_CORE_H__ */
diff --git a/include/linux/mfd/mt6323/registers.h b/include/linux/mfd/mt6323/registers.h
new file mode 100644
index 000000000000..160f3c0e2589
--- /dev/null
+++ b/include/linux/mfd/mt6323/registers.h
@@ -0,0 +1,408 @@
1/*
2 * Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __MFD_MT6323_REGISTERS_H__
10#define __MFD_MT6323_REGISTERS_H__
11
12/* PMIC Registers */
13#define MT6323_CHR_CON0 0x0000
14#define MT6323_CHR_CON1 0x0002
15#define MT6323_CHR_CON2 0x0004
16#define MT6323_CHR_CON3 0x0006
17#define MT6323_CHR_CON4 0x0008
18#define MT6323_CHR_CON5 0x000A
19#define MT6323_CHR_CON6 0x000C
20#define MT6323_CHR_CON7 0x000E
21#define MT6323_CHR_CON8 0x0010
22#define MT6323_CHR_CON9 0x0012
23#define MT6323_CHR_CON10 0x0014
24#define MT6323_CHR_CON11 0x0016
25#define MT6323_CHR_CON12 0x0018
26#define MT6323_CHR_CON13 0x001A
27#define MT6323_CHR_CON14 0x001C
28#define MT6323_CHR_CON15 0x001E
29#define MT6323_CHR_CON16 0x0020
30#define MT6323_CHR_CON17 0x0022
31#define MT6323_CHR_CON18 0x0024
32#define MT6323_CHR_CON19 0x0026
33#define MT6323_CHR_CON20 0x0028
34#define MT6323_CHR_CON21 0x002A
35#define MT6323_CHR_CON22 0x002C
36#define MT6323_CHR_CON23 0x002E
37#define MT6323_CHR_CON24 0x0030
38#define MT6323_CHR_CON25 0x0032
39#define MT6323_CHR_CON26 0x0034
40#define MT6323_CHR_CON27 0x0036
41#define MT6323_CHR_CON28 0x0038
42#define MT6323_CHR_CON29 0x003A
43#define MT6323_STRUP_CON0 0x003C
44#define MT6323_STRUP_CON2 0x003E
45#define MT6323_STRUP_CON3 0x0040
46#define MT6323_STRUP_CON4 0x0042
47#define MT6323_STRUP_CON5 0x0044
48#define MT6323_STRUP_CON6 0x0046
49#define MT6323_STRUP_CON7 0x0048
50#define MT6323_STRUP_CON8 0x004A
51#define MT6323_STRUP_CON9 0x004C
52#define MT6323_STRUP_CON10 0x004E
53#define MT6323_STRUP_CON11 0x0050
54#define MT6323_SPK_CON0 0x0052
55#define MT6323_SPK_CON1 0x0054
56#define MT6323_SPK_CON2 0x0056
57#define MT6323_SPK_CON6 0x005E
58#define MT6323_SPK_CON7 0x0060
59#define MT6323_SPK_CON8 0x0062
60#define MT6323_SPK_CON9 0x0064
61#define MT6323_SPK_CON10 0x0066
62#define MT6323_SPK_CON11 0x0068
63#define MT6323_SPK_CON12 0x006A
64#define MT6323_CID 0x0100
65#define MT6323_TOP_CKPDN0 0x0102
66#define MT6323_TOP_CKPDN0_SET 0x0104
67#define MT6323_TOP_CKPDN0_CLR 0x0106
68#define MT6323_TOP_CKPDN1 0x0108
69#define MT6323_TOP_CKPDN1_SET 0x010A
70#define MT6323_TOP_CKPDN1_CLR 0x010C
71#define MT6323_TOP_CKPDN2 0x010E
72#define MT6323_TOP_CKPDN2_SET 0x0110
73#define MT6323_TOP_CKPDN2_CLR 0x0112
74#define MT6323_TOP_RST_CON 0x0114
75#define MT6323_TOP_RST_CON_SET 0x0116
76#define MT6323_TOP_RST_CON_CLR 0x0118
77#define MT6323_TOP_RST_MISC 0x011A
78#define MT6323_TOP_RST_MISC_SET 0x011C
79#define MT6323_TOP_RST_MISC_CLR 0x011E
80#define MT6323_TOP_CKCON0 0x0120
81#define MT6323_TOP_CKCON0_SET 0x0122
82#define MT6323_TOP_CKCON0_CLR 0x0124
83#define MT6323_TOP_CKCON1 0x0126
84#define MT6323_TOP_CKCON1_SET 0x0128
85#define MT6323_TOP_CKCON1_CLR 0x012A
86#define MT6323_TOP_CKTST0 0x012C
87#define MT6323_TOP_CKTST1 0x012E
88#define MT6323_TOP_CKTST2 0x0130
89#define MT6323_TEST_OUT 0x0132
90#define MT6323_TEST_CON0 0x0134
91#define MT6323_TEST_CON1 0x0136
92#define MT6323_EN_STATUS0 0x0138
93#define MT6323_EN_STATUS1 0x013A
94#define MT6323_OCSTATUS0 0x013C
95#define MT6323_OCSTATUS1 0x013E
96#define MT6323_PGSTATUS 0x0140
97#define MT6323_CHRSTATUS 0x0142
98#define MT6323_TDSEL_CON 0x0144
99#define MT6323_RDSEL_CON 0x0146
100#define MT6323_SMT_CON0 0x0148
101#define MT6323_SMT_CON1 0x014A
102#define MT6323_SMT_CON2 0x014C
103#define MT6323_SMT_CON3 0x014E
104#define MT6323_SMT_CON4 0x0150
105#define MT6323_DRV_CON0 0x0152
106#define MT6323_DRV_CON1 0x0154
107#define MT6323_DRV_CON2 0x0156
108#define MT6323_DRV_CON3 0x0158
109#define MT6323_DRV_CON4 0x015A
110#define MT6323_SIMLS1_CON 0x015C
111#define MT6323_SIMLS2_CON 0x015E
112#define MT6323_INT_CON0 0x0160
113#define MT6323_INT_CON0_SET 0x0162
114#define MT6323_INT_CON0_CLR 0x0164
115#define MT6323_INT_CON1 0x0166
116#define MT6323_INT_CON1_SET 0x0168
117#define MT6323_INT_CON1_CLR 0x016A
118#define MT6323_INT_MISC_CON 0x016C
119#define MT6323_INT_MISC_CON_SET 0x016E
120#define MT6323_INT_MISC_CON_CLR 0x0170
121#define MT6323_INT_STATUS0 0x0172
122#define MT6323_INT_STATUS1 0x0174
123#define MT6323_OC_GEAR_0 0x0176
124#define MT6323_OC_GEAR_1 0x0178
125#define MT6323_OC_GEAR_2 0x017A
126#define MT6323_OC_CTL_VPROC 0x017C
127#define MT6323_OC_CTL_VSYS 0x017E
128#define MT6323_OC_CTL_VPA 0x0180
129#define MT6323_FQMTR_CON0 0x0182
130#define MT6323_FQMTR_CON1 0x0184
131#define MT6323_FQMTR_CON2 0x0186
132#define MT6323_RG_SPI_CON 0x0188
133#define MT6323_DEW_DIO_EN 0x018A
134#define MT6323_DEW_READ_TEST 0x018C
135#define MT6323_DEW_WRITE_TEST 0x018E
136#define MT6323_DEW_CRC_SWRST 0x0190
137#define MT6323_DEW_CRC_EN 0x0192
138#define MT6323_DEW_CRC_VAL 0x0194
139#define MT6323_DEW_DBG_MON_SEL 0x0196
140#define MT6323_DEW_CIPHER_KEY_SEL 0x0198
141#define MT6323_DEW_CIPHER_IV_SEL 0x019A
142#define MT6323_DEW_CIPHER_EN 0x019C
143#define MT6323_DEW_CIPHER_RDY 0x019E
144#define MT6323_DEW_CIPHER_MODE 0x01A0
145#define MT6323_DEW_CIPHER_SWRST 0x01A2
146#define MT6323_DEW_RDDMY_NO 0x01A4
147#define MT6323_DEW_RDATA_DLY_SEL 0x01A6
148#define MT6323_BUCK_CON0 0x0200
149#define MT6323_BUCK_CON1 0x0202
150#define MT6323_BUCK_CON2 0x0204
151#define MT6323_BUCK_CON3 0x0206
152#define MT6323_BUCK_CON4 0x0208
153#define MT6323_BUCK_CON5 0x020A
154#define MT6323_VPROC_CON0 0x020C
155#define MT6323_VPROC_CON1 0x020E
156#define MT6323_VPROC_CON2 0x0210
157#define MT6323_VPROC_CON3 0x0212
158#define MT6323_VPROC_CON4 0x0214
159#define MT6323_VPROC_CON5 0x0216
160#define MT6323_VPROC_CON7 0x021A
161#define MT6323_VPROC_CON8 0x021C
162#define MT6323_VPROC_CON9 0x021E
163#define MT6323_VPROC_CON10 0x0220
164#define MT6323_VPROC_CON11 0x0222
165#define MT6323_VPROC_CON12 0x0224
166#define MT6323_VPROC_CON13 0x0226
167#define MT6323_VPROC_CON14 0x0228
168#define MT6323_VPROC_CON15 0x022A
169#define MT6323_VPROC_CON18 0x0230
170#define MT6323_VSYS_CON0 0x0232
171#define MT6323_VSYS_CON1 0x0234
172#define MT6323_VSYS_CON2 0x0236
173#define MT6323_VSYS_CON3 0x0238
174#define MT6323_VSYS_CON4 0x023A
175#define MT6323_VSYS_CON5 0x023C
176#define MT6323_VSYS_CON7 0x0240
177#define MT6323_VSYS_CON8 0x0242
178#define MT6323_VSYS_CON9 0x0244
179#define MT6323_VSYS_CON10 0x0246
180#define MT6323_VSYS_CON11 0x0248
181#define MT6323_VSYS_CON12 0x024A
182#define MT6323_VSYS_CON13 0x024C
183#define MT6323_VSYS_CON14 0x024E
184#define MT6323_VSYS_CON15 0x0250
185#define MT6323_VSYS_CON18 0x0256
186#define MT6323_VPA_CON0 0x0300
187#define MT6323_VPA_CON1 0x0302
188#define MT6323_VPA_CON2 0x0304
189#define MT6323_VPA_CON3 0x0306
190#define MT6323_VPA_CON4 0x0308
191#define MT6323_VPA_CON5 0x030A
192#define MT6323_VPA_CON7 0x030E
193#define MT6323_VPA_CON8 0x0310
194#define MT6323_VPA_CON9 0x0312
195#define MT6323_VPA_CON10 0x0314
196#define MT6323_VPA_CON11 0x0316
197#define MT6323_VPA_CON12 0x0318
198#define MT6323_VPA_CON14 0x031C
199#define MT6323_VPA_CON16 0x0320
200#define MT6323_VPA_CON17 0x0322
201#define MT6323_VPA_CON18 0x0324
202#define MT6323_VPA_CON19 0x0326
203#define MT6323_VPA_CON20 0x0328
204#define MT6323_BUCK_K_CON0 0x032A
205#define MT6323_BUCK_K_CON1 0x032C
206#define MT6323_BUCK_K_CON2 0x032E
207#define MT6323_ISINK0_CON0 0x0330
208#define MT6323_ISINK0_CON1 0x0332
209#define MT6323_ISINK0_CON2 0x0334
210#define MT6323_ISINK0_CON3 0x0336
211#define MT6323_ISINK1_CON0 0x0338
212#define MT6323_ISINK1_CON1 0x033A
213#define MT6323_ISINK1_CON2 0x033C
214#define MT6323_ISINK1_CON3 0x033E
215#define MT6323_ISINK2_CON0 0x0340
216#define MT6323_ISINK2_CON1 0x0342
217#define MT6323_ISINK2_CON2 0x0344
218#define MT6323_ISINK2_CON3 0x0346
219#define MT6323_ISINK3_CON0 0x0348
220#define MT6323_ISINK3_CON1 0x034A
221#define MT6323_ISINK3_CON2 0x034C
222#define MT6323_ISINK3_CON3 0x034E
223#define MT6323_ISINK_ANA0 0x0350
224#define MT6323_ISINK_ANA1 0x0352
225#define MT6323_ISINK_PHASE_DLY 0x0354
226#define MT6323_ISINK_EN_CTRL 0x0356
227#define MT6323_ANALDO_CON0 0x0400
228#define MT6323_ANALDO_CON1 0x0402
229#define MT6323_ANALDO_CON2 0x0404
230#define MT6323_ANALDO_CON3 0x0406
231#define MT6323_ANALDO_CON4 0x0408
232#define MT6323_ANALDO_CON5 0x040A
233#define MT6323_ANALDO_CON6 0x040C
234#define MT6323_ANALDO_CON7 0x040E
235#define MT6323_ANALDO_CON8 0x0410
236#define MT6323_ANALDO_CON10 0x0412
237#define MT6323_ANALDO_CON15 0x0414
238#define MT6323_ANALDO_CON16 0x0416
239#define MT6323_ANALDO_CON17 0x0418
240#define MT6323_ANALDO_CON18 0x041A
241#define MT6323_ANALDO_CON19 0x041C
242#define MT6323_ANALDO_CON20 0x041E
243#define MT6323_ANALDO_CON21 0x0420
244#define MT6323_DIGLDO_CON0 0x0500
245#define MT6323_DIGLDO_CON2 0x0502
246#define MT6323_DIGLDO_CON3 0x0504
247#define MT6323_DIGLDO_CON5 0x0506
248#define MT6323_DIGLDO_CON6 0x0508
249#define MT6323_DIGLDO_CON7 0x050A
250#define MT6323_DIGLDO_CON8 0x050C
251#define MT6323_DIGLDO_CON9 0x050E
252#define MT6323_DIGLDO_CON10 0x0510
253#define MT6323_DIGLDO_CON11 0x0512
254#define MT6323_DIGLDO_CON12 0x0514
255#define MT6323_DIGLDO_CON13 0x0516
256#define MT6323_DIGLDO_CON14 0x0518
257#define MT6323_DIGLDO_CON15 0x051A
258#define MT6323_DIGLDO_CON16 0x051C
259#define MT6323_DIGLDO_CON17 0x051E
260#define MT6323_DIGLDO_CON18 0x0520
261#define MT6323_DIGLDO_CON19 0x0522
262#define MT6323_DIGLDO_CON20 0x0524
263#define MT6323_DIGLDO_CON21 0x0526
264#define MT6323_DIGLDO_CON23 0x0528
265#define MT6323_DIGLDO_CON24 0x052A
266#define MT6323_DIGLDO_CON26 0x052C
267#define MT6323_DIGLDO_CON27 0x052E
268#define MT6323_DIGLDO_CON28 0x0530
269#define MT6323_DIGLDO_CON29 0x0532
270#define MT6323_DIGLDO_CON30 0x0534
271#define MT6323_DIGLDO_CON31 0x0536
272#define MT6323_DIGLDO_CON32 0x0538
273#define MT6323_DIGLDO_CON33 0x053A
274#define MT6323_DIGLDO_CON34 0x053C
275#define MT6323_DIGLDO_CON35 0x053E
276#define MT6323_DIGLDO_CON36 0x0540
277#define MT6323_DIGLDO_CON39 0x0542
278#define MT6323_DIGLDO_CON40 0x0544
279#define MT6323_DIGLDO_CON41 0x0546
280#define MT6323_DIGLDO_CON42 0x0548
281#define MT6323_DIGLDO_CON43 0x054A
282#define MT6323_DIGLDO_CON44 0x054C
283#define MT6323_DIGLDO_CON45 0x054E
284#define MT6323_DIGLDO_CON46 0x0550
285#define MT6323_DIGLDO_CON47 0x0552
286#define MT6323_DIGLDO_CON48 0x0554
287#define MT6323_DIGLDO_CON49 0x0556
288#define MT6323_DIGLDO_CON50 0x0558
289#define MT6323_DIGLDO_CON51 0x055A
290#define MT6323_DIGLDO_CON52 0x055C
291#define MT6323_DIGLDO_CON53 0x055E
292#define MT6323_DIGLDO_CON54 0x0560
293#define MT6323_EFUSE_CON0 0x0600
294#define MT6323_EFUSE_CON1 0x0602
295#define MT6323_EFUSE_CON2 0x0604
296#define MT6323_EFUSE_CON3 0x0606
297#define MT6323_EFUSE_CON4 0x0608
298#define MT6323_EFUSE_CON5 0x060A
299#define MT6323_EFUSE_CON6 0x060C
300#define MT6323_EFUSE_VAL_0_15 0x060E
301#define MT6323_EFUSE_VAL_16_31 0x0610
302#define MT6323_EFUSE_VAL_32_47 0x0612
303#define MT6323_EFUSE_VAL_48_63 0x0614
304#define MT6323_EFUSE_VAL_64_79 0x0616
305#define MT6323_EFUSE_VAL_80_95 0x0618
306#define MT6323_EFUSE_VAL_96_111 0x061A
307#define MT6323_EFUSE_VAL_112_127 0x061C
308#define MT6323_EFUSE_VAL_128_143 0x061E
309#define MT6323_EFUSE_VAL_144_159 0x0620
310#define MT6323_EFUSE_VAL_160_175 0x0622
311#define MT6323_EFUSE_VAL_176_191 0x0624
312#define MT6323_EFUSE_DOUT_0_15 0x0626
313#define MT6323_EFUSE_DOUT_16_31 0x0628
314#define MT6323_EFUSE_DOUT_32_47 0x062A
315#define MT6323_EFUSE_DOUT_48_63 0x062C
316#define MT6323_EFUSE_DOUT_64_79 0x062E
317#define MT6323_EFUSE_DOUT_80_95 0x0630
318#define MT6323_EFUSE_DOUT_96_111 0x0632
319#define MT6323_EFUSE_DOUT_112_127 0x0634
320#define MT6323_EFUSE_DOUT_128_143 0x0636
321#define MT6323_EFUSE_DOUT_144_159 0x0638
322#define MT6323_EFUSE_DOUT_160_175 0x063A
323#define MT6323_EFUSE_DOUT_176_191 0x063C
324#define MT6323_EFUSE_CON7 0x063E
325#define MT6323_EFUSE_CON8 0x0640
326#define MT6323_EFUSE_CON9 0x0642
327#define MT6323_RTC_MIX_CON0 0x0644
328#define MT6323_RTC_MIX_CON1 0x0646
329#define MT6323_AUDTOP_CON0 0x0700
330#define MT6323_AUDTOP_CON1 0x0702
331#define MT6323_AUDTOP_CON2 0x0704
332#define MT6323_AUDTOP_CON3 0x0706
333#define MT6323_AUDTOP_CON4 0x0708
334#define MT6323_AUDTOP_CON5 0x070A
335#define MT6323_AUDTOP_CON6 0x070C
336#define MT6323_AUDTOP_CON7 0x070E
337#define MT6323_AUDTOP_CON8 0x0710
338#define MT6323_AUDTOP_CON9 0x0712
339#define MT6323_AUXADC_ADC0 0x0714
340#define MT6323_AUXADC_ADC1 0x0716
341#define MT6323_AUXADC_ADC2 0x0718
342#define MT6323_AUXADC_ADC3 0x071A
343#define MT6323_AUXADC_ADC4 0x071C
344#define MT6323_AUXADC_ADC5 0x071E
345#define MT6323_AUXADC_ADC6 0x0720
346#define MT6323_AUXADC_ADC7 0x0722
347#define MT6323_AUXADC_ADC8 0x0724
348#define MT6323_AUXADC_ADC9 0x0726
349#define MT6323_AUXADC_ADC10 0x0728
350#define MT6323_AUXADC_ADC11 0x072A
351#define MT6323_AUXADC_ADC12 0x072C
352#define MT6323_AUXADC_ADC13 0x072E
353#define MT6323_AUXADC_ADC14 0x0730
354#define MT6323_AUXADC_ADC15 0x0732
355#define MT6323_AUXADC_ADC16 0x0734
356#define MT6323_AUXADC_ADC17 0x0736
357#define MT6323_AUXADC_ADC18 0x0738
358#define MT6323_AUXADC_ADC19 0x073A
359#define MT6323_AUXADC_ADC20 0x073C
360#define MT6323_AUXADC_RSV1 0x073E
361#define MT6323_AUXADC_RSV2 0x0740
362#define MT6323_AUXADC_CON0 0x0742
363#define MT6323_AUXADC_CON1 0x0744
364#define MT6323_AUXADC_CON2 0x0746
365#define MT6323_AUXADC_CON3 0x0748
366#define MT6323_AUXADC_CON4 0x074A
367#define MT6323_AUXADC_CON5 0x074C
368#define MT6323_AUXADC_CON6 0x074E
369#define MT6323_AUXADC_CON7 0x0750
370#define MT6323_AUXADC_CON8 0x0752
371#define MT6323_AUXADC_CON9 0x0754
372#define MT6323_AUXADC_CON10 0x0756
373#define MT6323_AUXADC_CON11 0x0758
374#define MT6323_AUXADC_CON12 0x075A
375#define MT6323_AUXADC_CON13 0x075C
376#define MT6323_AUXADC_CON14 0x075E
377#define MT6323_AUXADC_CON15 0x0760
378#define MT6323_AUXADC_CON16 0x0762
379#define MT6323_AUXADC_CON17 0x0764
380#define MT6323_AUXADC_CON18 0x0766
381#define MT6323_AUXADC_CON19 0x0768
382#define MT6323_AUXADC_CON20 0x076A
383#define MT6323_AUXADC_CON21 0x076C
384#define MT6323_AUXADC_CON22 0x076E
385#define MT6323_AUXADC_CON23 0x0770
386#define MT6323_AUXADC_CON24 0x0772
387#define MT6323_AUXADC_CON25 0x0774
388#define MT6323_AUXADC_CON26 0x0776
389#define MT6323_AUXADC_CON27 0x0778
390#define MT6323_ACCDET_CON0 0x077A
391#define MT6323_ACCDET_CON1 0x077C
392#define MT6323_ACCDET_CON2 0x077E
393#define MT6323_ACCDET_CON3 0x0780
394#define MT6323_ACCDET_CON4 0x0782
395#define MT6323_ACCDET_CON5 0x0784
396#define MT6323_ACCDET_CON6 0x0786
397#define MT6323_ACCDET_CON7 0x0788
398#define MT6323_ACCDET_CON8 0x078A
399#define MT6323_ACCDET_CON9 0x078C
400#define MT6323_ACCDET_CON10 0x078E
401#define MT6323_ACCDET_CON11 0x0790
402#define MT6323_ACCDET_CON12 0x0792
403#define MT6323_ACCDET_CON13 0x0794
404#define MT6323_ACCDET_CON14 0x0796
405#define MT6323_ACCDET_CON15 0x0798
406#define MT6323_ACCDET_CON16 0x079A
407
408#endif /* __MFD_MT6323_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 45b8e8aa1fbf..d678f526e498 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -60,6 +60,8 @@ struct mt6397_chip {
60 u16 wake_mask[2]; 60 u16 wake_mask[2];
61 u16 irq_masks_cur[2]; 61 u16 irq_masks_cur[2];
62 u16 irq_masks_cache[2]; 62 u16 irq_masks_cache[2];
63 u16 int_con[2];
64 u16 int_status[2];
63}; 65};
64 66
65#endif /* __MFD_MT6397_CORE_H__ */ 67#endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index c800dbc42079..5c9a1d44c125 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -580,7 +580,9 @@ struct palmas_usb {
580 int vbus_irq; 580 int vbus_irq;
581 581
582 int gpio_id_irq; 582 int gpio_id_irq;
583 int gpio_vbus_irq;
583 struct gpio_desc *id_gpiod; 584 struct gpio_desc *id_gpiod;
585 struct gpio_desc *vbus_gpiod;
584 unsigned long sw_debounce_jiffies; 586 unsigned long sw_debounce_jiffies;
585 struct delayed_work wq_detectid; 587 struct delayed_work wq_detectid;
586 588
@@ -589,6 +591,7 @@ struct palmas_usb {
589 bool enable_vbus_detection; 591 bool enable_vbus_detection;
590 bool enable_id_detection; 592 bool enable_id_detection;
591 bool enable_gpio_id_detection; 593 bool enable_gpio_id_detection;
594 bool enable_gpio_vbus_detection;
592}; 595};
593 596
594#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator) 597#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
index fd413ccab915..8d0a392e0a7f 100644
--- a/include/linux/mfd/rc5t583.h
+++ b/include/linux/mfd/rc5t583.h
@@ -28,8 +28,6 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/regmap.h> 29#include <linux/regmap.h>
30 30
31#define RC5T583_MAX_REGS 0xF8
32
33/* Maximum number of main interrupts */ 31/* Maximum number of main interrupts */
34#define MAX_MAIN_INTERRUPT 5 32#define MAX_MAIN_INTERRUPT 5
35#define RC5T583_MAX_GPEDGE_REG 2 33#define RC5T583_MAX_GPEDGE_REG 2
@@ -169,6 +167,9 @@
169#define RC5T583_RTC_AY_MONTH 0xF3 167#define RC5T583_RTC_AY_MONTH 0xF3
170#define RC5T583_RTC_AY_YEAR 0xF4 168#define RC5T583_RTC_AY_YEAR 0xF4
171 169
170#define RC5T583_MAX_REG 0xF7
171#define RC5T583_NUM_REGS (RC5T583_MAX_REG + 1)
172
172/* RICOH_RC5T583 IRQ definitions */ 173/* RICOH_RC5T583 IRQ definitions */
173enum { 174enum {
174 RC5T583_IRQ_ONKEY, 175 RC5T583_IRQ_ONKEY,
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index b288965e8101..2c14eeca46f0 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -173,10 +173,12 @@ enum s2mps11_regulators {
173 173
174#define S2MPS11_LDO_VSEL_MASK 0x3F 174#define S2MPS11_LDO_VSEL_MASK 0x3F
175#define S2MPS11_BUCK_VSEL_MASK 0xFF 175#define S2MPS11_BUCK_VSEL_MASK 0xFF
176#define S2MPS11_BUCK9_VSEL_MASK 0x1F
176#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT) 177#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
177#define S2MPS11_ENABLE_SHIFT 0x06 178#define S2MPS11_ENABLE_SHIFT 0x06
178#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1) 179#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
179#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) 180#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
181#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
180#define S2MPS11_RAMP_DELAY 25000 /* uV/us */ 182#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
181 183
182#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4) 184#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4)
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 75e543b78f53..1088149be0c9 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -29,24 +29,24 @@ extern struct regmap *syscon_regmap_lookup_by_phandle(
29#else 29#else
30static inline struct regmap *syscon_node_to_regmap(struct device_node *np) 30static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
31{ 31{
32 return ERR_PTR(-ENOSYS); 32 return ERR_PTR(-ENOTSUPP);
33} 33}
34 34
35static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s) 35static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
36{ 36{
37 return ERR_PTR(-ENOSYS); 37 return ERR_PTR(-ENOTSUPP);
38} 38}
39 39
40static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s) 40static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
41{ 41{
42 return ERR_PTR(-ENOSYS); 42 return ERR_PTR(-ENOTSUPP);
43} 43}
44 44
45static inline struct regmap *syscon_regmap_lookup_by_phandle( 45static inline struct regmap *syscon_regmap_lookup_by_phandle(
46 struct device_node *np, 46 struct device_node *np,
47 const char *property) 47 const char *property)
48{ 48{
49 return ERR_PTR(-ENOSYS); 49 return ERR_PTR(-ENOTSUPP);
50} 50}
51#endif 51#endif
52 52
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index 558a485d03ab..238c8db953eb 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -422,6 +422,7 @@
422#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26) 422#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26)
423#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26) 423#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26)
424#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26) 424#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26)
425#define IMX6SX_GPR5_PCIE_BTNRST_RESET BIT(19)
425#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4) 426#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4)
426#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4) 427#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4)
427#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4) 428#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4)
@@ -435,6 +436,10 @@
435#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1) 436#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
436#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) 437#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
437 438
439#define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30)
440#define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0)
441#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)
442
438/* For imx6ul iomux gpr register field define */ 443/* For imx6ul iomux gpr register field define */
439#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17) 444#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
440#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18) 445#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 24b86d538e88..05d58ee5e6a7 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -65,6 +65,10 @@
65 * Some controllers can support SDIO IRQ signalling. 65 * Some controllers can support SDIO IRQ signalling.
66 */ 66 */
67#define TMIO_MMC_SDIO_IRQ (1 << 2) 67#define TMIO_MMC_SDIO_IRQ (1 << 2)
68
69/* Some controllers don't need to wait 10ms for clock changes */
70#define TMIO_MMC_FAST_CLK_CHG (1 << 3)
71
68/* 72/*
69 * Some controllers require waiting for the SD bus to become 73 * Some controllers require waiting for the SD bus to become
70 * idle before writing to some registers. 74 * idle before writing to some registers.
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
new file mode 100644
index 000000000000..a228ae4c88d9
--- /dev/null
+++ b/include/linux/mfd/tps65086.h
@@ -0,0 +1,117 @@
1/*
2 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
3 * Andrew F. Davis <afd@ti.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
10 * kind, whether expressed or implied; without even the implied warranty
11 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License version 2 for more details.
13 *
14 * Based on the TPS65912 driver
15 */
16
17#ifndef __LINUX_MFD_TPS65086_H
18#define __LINUX_MFD_TPS65086_H
19
20#include <linux/device.h>
21#include <linux/regmap.h>
22
23/* List of registers for TPS65086 */
24#define TPS65086_DEVICEID 0x01
25#define TPS65086_IRQ 0x02
26#define TPS65086_IRQ_MASK 0x03
27#define TPS65086_PMICSTAT 0x04
28#define TPS65086_SHUTDNSRC 0x05
29#define TPS65086_BUCK1CTRL 0x20
30#define TPS65086_BUCK2CTRL 0x21
31#define TPS65086_BUCK3DECAY 0x22
32#define TPS65086_BUCK3VID 0x23
33#define TPS65086_BUCK3SLPCTRL 0x24
34#define TPS65086_BUCK4CTRL 0x25
35#define TPS65086_BUCK5CTRL 0x26
36#define TPS65086_BUCK6CTRL 0x27
37#define TPS65086_LDOA2CTRL 0x28
38#define TPS65086_LDOA3CTRL 0x29
39#define TPS65086_DISCHCTRL1 0x40
40#define TPS65086_DISCHCTRL2 0x41
41#define TPS65086_DISCHCTRL3 0x42
42#define TPS65086_PG_DELAY1 0x43
43#define TPS65086_FORCESHUTDN 0x91
44#define TPS65086_BUCK1SLPCTRL 0x92
45#define TPS65086_BUCK2SLPCTRL 0x93
46#define TPS65086_BUCK4VID 0x94
47#define TPS65086_BUCK4SLPVID 0x95
48#define TPS65086_BUCK5VID 0x96
49#define TPS65086_BUCK5SLPVID 0x97
50#define TPS65086_BUCK6VID 0x98
51#define TPS65086_BUCK6SLPVID 0x99
52#define TPS65086_LDOA2VID 0x9A
53#define TPS65086_LDOA3VID 0x9B
54#define TPS65086_BUCK123CTRL 0x9C
55#define TPS65086_PG_DELAY2 0x9D
56#define TPS65086_PIN_EN_MASK1 0x9E
57#define TPS65086_PIN_EN_MASK2 0x9F
58#define TPS65086_SWVTT_EN 0x9F
59#define TPS65086_PIN_EN_OVR1 0xA0
60#define TPS65086_PIN_EN_OVR2 0xA1
61#define TPS65086_GPOCTRL 0xA1
62#define TPS65086_PWR_FAULT_MASK1 0xA2
63#define TPS65086_PWR_FAULT_MASK2 0xA3
64#define TPS65086_GPO1PG_CTRL1 0xA4
65#define TPS65086_GPO1PG_CTRL2 0xA5
66#define TPS65086_GPO4PG_CTRL1 0xA6
67#define TPS65086_GPO4PG_CTRL2 0xA7
68#define TPS65086_GPO2PG_CTRL1 0xA8
69#define TPS65086_GPO2PG_CTRL2 0xA9
70#define TPS65086_GPO3PG_CTRL1 0xAA
71#define TPS65086_GPO3PG_CTRL2 0xAB
72#define TPS65086_LDOA1CTRL 0xAE
73#define TPS65086_PG_STATUS1 0xB0
74#define TPS65086_PG_STATUS2 0xB1
75#define TPS65086_PWR_FAULT_STATUS1 0xB2
76#define TPS65086_PWR_FAULT_STATUS2 0xB3
77#define TPS65086_TEMPCRIT 0xB4
78#define TPS65086_TEMPHOT 0xB5
79#define TPS65086_OC_STATUS 0xB6
80
81/* IRQ Register field definitions */
82#define TPS65086_IRQ_DIETEMP_MASK BIT(0)
83#define TPS65086_IRQ_SHUTDN_MASK BIT(3)
84#define TPS65086_IRQ_FAULT_MASK BIT(7)
85
86/* DEVICEID Register field definitions */
87#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0)
88#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4)
89#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6)
90
91/* VID Masks */
92#define BUCK_VID_MASK GENMASK(7, 1)
93#define VDOA1_VID_MASK GENMASK(4, 1)
94#define VDOA23_VID_MASK GENMASK(3, 0)
95
96/* Define the TPS65086 IRQ numbers */
97enum tps65086_irqs {
98 TPS65086_IRQ_DIETEMP,
99 TPS65086_IRQ_SHUTDN,
100 TPS65086_IRQ_FAULT,
101};
102
103/**
104 * struct tps65086 - state holder for the tps65086 driver
105 *
106 * Device data may be used to access the TPS65086 chip
107 */
108struct tps65086 {
109 struct device *dev;
110 struct regmap *regmap;
111
112 /* IRQ Data */
113 int irq;
114 struct regmap_irq_chip_data *irq_data;
115};
116
117#endif /* __LINUX_MFD_TPS65086_H */
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 0bf2708df150..67d144b3b8f9 100644
--- a/include/linux/mfd/tps65090.h
+++ b/include/linux/mfd/tps65090.h
@@ -77,6 +77,11 @@ enum {
77#define TPS65090_REG_CG_CTRL5 0x09 77#define TPS65090_REG_CG_CTRL5 0x09
78#define TPS65090_REG_CG_STATUS1 0x0a 78#define TPS65090_REG_CG_STATUS1 0x0a
79#define TPS65090_REG_CG_STATUS2 0x0b 79#define TPS65090_REG_CG_STATUS2 0x0b
80#define TPS65090_REG_AD_OUT1 0x17
81#define TPS65090_REG_AD_OUT2 0x18
82
83#define TPS65090_MAX_REG TPS65090_REG_AD_OUT2
84#define TPS65090_NUM_REGS (TPS65090_MAX_REG + 1)
80 85
81struct tps65090 { 86struct tps65090 {
82 struct device *dev; 87 struct device *dev;
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index 6d309032dc0d..1a603701550e 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -1,28 +1,27 @@
1/* 1/*
2 * tps65912.h -- TI TPS6591x 2 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
3 * Andrew F. Davis <afd@ti.com>
3 * 4 *
4 * Copyright 2011 Texas Instruments Inc. 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
5 * 8 *
6 * Author: Margarita Olaya <magi@slimlogic.co.uk> 9 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
7 * 10 * kind, whether expressed or implied; without even the implied warranty
8 * This program is free software; you can redistribute it and/or modify it 11 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * under the terms of the GNU General Public License as published by the 12 * GNU General Public License version 2 for more details.
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 * 13 *
14 * Based on the TPS65218 driver and the previous TPS65912 driver by
15 * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
13 */ 16 */
14 17
15#ifndef __LINUX_MFD_TPS65912_H 18#ifndef __LINUX_MFD_TPS65912_H
16#define __LINUX_MFD_TPS65912_H 19#define __LINUX_MFD_TPS65912_H
17 20
18/* TPS regulator type list */ 21#include <linux/device.h>
19#define REGULATOR_LDO 0 22#include <linux/regmap.h>
20#define REGULATOR_DCDC 1
21
22/*
23 * List of registers for TPS65912
24 */
25 23
24/* List of registers for TPS65912 */
26#define TPS65912_DCDC1_CTRL 0x00 25#define TPS65912_DCDC1_CTRL 0x00
27#define TPS65912_DCDC2_CTRL 0x01 26#define TPS65912_DCDC2_CTRL 0x01
28#define TPS65912_DCDC3_CTRL 0x02 27#define TPS65912_DCDC3_CTRL 0x02
@@ -126,41 +125,45 @@
126#define TPS65912_VERNUM 0x64 125#define TPS65912_VERNUM 0x64
127#define TPS6591X_MAX_REGISTER 0x64 126#define TPS6591X_MAX_REGISTER 0x64
128 127
129/* IRQ Definitions */ 128/* INT_STS Register field definitions */
130#define TPS65912_IRQ_PWRHOLD_F 0 129#define TPS65912_INT_STS_PWRHOLD_F BIT(0)
131#define TPS65912_IRQ_VMON 1 130#define TPS65912_INT_STS_VMON BIT(1)
132#define TPS65912_IRQ_PWRON 2 131#define TPS65912_INT_STS_PWRON BIT(2)
133#define TPS65912_IRQ_PWRON_LP 3 132#define TPS65912_INT_STS_PWRON_LP BIT(3)
134#define TPS65912_IRQ_PWRHOLD_R 4 133#define TPS65912_INT_STS_PWRHOLD_R BIT(4)
135#define TPS65912_IRQ_HOTDIE 5 134#define TPS65912_INT_STS_HOTDIE BIT(5)
136#define TPS65912_IRQ_GPIO1_R 6 135#define TPS65912_INT_STS_GPIO1_R BIT(6)
137#define TPS65912_IRQ_GPIO1_F 7 136#define TPS65912_INT_STS_GPIO1_F BIT(7)
138#define TPS65912_IRQ_GPIO2_R 8 137
139#define TPS65912_IRQ_GPIO2_F 9 138/* INT_STS Register field definitions */
140#define TPS65912_IRQ_GPIO3_R 10 139#define TPS65912_INT_STS2_GPIO2_R BIT(0)
141#define TPS65912_IRQ_GPIO3_F 11 140#define TPS65912_INT_STS2_GPIO2_F BIT(1)
142#define TPS65912_IRQ_GPIO4_R 12 141#define TPS65912_INT_STS2_GPIO3_R BIT(2)
143#define TPS65912_IRQ_GPIO4_F 13 142#define TPS65912_INT_STS2_GPIO3_F BIT(3)
144#define TPS65912_IRQ_GPIO5_R 14 143#define TPS65912_INT_STS2_GPIO4_R BIT(4)
145#define TPS65912_IRQ_GPIO5_F 15 144#define TPS65912_INT_STS2_GPIO4_F BIT(5)
146#define TPS65912_IRQ_PGOOD_DCDC1 16 145#define TPS65912_INT_STS2_GPIO5_R BIT(6)
147#define TPS65912_IRQ_PGOOD_DCDC2 17 146#define TPS65912_INT_STS2_GPIO5_F BIT(7)
148#define TPS65912_IRQ_PGOOD_DCDC3 18
149#define TPS65912_IRQ_PGOOD_DCDC4 19
150#define TPS65912_IRQ_PGOOD_LDO1 20
151#define TPS65912_IRQ_PGOOD_LDO2 21
152#define TPS65912_IRQ_PGOOD_LDO3 22
153#define TPS65912_IRQ_PGOOD_LDO4 23
154#define TPS65912_IRQ_PGOOD_LDO5 24
155#define TPS65912_IRQ_PGOOD_LDO6 25
156#define TPS65912_IRQ_PGOOD_LDO7 26
157#define TPS65912_IRQ_PGOOD_LD08 27
158#define TPS65912_IRQ_PGOOD_LDO9 28
159#define TPS65912_IRQ_PGOOD_LDO10 29
160 147
161#define TPS65912_NUM_IRQ 30 148/* INT_STS Register field definitions */
149#define TPS65912_INT_STS3_PGOOD_DCDC1 BIT(0)
150#define TPS65912_INT_STS3_PGOOD_DCDC2 BIT(1)
151#define TPS65912_INT_STS3_PGOOD_DCDC3 BIT(2)
152#define TPS65912_INT_STS3_PGOOD_DCDC4 BIT(3)
153#define TPS65912_INT_STS3_PGOOD_LDO1 BIT(4)
154#define TPS65912_INT_STS3_PGOOD_LDO2 BIT(5)
155#define TPS65912_INT_STS3_PGOOD_LDO3 BIT(6)
156#define TPS65912_INT_STS3_PGOOD_LDO4 BIT(7)
162 157
163/* GPIO 1 and 2 Register Definitions */ 158/* INT_STS Register field definitions */
159#define TPS65912_INT_STS4_PGOOD_LDO5 BIT(0)
160#define TPS65912_INT_STS4_PGOOD_LDO6 BIT(1)
161#define TPS65912_INT_STS4_PGOOD_LDO7 BIT(2)
162#define TPS65912_INT_STS4_PGOOD_LDO8 BIT(3)
163#define TPS65912_INT_STS4_PGOOD_LDO9 BIT(4)
164#define TPS65912_INT_STS4_PGOOD_LDO10 BIT(5)
165
166/* GPIO 1 and 2 Register field definitions */
164#define GPIO_SLEEP_MASK 0x80 167#define GPIO_SLEEP_MASK 0x80
165#define GPIO_SLEEP_SHIFT 7 168#define GPIO_SLEEP_SHIFT 7
166#define GPIO_DEB_MASK 0x10 169#define GPIO_DEB_MASK 0x10
@@ -172,7 +175,7 @@
172#define GPIO_SET_MASK 0x01 175#define GPIO_SET_MASK 0x01
173#define GPIO_SET_SHIFT 0 176#define GPIO_SET_SHIFT 0
174 177
175/* GPIO 3 Register Definitions */ 178/* GPIO 3 Register field definitions */
176#define GPIO3_SLEEP_MASK 0x80 179#define GPIO3_SLEEP_MASK 0x80
177#define GPIO3_SLEEP_SHIFT 7 180#define GPIO3_SLEEP_SHIFT 7
178#define GPIO3_SEL_MASK 0x40 181#define GPIO3_SEL_MASK 0x40
@@ -190,7 +193,7 @@
190#define GPIO3_SET_MASK 0x01 193#define GPIO3_SET_MASK 0x01
191#define GPIO3_SET_SHIFT 0 194#define GPIO3_SET_SHIFT 0
192 195
193/* GPIO 4 Register Definitions */ 196/* GPIO 4 Register field definitions */
194#define GPIO4_SLEEP_MASK 0x80 197#define GPIO4_SLEEP_MASK 0x80
195#define GPIO4_SLEEP_SHIFT 7 198#define GPIO4_SLEEP_SHIFT 7
196#define GPIO4_SEL_MASK 0x40 199#define GPIO4_SEL_MASK 0x40
@@ -264,65 +267,75 @@
264#define DCDC_LIMIT_MAX_SEL_MASK 0x3F 267#define DCDC_LIMIT_MAX_SEL_MASK 0x3F
265#define DCDC_LIMIT_MAX_SEL_SHIFT 0 268#define DCDC_LIMIT_MAX_SEL_SHIFT 0
266 269
267/** 270/* Define the TPS65912 IRQ numbers */
268 * struct tps65912_board 271enum tps65912_irqs {
269 * Board platform dat may be used to initialize regulators. 272 /* INT_STS registers */
270 */ 273 TPS65912_IRQ_PWRHOLD_F,
271struct tps65912_board { 274 TPS65912_IRQ_VMON,
272 int is_dcdc1_avs; 275 TPS65912_IRQ_PWRON,
273 int is_dcdc2_avs; 276 TPS65912_IRQ_PWRON_LP,
274 int is_dcdc3_avs; 277 TPS65912_IRQ_PWRHOLD_R,
275 int is_dcdc4_avs; 278 TPS65912_IRQ_HOTDIE,
276 int irq; 279 TPS65912_IRQ_GPIO1_R,
277 int irq_base; 280 TPS65912_IRQ_GPIO1_F,
278 int gpio_base; 281 /* INT_STS2 registers */
279 struct regulator_init_data *tps65912_pmic_init_data; 282 TPS65912_IRQ_GPIO2_R,
283 TPS65912_IRQ_GPIO2_F,
284 TPS65912_IRQ_GPIO3_R,
285 TPS65912_IRQ_GPIO3_F,
286 TPS65912_IRQ_GPIO4_R,
287 TPS65912_IRQ_GPIO4_F,
288 TPS65912_IRQ_GPIO5_R,
289 TPS65912_IRQ_GPIO5_F,
290 /* INT_STS3 registers */
291 TPS65912_IRQ_PGOOD_DCDC1,
292 TPS65912_IRQ_PGOOD_DCDC2,
293 TPS65912_IRQ_PGOOD_DCDC3,
294 TPS65912_IRQ_PGOOD_DCDC4,
295 TPS65912_IRQ_PGOOD_LDO1,
296 TPS65912_IRQ_PGOOD_LDO2,
297 TPS65912_IRQ_PGOOD_LDO3,
298 TPS65912_IRQ_PGOOD_LDO4,
299 /* INT_STS4 registers */
300 TPS65912_IRQ_PGOOD_LDO5,
301 TPS65912_IRQ_PGOOD_LDO6,
302 TPS65912_IRQ_PGOOD_LDO7,
303 TPS65912_IRQ_PGOOD_LDO8,
304 TPS65912_IRQ_PGOOD_LDO9,
305 TPS65912_IRQ_PGOOD_LDO10,
280}; 306};
281 307
282/** 308/*
283 * struct tps65912 - tps65912 sub-driver chip access routines 309 * struct tps65912 - state holder for the tps65912 driver
310 *
311 * Device data may be used to access the TPS65912 chip
284 */ 312 */
285
286struct tps65912 { 313struct tps65912 {
287 struct device *dev; 314 struct device *dev;
288 /* for read/write acces */ 315 struct regmap *regmap;
289 struct mutex io_mutex;
290
291 /* For device IO interfaces: I2C or SPI */
292 void *control_data;
293
294 int (*read)(struct tps65912 *tps65912, u8 reg, int size, void *dest);
295 int (*write)(struct tps65912 *tps65912, u8 reg, int size, void *src);
296
297 /* Client devices */
298 struct tps65912_pmic *pmic;
299 316
300 /* GPIO Handling */ 317 /* IRQ Data */
301 struct gpio_chip gpio; 318 int irq;
319 struct regmap_irq_chip_data *irq_data;
320};
302 321
303 /* IRQ Handling */ 322static const struct regmap_range tps65912_yes_ranges[] = {
304 struct mutex irq_lock; 323 regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5),
305 int chip_irq;
306 int irq_base;
307 int irq_num;
308 u32 irq_mask;
309}; 324};
310 325
311struct tps65912_platform_data { 326static const struct regmap_access_table tps65912_volatile_table = {
312 int irq; 327 .yes_ranges = tps65912_yes_ranges,
313 int irq_base; 328 .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges),
314}; 329};
315 330
316unsigned int tps_chip(void); 331static const struct regmap_config tps65912_regmap_config = {
332 .reg_bits = 8,
333 .val_bits = 8,
334 .cache_type = REGCACHE_RBTREE,
335 .volatile_table = &tps65912_volatile_table,
336};
317 337
318int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask); 338int tps65912_device_init(struct tps65912 *tps);
319int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask); 339int tps65912_device_exit(struct tps65912 *tps);
320int tps65912_reg_read(struct tps65912 *tps65912, u8 reg);
321int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val);
322int tps65912_device_init(struct tps65912 *tps65912);
323void tps65912_device_exit(struct tps65912 *tps65912);
324int tps65912_irq_init(struct tps65912 *tps65912, int irq,
325 struct tps65912_platform_data *pdata);
326int tps65912_irq_exit(struct tps65912 *tps65912);
327 340
328#endif /* __LINUX_MFD_TPS65912_H */ 341#endif /* __LINUX_MFD_TPS65912_H */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index cac1c0904d5f..9b50325e4ddf 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -23,9 +23,13 @@ enum migrate_reason {
23 MR_SYSCALL, /* also applies to cpusets */ 23 MR_SYSCALL, /* also applies to cpusets */
24 MR_MEMPOLICY_MBIND, 24 MR_MEMPOLICY_MBIND,
25 MR_NUMA_MISPLACED, 25 MR_NUMA_MISPLACED,
26 MR_CMA 26 MR_CMA,
27 MR_TYPES
27}; 28};
28 29
30/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
31extern char *migrate_reason_names[MR_TYPES];
32
29#ifdef CONFIG_MIGRATION 33#ifdef CONFIG_MIGRATION
30 34
31extern void putback_movable_pages(struct list_head *l); 35extern void putback_movable_pages(struct list_head *l);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a0e8cc8dcc67..d1f904c8b2cb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -219,6 +219,7 @@ enum {
219 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31, 219 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
220 MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32, 220 MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
221 MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33, 221 MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
222 MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
222}; 223};
223 224
224enum { 225enum {
@@ -827,6 +828,11 @@ struct mlx4_vf_dev {
827 u8 n_ports; 828 u8 n_ports;
828}; 829};
829 830
831enum mlx4_pci_status {
832 MLX4_PCI_STATUS_DISABLED,
833 MLX4_PCI_STATUS_ENABLED,
834};
835
830struct mlx4_dev_persistent { 836struct mlx4_dev_persistent {
831 struct pci_dev *pdev; 837 struct pci_dev *pdev;
832 struct mlx4_dev *dev; 838 struct mlx4_dev *dev;
@@ -840,6 +846,8 @@ struct mlx4_dev_persistent {
840 u8 state; 846 u8 state;
841 struct mutex interface_state_mutex; /* protect SW state */ 847 struct mutex interface_state_mutex; /* protect SW state */
842 u8 interface_state; 848 u8 interface_state;
849 struct mutex pci_status_mutex; /* sync pci state */
850 enum mlx4_pci_status pci_status;
843}; 851};
844 852
845struct mlx4_dev { 853struct mlx4_dev {
@@ -1160,6 +1168,8 @@ enum mlx4_net_trans_promisc_mode {
1160 MLX4_FS_REGULAR = 1, 1168 MLX4_FS_REGULAR = 1,
1161 MLX4_FS_ALL_DEFAULT, 1169 MLX4_FS_ALL_DEFAULT,
1162 MLX4_FS_MC_DEFAULT, 1170 MLX4_FS_MC_DEFAULT,
1171 MLX4_FS_MIRROR_RX_PORT,
1172 MLX4_FS_MIRROR_SX_PORT,
1163 MLX4_FS_UC_SNIFFER, 1173 MLX4_FS_UC_SNIFFER,
1164 MLX4_FS_MC_SNIFFER, 1174 MLX4_FS_MC_SNIFFER,
1165 MLX4_FS_MODE_NUM, /* should be last */ 1175 MLX4_FS_MODE_NUM, /* should be last */
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 2e8af001c5da..bd0e7075ea6d 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -33,6 +33,7 @@
33#ifndef MLX4_DRIVER_H 33#ifndef MLX4_DRIVER_H
34#define MLX4_DRIVER_H 34#define MLX4_DRIVER_H
35 35
36#include <net/devlink.h>
36#include <linux/mlx4/device.h> 37#include <linux/mlx4/device.h>
37 38
38struct mlx4_dev; 39struct mlx4_dev;
@@ -89,6 +90,8 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
89 90
90void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); 91void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
91 92
93struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
94
92static inline u64 mlx4_mac_to_u64(u8 *addr) 95static inline u64 mlx4_mac_to_u64(u8 *addr)
93{ 96{
94 u64 mac = 0; 97 u64 mac = 0;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 987764afa65c..b3575f392492 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -105,6 +105,29 @@ __mlx5_mask(typ, fld))
105 ___t; \ 105 ___t; \
106}) 106})
107 107
108/* Big endian getters */
109#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
110 __mlx5_64_off(typ, fld)))
111
112#define MLX5_GET_BE(type_t, typ, p, fld) ({ \
113 type_t tmp; \
114 switch (sizeof(tmp)) { \
115 case sizeof(u8): \
116 tmp = (__force type_t)MLX5_GET(typ, p, fld); \
117 break; \
118 case sizeof(u16): \
119 tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
120 break; \
121 case sizeof(u32): \
122 tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
123 break; \
124 case sizeof(u64): \
125 tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
126 break; \
127 } \
128 tmp; \
129 })
130
108enum { 131enum {
109 MLX5_MAX_COMMANDS = 32, 132 MLX5_MAX_COMMANDS = 32,
110 MLX5_CMD_DATA_BLOCK_SIZE = 512, 133 MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -351,6 +374,12 @@ enum {
351}; 374};
352 375
353enum { 376enum {
377 MLX5_BW_NO_LIMIT = 0,
378 MLX5_100_MBPS_UNIT = 3,
379 MLX5_GBPS_UNIT = 4,
380};
381
382enum {
354 MLX5_MAX_PAGE_SHIFT = 31 383 MLX5_MAX_PAGE_SHIFT = 31
355}; 384};
356 385
@@ -363,6 +392,17 @@ enum {
363 MLX5_CAP_OFF_CMDIF_CSUM = 46, 392 MLX5_CAP_OFF_CMDIF_CSUM = 46,
364}; 393};
365 394
395enum {
396 /*
397 * Max wqe size for rdma read is 512 bytes, so this
398 * limits our max_sge_rd as the wqe needs to fit:
399 * - ctrl segment (16 bytes)
400 * - rdma segment (16 bytes)
401 * - scatter elements (16 bytes each)
402 */
403 MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
404};
405
366struct mlx5_inbox_hdr { 406struct mlx5_inbox_hdr {
367 __be16 opcode; 407 __be16 opcode;
368 u8 rsvd[4]; 408 u8 rsvd[4];
@@ -1177,6 +1217,17 @@ enum {
1177 MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, 1217 MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
1178}; 1218};
1179 1219
1220enum mlx5_wol_mode {
1221 MLX5_WOL_DISABLE = 0,
1222 MLX5_WOL_SECURED_MAGIC = 1 << 1,
1223 MLX5_WOL_MAGIC = 1 << 2,
1224 MLX5_WOL_ARP = 1 << 3,
1225 MLX5_WOL_BROADCAST = 1 << 4,
1226 MLX5_WOL_MULTICAST = 1 << 5,
1227 MLX5_WOL_UNICAST = 1 << 6,
1228 MLX5_WOL_PHY_ACTIVITY = 1 << 7,
1229};
1230
1180/* MLX5 DEV CAPs */ 1231/* MLX5 DEV CAPs */
1181 1232
1182/* TODO: EAT.ME */ 1233/* TODO: EAT.ME */
@@ -1196,6 +1247,8 @@ enum mlx5_cap_type {
1196 MLX5_CAP_FLOW_TABLE, 1247 MLX5_CAP_FLOW_TABLE,
1197 MLX5_CAP_ESWITCH_FLOW_TABLE, 1248 MLX5_CAP_ESWITCH_FLOW_TABLE,
1198 MLX5_CAP_ESWITCH, 1249 MLX5_CAP_ESWITCH,
1250 MLX5_CAP_RESERVED,
1251 MLX5_CAP_VECTOR_CALC,
1199 /* NUM OF CAP Types */ 1252 /* NUM OF CAP Types */
1200 MLX5_CAP_NUM 1253 MLX5_CAP_NUM
1201}; 1254};
@@ -1258,6 +1311,10 @@ enum mlx5_cap_type {
1258#define MLX5_CAP_ODP(mdev, cap)\ 1311#define MLX5_CAP_ODP(mdev, cap)\
1259 MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) 1312 MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
1260 1313
1314#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
1315 MLX5_GET(vector_calc_cap, \
1316 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
1317
1261enum { 1318enum {
1262 MLX5_CMD_STAT_OK = 0x0, 1319 MLX5_CMD_STAT_OK = 0x0,
1263 MLX5_CMD_STAT_INT_ERR = 0x1, 1320 MLX5_CMD_STAT_INT_ERR = 0x1,
@@ -1284,7 +1341,8 @@ enum {
1284 MLX5_RFC_3635_COUNTERS_GROUP = 0x3, 1341 MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
1285 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, 1342 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1286 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, 1343 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
1287 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11 1344 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1345 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
1288}; 1346};
1289 1347
1290static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) 1348static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
@@ -1294,6 +1352,11 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1294 return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; 1352 return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1295} 1353}
1296 1354
1297#define MLX5_BY_PASS_NUM_PRIOS 9 1355#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
1356#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
1357#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
1358#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
1359 MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
1360 MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
1298 1361
1299#endif /* MLX5_DEVICE_H */ 1362#endif /* MLX5_DEVICE_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1e3006dcf35d..369c837d40f5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -54,7 +54,7 @@ enum {
54 /* one minute for the sake of bringup. Generally, commands must always 54 /* one minute for the sake of bringup. Generally, commands must always
55 * complete and we may need to increase this timeout value 55 * complete and we may need to increase this timeout value
56 */ 56 */
57 MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000, 57 MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
58 MLX5_CMD_WQ_MAX_NAME = 32, 58 MLX5_CMD_WQ_MAX_NAME = 32,
59}; 59};
60 60
@@ -99,6 +99,8 @@ enum {
99}; 99};
100 100
101enum { 101enum {
102 MLX5_REG_QETCR = 0x4005,
103 MLX5_REG_QTCT = 0x400a,
102 MLX5_REG_PCAP = 0x5001, 104 MLX5_REG_PCAP = 0x5001,
103 MLX5_REG_PMTU = 0x5003, 105 MLX5_REG_PMTU = 0x5003,
104 MLX5_REG_PTYS = 0x5004, 106 MLX5_REG_PTYS = 0x5004,
@@ -338,7 +340,7 @@ struct mlx5_core_sig_ctx {
338 u32 sigerr_count; 340 u32 sigerr_count;
339}; 341};
340 342
341struct mlx5_core_mr { 343struct mlx5_core_mkey {
342 u64 iova; 344 u64 iova;
343 u64 size; 345 u64 size;
344 u32 key; 346 u32 key;
@@ -426,7 +428,7 @@ struct mlx5_srq_table {
426 struct radix_tree_root tree; 428 struct radix_tree_root tree;
427}; 429};
428 430
429struct mlx5_mr_table { 431struct mlx5_mkey_table {
430 /* protect radix tree 432 /* protect radix tree
431 */ 433 */
432 rwlock_t lock; 434 rwlock_t lock;
@@ -458,8 +460,6 @@ struct mlx5_priv {
458 struct mlx5_uuar_info uuari; 460 struct mlx5_uuar_info uuari;
459 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); 461 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
460 462
461 struct io_mapping *bf_mapping;
462
463 /* pages stuff */ 463 /* pages stuff */
464 struct workqueue_struct *pg_wq; 464 struct workqueue_struct *pg_wq;
465 struct rb_root page_root; 465 struct rb_root page_root;
@@ -484,9 +484,9 @@ struct mlx5_priv {
484 struct mlx5_cq_table cq_table; 484 struct mlx5_cq_table cq_table;
485 /* end: cq staff */ 485 /* end: cq staff */
486 486
487 /* start: mr staff */ 487 /* start: mkey staff */
488 struct mlx5_mr_table mr_table; 488 struct mlx5_mkey_table mkey_table;
489 /* end: mr staff */ 489 /* end: mkey staff */
490 490
491 /* start: alloc staff */ 491 /* start: alloc staff */
492 /* protect buffer alocation according to numa node */ 492 /* protect buffer alocation according to numa node */
@@ -519,8 +519,9 @@ enum mlx5_device_state {
519}; 519};
520 520
521enum mlx5_interface_state { 521enum mlx5_interface_state {
522 MLX5_INTERFACE_STATE_DOWN, 522 MLX5_INTERFACE_STATE_DOWN = BIT(0),
523 MLX5_INTERFACE_STATE_UP, 523 MLX5_INTERFACE_STATE_UP = BIT(1),
524 MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
524}; 525};
525 526
526enum mlx5_pci_status { 527enum mlx5_pci_status {
@@ -544,7 +545,7 @@ struct mlx5_core_dev {
544 enum mlx5_device_state state; 545 enum mlx5_device_state state;
545 /* sync interface state */ 546 /* sync interface state */
546 struct mutex intf_state_mutex; 547 struct mutex intf_state_mutex;
547 enum mlx5_interface_state interface_state; 548 unsigned long intf_state;
548 void (*event) (struct mlx5_core_dev *dev, 549 void (*event) (struct mlx5_core_dev *dev,
549 enum mlx5_dev_event event, 550 enum mlx5_dev_event event,
550 unsigned long param); 551 unsigned long param);
@@ -613,7 +614,10 @@ struct mlx5_pas {
613}; 614};
614 615
615enum port_state_policy { 616enum port_state_policy {
616 MLX5_AAA_000 617 MLX5_POLICY_DOWN = 0,
618 MLX5_POLICY_UP = 1,
619 MLX5_POLICY_FOLLOW = 2,
620 MLX5_POLICY_INVALID = 0xffffffff
617}; 621};
618 622
619enum phy_port_state { 623enum phy_port_state {
@@ -706,8 +710,7 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
706void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 710void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
707int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); 711int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
708int mlx5_cmd_status_to_err_v2(void *ptr); 712int mlx5_cmd_status_to_err_v2(void *ptr);
709int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, 713int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
710 enum mlx5_cap_mode cap_mode);
711int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 714int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
712 int out_size); 715 int out_size);
713int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 716int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -717,7 +720,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
717int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); 720int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
718int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); 721int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
719int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); 722int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
720int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); 723int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
724 bool map_wc);
721void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); 725void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
722void mlx5_health_cleanup(struct mlx5_core_dev *dev); 726void mlx5_health_cleanup(struct mlx5_core_dev *dev);
723int mlx5_health_init(struct mlx5_core_dev *dev); 727int mlx5_health_init(struct mlx5_core_dev *dev);
@@ -739,16 +743,18 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
739 struct mlx5_query_srq_mbox_out *out); 743 struct mlx5_query_srq_mbox_out *out);
740int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 744int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
741 u16 lwm, int is_srq); 745 u16 lwm, int is_srq);
742void mlx5_init_mr_table(struct mlx5_core_dev *dev); 746void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
743void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev); 747void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
744int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 748int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
749 struct mlx5_core_mkey *mkey,
745 struct mlx5_create_mkey_mbox_in *in, int inlen, 750 struct mlx5_create_mkey_mbox_in *in, int inlen,
746 mlx5_cmd_cbk_t callback, void *context, 751 mlx5_cmd_cbk_t callback, void *context,
747 struct mlx5_create_mkey_mbox_out *out); 752 struct mlx5_create_mkey_mbox_out *out);
748int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); 753int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
749int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 754 struct mlx5_core_mkey *mkey);
755int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
750 struct mlx5_query_mkey_mbox_out *out, int outlen); 756 struct mlx5_query_mkey_mbox_out *out, int outlen);
751int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 757int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
752 u32 *mkey); 758 u32 *mkey);
753int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); 759int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
754int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); 760int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
@@ -794,37 +800,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
794 int size_in, void *data_out, int size_out, 800 int size_in, void *data_out, int size_out,
795 u16 reg_num, int arg, int write); 801 u16 reg_num, int arg, int write);
796 802
797int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
798int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
799 int ptys_size, int proto_mask, u8 local_port);
800int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
801 u32 *proto_cap, int proto_mask);
802int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
803 u32 *proto_admin, int proto_mask);
804int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
805 u8 *link_width_oper, u8 local_port);
806int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
807 u8 *proto_oper, int proto_mask,
808 u8 local_port);
809int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
810 int proto_mask);
811int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
812 enum mlx5_port_status status);
813int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
814 enum mlx5_port_status *status);
815
816int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
817void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
818void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
819 u8 port);
820
821int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
822 u8 *vl_hw_cap, u8 local_port);
823
824int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
825int mlx5_query_port_pause(struct mlx5_core_dev *dev,
826 u32 *rx_pause, u32 *tx_pause);
827
828int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 803int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
829void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 804void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
830int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, 805int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
@@ -847,6 +822,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
847void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); 822void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
848int mlx5_query_odp_caps(struct mlx5_core_dev *dev, 823int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
849 struct mlx5_odp_caps *odp_caps); 824 struct mlx5_odp_caps *odp_caps);
825int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
826 u8 port_num, void *out, size_t sz);
850 827
851static inline int fw_initializing(struct mlx5_core_dev *dev) 828static inline int fw_initializing(struct mlx5_core_dev *dev)
852{ 829{
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 8230caa3fb6e..8dec5508d93d 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -38,6 +38,10 @@
38 38
39#define MLX5_FS_DEFAULT_FLOW_TAG 0x0 39#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
40 40
41enum {
42 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
43};
44
41#define LEFTOVERS_RULE_NUM 2 45#define LEFTOVERS_RULE_NUM 2
42static inline void build_leftovers_ft_param(int *priority, 46static inline void build_leftovers_ft_param(int *priority,
43 int *n_ent, 47 int *n_ent,
@@ -52,6 +56,7 @@ enum mlx5_flow_namespace_type {
52 MLX5_FLOW_NAMESPACE_BYPASS, 56 MLX5_FLOW_NAMESPACE_BYPASS,
53 MLX5_FLOW_NAMESPACE_KERNEL, 57 MLX5_FLOW_NAMESPACE_KERNEL,
54 MLX5_FLOW_NAMESPACE_LEFTOVERS, 58 MLX5_FLOW_NAMESPACE_LEFTOVERS,
59 MLX5_FLOW_NAMESPACE_ANCHOR,
55 MLX5_FLOW_NAMESPACE_FDB, 60 MLX5_FLOW_NAMESPACE_FDB,
56}; 61};
57 62
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 58eef02edc7e..c15b8a864937 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -166,6 +166,8 @@ enum {
166 MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, 166 MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
167 MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, 167 MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
168 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, 168 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
169 MLX5_CMD_OP_SET_WOL_ROL = 0x830,
170 MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
169 MLX5_CMD_OP_CREATE_TIR = 0x900, 171 MLX5_CMD_OP_CREATE_TIR = 0x900,
170 MLX5_CMD_OP_MODIFY_TIR = 0x901, 172 MLX5_CMD_OP_MODIFY_TIR = 0x901,
171 MLX5_CMD_OP_DESTROY_TIR = 0x902, 173 MLX5_CMD_OP_DESTROY_TIR = 0x902,
@@ -458,7 +460,8 @@ struct mlx5_ifc_ads_bits {
458}; 460};
459 461
460struct mlx5_ifc_flow_table_nic_cap_bits { 462struct mlx5_ifc_flow_table_nic_cap_bits {
461 u8 reserved_at_0[0x200]; 463 u8 nic_rx_multi_path_tirs[0x1];
464 u8 reserved_at_1[0x1ff];
462 465
463 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; 466 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
464 467
@@ -615,6 +618,33 @@ struct mlx5_ifc_odp_cap_bits {
615 u8 reserved_at_e0[0x720]; 618 u8 reserved_at_e0[0x720];
616}; 619};
617 620
621struct mlx5_ifc_calc_op {
622 u8 reserved_at_0[0x10];
623 u8 reserved_at_10[0x9];
624 u8 op_swap_endianness[0x1];
625 u8 op_min[0x1];
626 u8 op_xor[0x1];
627 u8 op_or[0x1];
628 u8 op_and[0x1];
629 u8 op_max[0x1];
630 u8 op_add[0x1];
631};
632
633struct mlx5_ifc_vector_calc_cap_bits {
634 u8 calc_matrix[0x1];
635 u8 reserved_at_1[0x1f];
636 u8 reserved_at_20[0x8];
637 u8 max_vec_count[0x8];
638 u8 reserved_at_30[0xd];
639 u8 max_chunk_size[0x3];
640 struct mlx5_ifc_calc_op calc0;
641 struct mlx5_ifc_calc_op calc1;
642 struct mlx5_ifc_calc_op calc2;
643 struct mlx5_ifc_calc_op calc3;
644
645 u8 reserved_at_e0[0x720];
646};
647
618enum { 648enum {
619 MLX5_WQ_TYPE_LINKED_LIST = 0x0, 649 MLX5_WQ_TYPE_LINKED_LIST = 0x0,
620 MLX5_WQ_TYPE_CYCLIC = 0x1, 650 MLX5_WQ_TYPE_CYCLIC = 0x1,
@@ -729,14 +759,28 @@ struct mlx5_ifc_cmd_hca_cap_bits {
729 759
730 u8 reserved_at_1bf[0x3]; 760 u8 reserved_at_1bf[0x3];
731 u8 log_max_msg[0x5]; 761 u8 log_max_msg[0x5];
732 u8 reserved_at_1c7[0x18]; 762 u8 reserved_at_1c7[0x4];
763 u8 max_tc[0x4];
764 u8 reserved_at_1cf[0x6];
765 u8 rol_s[0x1];
766 u8 rol_g[0x1];
767 u8 reserved_at_1d7[0x1];
768 u8 wol_s[0x1];
769 u8 wol_g[0x1];
770 u8 wol_a[0x1];
771 u8 wol_b[0x1];
772 u8 wol_m[0x1];
773 u8 wol_u[0x1];
774 u8 wol_p[0x1];
733 775
734 u8 stat_rate_support[0x10]; 776 u8 stat_rate_support[0x10];
735 u8 reserved_at_1ef[0xc]; 777 u8 reserved_at_1ef[0xc];
736 u8 cqe_version[0x4]; 778 u8 cqe_version[0x4];
737 779
738 u8 compact_address_vector[0x1]; 780 u8 compact_address_vector[0x1];
739 u8 reserved_at_200[0xe]; 781 u8 reserved_at_200[0x3];
782 u8 ipoib_basic_offloads[0x1];
783 u8 reserved_at_204[0xa];
740 u8 drain_sigerr[0x1]; 784 u8 drain_sigerr[0x1];
741 u8 cmdif_checksum[0x2]; 785 u8 cmdif_checksum[0x2];
742 u8 sigerr_cqe[0x1]; 786 u8 sigerr_cqe[0x1];
@@ -767,10 +811,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
767 u8 cd[0x1]; 811 u8 cd[0x1];
768 u8 reserved_at_22c[0x1]; 812 u8 reserved_at_22c[0x1];
769 u8 apm[0x1]; 813 u8 apm[0x1];
770 u8 reserved_at_22e[0x7]; 814 u8 vector_calc[0x1];
815 u8 reserved_at_22f[0x1];
816 u8 imaicl[0x1];
817 u8 reserved_at_231[0x4];
771 u8 qkv[0x1]; 818 u8 qkv[0x1];
772 u8 pkv[0x1]; 819 u8 pkv[0x1];
773 u8 reserved_at_237[0x4]; 820 u8 set_deth_sqpn[0x1];
821 u8 reserved_at_239[0x3];
774 u8 xrc[0x1]; 822 u8 xrc[0x1];
775 u8 ud[0x1]; 823 u8 ud[0x1];
776 u8 uc[0x1]; 824 u8 uc[0x1];
@@ -1208,6 +1256,36 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
1208 u8 reserved_at_640[0x180]; 1256 u8 reserved_at_640[0x180];
1209}; 1257};
1210 1258
1259struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
1260 u8 symbol_error_counter[0x10];
1261
1262 u8 link_error_recovery_counter[0x8];
1263
1264 u8 link_downed_counter[0x8];
1265
1266 u8 port_rcv_errors[0x10];
1267
1268 u8 port_rcv_remote_physical_errors[0x10];
1269
1270 u8 port_rcv_switch_relay_errors[0x10];
1271
1272 u8 port_xmit_discards[0x10];
1273
1274 u8 port_xmit_constraint_errors[0x8];
1275
1276 u8 port_rcv_constraint_errors[0x8];
1277
1278 u8 reserved_at_70[0x8];
1279
1280 u8 link_overrun_errors[0x8];
1281
1282 u8 reserved_at_80[0x10];
1283
1284 u8 vl_15_dropped[0x10];
1285
1286 u8 reserved_at_a0[0xa0];
1287};
1288
1211struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { 1289struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
1212 u8 transmit_queue_high[0x20]; 1290 u8 transmit_queue_high[0x20];
1213 1291
@@ -1780,7 +1858,7 @@ struct mlx5_ifc_qpc_bits {
1780 u8 log_sq_size[0x4]; 1858 u8 log_sq_size[0x4];
1781 u8 reserved_at_55[0x6]; 1859 u8 reserved_at_55[0x6];
1782 u8 rlky[0x1]; 1860 u8 rlky[0x1];
1783 u8 reserved_at_5c[0x4]; 1861 u8 ulp_stateless_offload_mode[0x4];
1784 1862
1785 u8 counter_set_id[0x8]; 1863 u8 counter_set_id[0x8];
1786 u8 uar_page[0x18]; 1864 u8 uar_page[0x18];
@@ -1904,6 +1982,7 @@ union mlx5_ifc_hca_cap_union_bits {
1904 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; 1982 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
1905 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; 1983 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
1906 struct mlx5_ifc_e_switch_cap_bits e_switch_cap; 1984 struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
1985 struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
1907 u8 reserved_at_0[0x8000]; 1986 u8 reserved_at_0[0x8000];
1908}; 1987};
1909 1988
@@ -2618,6 +2697,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
2618 struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; 2697 struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
2619 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; 2698 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
2620 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; 2699 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
2700 struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
2621 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; 2701 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
2622 u8 reserved_at_0[0x7c0]; 2702 u8 reserved_at_0[0x7c0];
2623}; 2703};
@@ -3126,7 +3206,8 @@ struct mlx5_ifc_query_vport_counter_in_bits {
3126 u8 op_mod[0x10]; 3206 u8 op_mod[0x10];
3127 3207
3128 u8 other_vport[0x1]; 3208 u8 other_vport[0x1];
3129 u8 reserved_at_41[0xf]; 3209 u8 reserved_at_41[0xb];
3210 u8 port_num[0x4];
3130 u8 vport_number[0x10]; 3211 u8 vport_number[0x10];
3131 3212
3132 u8 reserved_at_60[0x60]; 3213 u8 reserved_at_60[0x60];
@@ -3629,6 +3710,12 @@ struct mlx5_ifc_query_hca_vport_pkey_in_bits {
3629 u8 pkey_index[0x10]; 3710 u8 pkey_index[0x10];
3630}; 3711};
3631 3712
3713enum {
3714 MLX5_HCA_VPORT_SEL_PORT_GUID = 1 << 0,
3715 MLX5_HCA_VPORT_SEL_NODE_GUID = 1 << 1,
3716 MLX5_HCA_VPORT_SEL_STATE_POLICY = 1 << 2,
3717};
3718
3632struct mlx5_ifc_query_hca_vport_gid_out_bits { 3719struct mlx5_ifc_query_hca_vport_gid_out_bits {
3633 u8 status[0x8]; 3720 u8 status[0x8];
3634 u8 reserved_at_8[0x18]; 3721 u8 reserved_at_8[0x18];
@@ -6873,6 +6960,54 @@ struct mlx5_ifc_mtt_bits {
6873 u8 rd_en[0x1]; 6960 u8 rd_en[0x1];
6874}; 6961};
6875 6962
6963struct mlx5_ifc_query_wol_rol_out_bits {
6964 u8 status[0x8];
6965 u8 reserved_at_8[0x18];
6966
6967 u8 syndrome[0x20];
6968
6969 u8 reserved_at_40[0x10];
6970 u8 rol_mode[0x8];
6971 u8 wol_mode[0x8];
6972
6973 u8 reserved_at_60[0x20];
6974};
6975
6976struct mlx5_ifc_query_wol_rol_in_bits {
6977 u8 opcode[0x10];
6978 u8 reserved_at_10[0x10];
6979
6980 u8 reserved_at_20[0x10];
6981 u8 op_mod[0x10];
6982
6983 u8 reserved_at_40[0x40];
6984};
6985
6986struct mlx5_ifc_set_wol_rol_out_bits {
6987 u8 status[0x8];
6988 u8 reserved_at_8[0x18];
6989
6990 u8 syndrome[0x20];
6991
6992 u8 reserved_at_40[0x40];
6993};
6994
6995struct mlx5_ifc_set_wol_rol_in_bits {
6996 u8 opcode[0x10];
6997 u8 reserved_at_10[0x10];
6998
6999 u8 reserved_at_20[0x10];
7000 u8 op_mod[0x10];
7001
7002 u8 rol_mode_valid[0x1];
7003 u8 wol_mode_valid[0x1];
7004 u8 reserved_at_42[0xe];
7005 u8 rol_mode[0x8];
7006 u8 wol_mode[0x8];
7007
7008 u8 reserved_at_60[0x20];
7009};
7010
6876enum { 7011enum {
6877 MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, 7012 MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
6878 MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, 7013 MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
@@ -6956,6 +7091,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
6956 struct mlx5_ifc_peir_reg_bits peir_reg; 7091 struct mlx5_ifc_peir_reg_bits peir_reg;
6957 struct mlx5_ifc_pelc_reg_bits pelc_reg; 7092 struct mlx5_ifc_pelc_reg_bits pelc_reg;
6958 struct mlx5_ifc_pfcc_reg_bits pfcc_reg; 7093 struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
7094 struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
6959 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; 7095 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
6960 struct mlx5_ifc_pifr_reg_bits pifr_reg; 7096 struct mlx5_ifc_pifr_reg_bits pifr_reg;
6961 struct mlx5_ifc_pipg_reg_bits pipg_reg; 7097 struct mlx5_ifc_pipg_reg_bits pipg_reg;
@@ -7063,4 +7199,49 @@ struct mlx5_ifc_modify_flow_table_in_bits {
7063 u8 reserved_at_100[0x100]; 7199 u8 reserved_at_100[0x100];
7064}; 7200};
7065 7201
7202struct mlx5_ifc_ets_tcn_config_reg_bits {
7203 u8 g[0x1];
7204 u8 b[0x1];
7205 u8 r[0x1];
7206 u8 reserved_at_3[0x9];
7207 u8 group[0x4];
7208 u8 reserved_at_10[0x9];
7209 u8 bw_allocation[0x7];
7210
7211 u8 reserved_at_20[0xc];
7212 u8 max_bw_units[0x4];
7213 u8 reserved_at_30[0x8];
7214 u8 max_bw_value[0x8];
7215};
7216
7217struct mlx5_ifc_ets_global_config_reg_bits {
7218 u8 reserved_at_0[0x2];
7219 u8 r[0x1];
7220 u8 reserved_at_3[0x1d];
7221
7222 u8 reserved_at_20[0xc];
7223 u8 max_bw_units[0x4];
7224 u8 reserved_at_30[0x8];
7225 u8 max_bw_value[0x8];
7226};
7227
7228struct mlx5_ifc_qetc_reg_bits {
7229 u8 reserved_at_0[0x8];
7230 u8 port_number[0x8];
7231 u8 reserved_at_10[0x30];
7232
7233 struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8];
7234 struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
7235};
7236
7237struct mlx5_ifc_qtct_reg_bits {
7238 u8 reserved_at_0[0x8];
7239 u8 port_number[0x8];
7240 u8 reserved_at_10[0xd];
7241 u8 prio[0x3];
7242
7243 u8 reserved_at_20[0x1d];
7244 u8 tclass[0x3];
7245};
7246
7066#endif /* MLX5_IFC_H */ 7247#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
new file mode 100644
index 000000000000..b30250ab7604
--- /dev/null
+++ b/include/linux/mlx5/port.h
@@ -0,0 +1,87 @@
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef __MLX5_PORT_H__
34#define __MLX5_PORT_H__
35
36#include <linux/mlx5/driver.h>
37
38int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
39int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
40 int ptys_size, int proto_mask, u8 local_port);
41int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
42 u32 *proto_cap, int proto_mask);
43int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
44 u32 *proto_admin, int proto_mask);
45int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
46 u8 *link_width_oper, u8 local_port);
47int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
48 u8 *proto_oper, int proto_mask,
49 u8 local_port);
50int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
51 int proto_mask);
52int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
53 enum mlx5_port_status status);
54int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
55 enum mlx5_port_status *status);
56
57int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
58void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
59void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
60 u8 port);
61
62int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
63 u8 *vl_hw_cap, u8 local_port);
64
65int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
66int mlx5_query_port_pause(struct mlx5_core_dev *dev,
67 u32 *rx_pause, u32 *tx_pause);
68
69int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
70int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
71 u8 *pfc_en_rx);
72
73int mlx5_max_tc(struct mlx5_core_dev *mdev);
74
75int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
76int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
77int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
78int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
79 u8 *max_bw_value,
80 u8 *max_bw_unit);
81int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
82 u8 *max_bw_value,
83 u8 *max_bw_unit);
84int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
85int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
86
87#endif /* __MLX5_PORT_H__ */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 5b8c89ffaa58..cf031a3f16c5 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -499,7 +499,8 @@ struct mlx5_qp_context {
499 u8 reserved2[4]; 499 u8 reserved2[4];
500 __be32 next_send_psn; 500 __be32 next_send_psn;
501 __be32 cqn_send; 501 __be32 cqn_send;
502 u8 reserved3[8]; 502 __be32 deth_sqpn;
503 u8 reserved3[4];
503 __be32 last_acked_psn; 504 __be32 last_acked_psn;
504 __be32 ssn; 505 __be32 ssn;
505 __be32 params2; 506 __be32 params2;
@@ -621,9 +622,9 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u
621 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); 622 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
622} 623}
623 624
624static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key) 625static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
625{ 626{
626 return radix_tree_lookup(&dev->priv.mr_table.tree, key); 627 return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
627} 628}
628 629
629struct mlx5_page_fault_resume_mbox_in { 630struct mlx5_page_fault_resume_mbox_in {
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 123771003e68..301da4a5e6bf 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
45 u16 vport, u8 *addr); 45 u16 vport, u8 *addr);
46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
47 u16 vport, u8 *addr); 47 u16 vport, u8 *addr);
48int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
49int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
48int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 50int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
49 u64 *system_image_guid); 51 u64 *system_image_guid);
50int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); 52int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
@@ -92,5 +94,12 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
92 94
93int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev); 95int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
94int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev); 96int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
97int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
98 int vf, u8 port_num, void *out,
99 size_t out_sz);
100int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
101 u8 other_vport, u8 port_num,
102 int vf,
103 struct mlx5_hca_vport_context *req);
95 104
96#endif /* __MLX5_VPORT_H__ */ 105#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 516e14944339..8f468e0d2534 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -22,6 +22,7 @@
22#include <linux/resource.h> 22#include <linux/resource.h>
23#include <linux/page_ext.h> 23#include <linux/page_ext.h>
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/page_ref.h>
25 26
26struct mempolicy; 27struct mempolicy;
27struct anon_vma; 28struct anon_vma;
@@ -82,6 +83,27 @@ extern int mmap_rnd_compat_bits __read_mostly;
82#define mm_forbids_zeropage(X) (0) 83#define mm_forbids_zeropage(X) (0)
83#endif 84#endif
84 85
86/*
87 * Default maximum number of active map areas, this limits the number of vmas
88 * per mm struct. Users can overwrite this number by sysctl but there is a
89 * problem.
90 *
91 * When a program's coredump is generated as ELF format, a section is created
92 * per a vma. In ELF, the number of sections is represented in unsigned short.
93 * This means the number of sections should be smaller than 65535 at coredump.
94 * Because the kernel adds some informative sections to a image of program at
95 * generating coredump, we need some margin. The number of extra sections is
96 * 1-3 now and depends on arch. We use "5" as safe margin, here.
97 *
98 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
99 * not a hard limit any more. Although some userspace tools can be surprised by
100 * that.
101 */
102#define MAPCOUNT_ELF_CORE_MARGIN (5)
103#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
104
105extern int sysctl_max_map_count;
106
85extern unsigned long sysctl_user_reserve_kbytes; 107extern unsigned long sysctl_user_reserve_kbytes;
86extern unsigned long sysctl_admin_reserve_kbytes; 108extern unsigned long sysctl_admin_reserve_kbytes;
87 109
@@ -122,6 +144,7 @@ extern unsigned int kobjsize(const void *objp);
122 144
123/* 145/*
124 * vm_flags in vm_area_struct, see mm_types.h. 146 * vm_flags in vm_area_struct, see mm_types.h.
147 * When changing, update also include/trace/events/mmflags.h
125 */ 148 */
126#define VM_NONE 0x00000000 149#define VM_NONE 0x00000000
127 150
@@ -170,8 +193,26 @@ extern unsigned int kobjsize(const void *objp);
170#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */ 193#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
171#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ 194#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
172 195
196#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
197#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
198#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
199#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
200#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
201#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
202#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
203#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
204#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
205#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
206
173#if defined(CONFIG_X86) 207#if defined(CONFIG_X86)
174# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ 208# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
209#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
210# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
211# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
212# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
213# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
214# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
215#endif
175#elif defined(CONFIG_PPC) 216#elif defined(CONFIG_PPC)
176# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ 217# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
177#elif defined(CONFIG_PARISC) 218#elif defined(CONFIG_PARISC)
@@ -233,6 +274,8 @@ extern pgprot_t protection_map[16];
233#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */ 274#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
234#define FAULT_FLAG_TRIED 0x20 /* Second try */ 275#define FAULT_FLAG_TRIED 0x20 /* Second try */
235#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */ 276#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
277#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
278#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
236 279
237/* 280/*
238 * vm_fault is filled by the the pagefault handler and passed to the vma's 281 * vm_fault is filled by the the pagefault handler and passed to the vma's
@@ -364,8 +407,8 @@ static inline int pmd_devmap(pmd_t pmd)
364 */ 407 */
365static inline int put_page_testzero(struct page *page) 408static inline int put_page_testzero(struct page *page)
366{ 409{
367 VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); 410 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
368 return atomic_dec_and_test(&page->_count); 411 return page_ref_dec_and_test(page);
369} 412}
370 413
371/* 414/*
@@ -376,7 +419,7 @@ static inline int put_page_testzero(struct page *page)
376 */ 419 */
377static inline int get_page_unless_zero(struct page *page) 420static inline int get_page_unless_zero(struct page *page)
378{ 421{
379 return atomic_inc_not_zero(&page->_count); 422 return page_ref_add_unless(page, 1, 0);
380} 423}
381 424
382extern int page_is_ram(unsigned long pfn); 425extern int page_is_ram(unsigned long pfn);
@@ -387,7 +430,8 @@ enum {
387 REGION_MIXED, 430 REGION_MIXED,
388}; 431};
389 432
390int region_intersects(resource_size_t offset, size_t size, const char *type); 433int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
434 unsigned long desc);
391 435
392/* Support for virtually mapped pages */ 436/* Support for virtually mapped pages */
393struct page *vmalloc_to_page(const void *addr); 437struct page *vmalloc_to_page(const void *addr);
@@ -456,17 +500,21 @@ static inline int page_mapcount(struct page *page)
456 500
457#ifdef CONFIG_TRANSPARENT_HUGEPAGE 501#ifdef CONFIG_TRANSPARENT_HUGEPAGE
458int total_mapcount(struct page *page); 502int total_mapcount(struct page *page);
503int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
459#else 504#else
460static inline int total_mapcount(struct page *page) 505static inline int total_mapcount(struct page *page)
461{ 506{
462 return page_mapcount(page); 507 return page_mapcount(page);
463} 508}
464#endif 509static inline int page_trans_huge_mapcount(struct page *page,
465 510 int *total_mapcount)
466static inline int page_count(struct page *page)
467{ 511{
468 return atomic_read(&compound_head(page)->_count); 512 int mapcount = page_mapcount(page);
513 if (total_mapcount)
514 *total_mapcount = mapcount;
515 return mapcount;
469} 516}
517#endif
470 518
471static inline struct page *virt_to_head_page(const void *x) 519static inline struct page *virt_to_head_page(const void *x)
472{ 520{
@@ -475,15 +523,6 @@ static inline struct page *virt_to_head_page(const void *x)
475 return compound_head(page); 523 return compound_head(page);
476} 524}
477 525
478/*
479 * Setup the page count before being freed into the page allocator for
480 * the first time (boot or memory hotplug)
481 */
482static inline void init_page_count(struct page *page)
483{
484 atomic_set(&page->_count, 1);
485}
486
487void __put_page(struct page *page); 526void __put_page(struct page *page);
488 527
489void put_pages_list(struct list_head *pages); 528void put_pages_list(struct list_head *pages);
@@ -593,7 +632,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
593 * 632 *
594 * A page may belong to an inode's memory mapping. In this case, page->mapping 633 * A page may belong to an inode's memory mapping. In this case, page->mapping
595 * is the pointer to the inode, and page->index is the file offset of the page, 634 * is the pointer to the inode, and page->index is the file offset of the page,
596 * in units of PAGE_CACHE_SIZE. 635 * in units of PAGE_SIZE.
597 * 636 *
598 * If pagecache pages are not associated with an inode, they are said to be 637 * If pagecache pages are not associated with an inode, they are said to be
599 * anonymous pages. These may become associated with the swapcache, and in that 638 * anonymous pages. These may become associated with the swapcache, and in that
@@ -693,8 +732,8 @@ static inline void get_page(struct page *page)
693 * Getting a normal page or the head of a compound page 732 * Getting a normal page or the head of a compound page
694 * requires to already have an elevated page->_count. 733 * requires to already have an elevated page->_count.
695 */ 734 */
696 VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); 735 VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
697 atomic_inc(&page->_count); 736 page_ref_inc(page);
698 737
699 if (unlikely(is_zone_device_page(page))) 738 if (unlikely(is_zone_device_page(page)))
700 get_zone_device_page(page); 739 get_zone_device_page(page);
@@ -904,20 +943,11 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
904{ 943{
905 return page->mem_cgroup; 944 return page->mem_cgroup;
906} 945}
907
908static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
909{
910 page->mem_cgroup = memcg;
911}
912#else 946#else
913static inline struct mem_cgroup *page_memcg(struct page *page) 947static inline struct mem_cgroup *page_memcg(struct page *page)
914{ 948{
915 return NULL; 949 return NULL;
916} 950}
917
918static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
919{
920}
921#endif 951#endif
922 952
923/* 953/*
@@ -1010,6 +1040,8 @@ static inline bool page_mapped(struct page *page)
1010 page = compound_head(page); 1040 page = compound_head(page);
1011 if (atomic_read(compound_mapcount_ptr(page)) >= 0) 1041 if (atomic_read(compound_mapcount_ptr(page)) >= 0)
1012 return true; 1042 return true;
1043 if (PageHuge(page))
1044 return false;
1013 for (i = 0; i < hpage_nr_pages(page); i++) { 1045 for (i = 0; i < hpage_nr_pages(page); i++) {
1014 if (atomic_read(&page[i]._mapcount) >= 0) 1046 if (atomic_read(&page[i]._mapcount) >= 0)
1015 return true; 1047 return true;
@@ -1051,8 +1083,6 @@ static inline void clear_page_pfmemalloc(struct page *page)
1051 * just gets major/minor fault counters bumped up. 1083 * just gets major/minor fault counters bumped up.
1052 */ 1084 */
1053 1085
1054#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */
1055
1056#define VM_FAULT_OOM 0x0001 1086#define VM_FAULT_OOM 0x0001
1057#define VM_FAULT_SIGBUS 0x0002 1087#define VM_FAULT_SIGBUS 0x0002
1058#define VM_FAULT_MAJOR 0x0004 1088#define VM_FAULT_MAJOR 0x0004
@@ -1113,10 +1143,14 @@ struct zap_details {
1113 struct address_space *check_mapping; /* Check page->mapping if set */ 1143 struct address_space *check_mapping; /* Check page->mapping if set */
1114 pgoff_t first_index; /* Lowest page->index to unmap */ 1144 pgoff_t first_index; /* Lowest page->index to unmap */
1115 pgoff_t last_index; /* Highest page->index to unmap */ 1145 pgoff_t last_index; /* Highest page->index to unmap */
1146 bool ignore_dirty; /* Ignore dirty pages */
1147 bool check_swap_entries; /* Check also swap entries */
1116}; 1148};
1117 1149
1118struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 1150struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1119 pte_t pte); 1151 pte_t pte);
1152struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1153 pmd_t pmd);
1120 1154
1121int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1155int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1122 unsigned long size); 1156 unsigned long size);
@@ -1225,20 +1259,20 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1225 unsigned long start, unsigned long nr_pages, 1259 unsigned long start, unsigned long nr_pages,
1226 unsigned int foll_flags, struct page **pages, 1260 unsigned int foll_flags, struct page **pages,
1227 struct vm_area_struct **vmas, int *nonblocking); 1261 struct vm_area_struct **vmas, int *nonblocking);
1228long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1262long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1229 unsigned long start, unsigned long nr_pages, 1263 unsigned long start, unsigned long nr_pages,
1230 int write, int force, struct page **pages, 1264 int write, int force, struct page **pages,
1231 struct vm_area_struct **vmas); 1265 struct vm_area_struct **vmas);
1232long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, 1266long get_user_pages(unsigned long start, unsigned long nr_pages,
1233 unsigned long start, unsigned long nr_pages, 1267 int write, int force, struct page **pages,
1234 int write, int force, struct page **pages, 1268 struct vm_area_struct **vmas);
1235 int *locked); 1269long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1270 int write, int force, struct page **pages, int *locked);
1236long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 1271long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1237 unsigned long start, unsigned long nr_pages, 1272 unsigned long start, unsigned long nr_pages,
1238 int write, int force, struct page **pages, 1273 int write, int force, struct page **pages,
1239 unsigned int gup_flags); 1274 unsigned int gup_flags);
1240long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 1275long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1241 unsigned long start, unsigned long nr_pages,
1242 int write, int force, struct page **pages); 1276 int write, int force, struct page **pages);
1243int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1277int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1244 struct page **pages); 1278 struct page **pages);
@@ -1299,10 +1333,9 @@ int __set_page_dirty_nobuffers(struct page *page);
1299int __set_page_dirty_no_writeback(struct page *page); 1333int __set_page_dirty_no_writeback(struct page *page);
1300int redirty_page_for_writepage(struct writeback_control *wbc, 1334int redirty_page_for_writepage(struct writeback_control *wbc,
1301 struct page *page); 1335 struct page *page);
1302void account_page_dirtied(struct page *page, struct address_space *mapping, 1336void account_page_dirtied(struct page *page, struct address_space *mapping);
1303 struct mem_cgroup *memcg);
1304void account_page_cleaned(struct page *page, struct address_space *mapping, 1337void account_page_cleaned(struct page *page, struct address_space *mapping,
1305 struct mem_cgroup *memcg, struct bdi_writeback *wb); 1338 struct bdi_writeback *wb);
1306int set_page_dirty(struct page *page); 1339int set_page_dirty(struct page *page);
1307int set_page_dirty_lock(struct page *page); 1340int set_page_dirty_lock(struct page *page);
1308void cancel_dirty_page(struct page *page); 1341void cancel_dirty_page(struct page *page);
@@ -1532,8 +1565,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1532} 1565}
1533#endif 1566#endif
1534 1567
1535int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 1568int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
1536 pmd_t *pmd, unsigned long address);
1537int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); 1569int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1538 1570
1539/* 1571/*
@@ -1659,15 +1691,15 @@ static inline void pgtable_page_dtor(struct page *page)
1659 pte_unmap(pte); \ 1691 pte_unmap(pte); \
1660} while (0) 1692} while (0)
1661 1693
1662#define pte_alloc_map(mm, vma, pmd, address) \ 1694#define pte_alloc(mm, pmd, address) \
1663 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \ 1695 (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
1664 pmd, address))? \ 1696
1665 NULL: pte_offset_map(pmd, address)) 1697#define pte_alloc_map(mm, pmd, address) \
1698 (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
1666 1699
1667#define pte_alloc_map_lock(mm, pmd, address, ptlp) \ 1700#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
1668 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \ 1701 (pte_alloc(mm, pmd, address) ? \
1669 pmd, address))? \ 1702 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
1670 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
1671 1703
1672#define pte_alloc_kernel(pmd, address) \ 1704#define pte_alloc_kernel(pmd, address) \
1673 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ 1705 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
@@ -1862,6 +1894,7 @@ extern int __meminit init_per_zone_wmark_min(void);
1862extern void mem_init(void); 1894extern void mem_init(void);
1863extern void __init mmap_init(void); 1895extern void __init mmap_init(void);
1864extern void show_mem(unsigned int flags); 1896extern void show_mem(unsigned int flags);
1897extern long si_mem_available(void);
1865extern void si_meminfo(struct sysinfo * val); 1898extern void si_meminfo(struct sysinfo * val);
1866extern void si_meminfo_node(struct sysinfo *val, int nid); 1899extern void si_meminfo_node(struct sysinfo *val, int nid);
1867 1900
@@ -1876,6 +1909,7 @@ extern void zone_pcp_reset(struct zone *zone);
1876 1909
1877/* page_alloc.c */ 1910/* page_alloc.c */
1878extern int min_free_kbytes; 1911extern int min_free_kbytes;
1912extern int watermark_scale_factor;
1879 1913
1880/* nommu.c */ 1914/* nommu.c */
1881extern atomic_long_t mmap_pages_allocated; 1915extern atomic_long_t mmap_pages_allocated;
@@ -2138,6 +2172,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2138int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 2172int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2139int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2173int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2140 unsigned long pfn); 2174 unsigned long pfn);
2175int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2176 unsigned long pfn, pgprot_t pgprot);
2141int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2177int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2142 pfn_t pfn); 2178 pfn_t pfn);
2143int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); 2179int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
@@ -2168,6 +2204,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
2168#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ 2204#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
2169#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ 2205#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
2170#define FOLL_MLOCK 0x1000 /* lock present pages */ 2206#define FOLL_MLOCK 0x1000 /* lock present pages */
2207#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
2171 2208
2172typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2209typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2173 void *data); 2210 void *data);
@@ -2175,6 +2212,17 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2175 unsigned long size, pte_fn_t fn, void *data); 2212 unsigned long size, pte_fn_t fn, void *data);
2176 2213
2177 2214
2215#ifdef CONFIG_PAGE_POISONING
2216extern bool page_poisoning_enabled(void);
2217extern void kernel_poison_pages(struct page *page, int numpages, int enable);
2218extern bool page_is_poisoned(struct page *page);
2219#else
2220static inline bool page_poisoning_enabled(void) { return false; }
2221static inline void kernel_poison_pages(struct page *page, int numpages,
2222 int enable) { }
2223static inline bool page_is_poisoned(struct page *page) { return false; }
2224#endif
2225
2178#ifdef CONFIG_DEBUG_PAGEALLOC 2226#ifdef CONFIG_DEBUG_PAGEALLOC
2179extern bool _debug_pagealloc_enabled; 2227extern bool _debug_pagealloc_enabled;
2180extern void __kernel_map_pages(struct page *page, int numpages, int enable); 2228extern void __kernel_map_pages(struct page *page, int numpages, int enable);
@@ -2194,14 +2242,18 @@ kernel_map_pages(struct page *page, int numpages, int enable)
2194} 2242}
2195#ifdef CONFIG_HIBERNATION 2243#ifdef CONFIG_HIBERNATION
2196extern bool kernel_page_present(struct page *page); 2244extern bool kernel_page_present(struct page *page);
2197#endif /* CONFIG_HIBERNATION */ 2245#endif /* CONFIG_HIBERNATION */
2198#else 2246#else /* CONFIG_DEBUG_PAGEALLOC */
2199static inline void 2247static inline void
2200kernel_map_pages(struct page *page, int numpages, int enable) {} 2248kernel_map_pages(struct page *page, int numpages, int enable) {}
2201#ifdef CONFIG_HIBERNATION 2249#ifdef CONFIG_HIBERNATION
2202static inline bool kernel_page_present(struct page *page) { return true; } 2250static inline bool kernel_page_present(struct page *page) { return true; }
2203#endif /* CONFIG_HIBERNATION */ 2251#endif /* CONFIG_HIBERNATION */
2204#endif 2252static inline bool debug_pagealloc_enabled(void)
2253{
2254 return false;
2255}
2256#endif /* CONFIG_DEBUG_PAGEALLOC */
2205 2257
2206#ifdef __HAVE_ARCH_GATE_AREA 2258#ifdef __HAVE_ARCH_GATE_AREA
2207extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); 2259extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 624b78b848b8..c2d75b4fa86c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct vm_area_struct {
341 341
342 /* Information about our backing store: */ 342 /* Information about our backing store: */
343 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE 343 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
344 units, *not* PAGE_CACHE_SIZE */ 344 units */
345 struct file * vm_file; /* File we map to (can be NULL). */ 345 struct file * vm_file; /* File we map to (can be NULL). */
346 void * vm_private_data; /* was vm_pte (shared mem) */ 346 void * vm_private_data; /* was vm_pte (shared mem) */
347 347
@@ -566,10 +566,26 @@ static inline void clear_tlb_flush_pending(struct mm_struct *mm)
566} 566}
567#endif 567#endif
568 568
569struct vm_special_mapping 569struct vm_fault;
570{ 570
571 const char *name; 571struct vm_special_mapping {
572 const char *name; /* The name, e.g. "[vdso]". */
573
574 /*
575 * If .fault is not provided, this points to a
576 * NULL-terminated array of pages that back the special mapping.
577 *
578 * This must not be NULL unless .fault is provided.
579 */
572 struct page **pages; 580 struct page **pages;
581
582 /*
583 * If non-NULL, then this is called to resolve page faults
584 * on the special mapping. If used, .pages is not checked.
585 */
586 int (*fault)(const struct vm_special_mapping *sm,
587 struct vm_area_struct *vma,
588 struct vm_fault *vmf);
573}; 589};
574 590
575enum tlb_flush_reason { 591enum tlb_flush_reason {
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 16373c8f5f57..33e17f6a327a 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -35,7 +35,7 @@ static inline void vm_unacct_memory(long pages)
35 */ 35 */
36 36
37#ifndef arch_calc_vm_prot_bits 37#ifndef arch_calc_vm_prot_bits
38#define arch_calc_vm_prot_bits(prot) 0 38#define arch_calc_vm_prot_bits(prot, pkey) 0
39#endif 39#endif
40 40
41#ifndef arch_vm_get_page_prot 41#ifndef arch_vm_get_page_prot
@@ -70,12 +70,12 @@ static inline int arch_validate_prot(unsigned long prot)
70 * Combine the mmap "prot" argument into "vm_flags" used internally. 70 * Combine the mmap "prot" argument into "vm_flags" used internally.
71 */ 71 */
72static inline unsigned long 72static inline unsigned long
73calc_vm_prot_bits(unsigned long prot) 73calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
74{ 74{
75 return _calc_vm_trans(prot, PROT_READ, VM_READ ) | 75 return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
76 _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) | 76 _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
77 _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) | 77 _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
78 arch_calc_vm_prot_bits(prot); 78 arch_calc_vm_prot_bits(prot, pkey);
79} 79}
80 80
81/* 81/*
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 37967b6da03c..b01e77de1a74 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -113,7 +113,6 @@ struct mmc_data {
113 113
114#define MMC_DATA_WRITE (1 << 8) 114#define MMC_DATA_WRITE (1 << 8)
115#define MMC_DATA_READ (1 << 9) 115#define MMC_DATA_READ (1 << 9)
116#define MMC_DATA_STREAM (1 << 10)
117 116
118 unsigned int bytes_xfered; 117 unsigned int bytes_xfered;
119 118
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 89df7abedd67..7b41c6db1bb6 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -235,21 +235,11 @@ struct dw_mci_dma_ops {
235}; 235};
236 236
237/* IP Quirks/flags. */ 237/* IP Quirks/flags. */
238/* Unreliable card detection */
239#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(0)
240/* Timer for broken data transfer over scheme */ 238/* Timer for broken data transfer over scheme */
241#define DW_MCI_QUIRK_BROKEN_DTO BIT(1) 239#define DW_MCI_QUIRK_BROKEN_DTO BIT(0)
242 240
243struct dma_pdata; 241struct dma_pdata;
244 242
245struct block_settings {
246 unsigned short max_segs; /* see blk_queue_max_segments */
247 unsigned int max_blk_size; /* maximum size of one mmc block */
248 unsigned int max_blk_count; /* maximum number of blocks in one req*/
249 unsigned int max_req_size; /* maximum number of bytes in one req*/
250 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
251};
252
253/* Board platform data */ 243/* Board platform data */
254struct dw_mci_board { 244struct dw_mci_board {
255 u32 num_slots; 245 u32 num_slots;
diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h
index 84d9053b5dca..5f5cd80e9765 100644
--- a/include/linux/mmc/tmio.h
+++ b/include/linux/mmc/tmio.h
@@ -1,6 +1,8 @@
1/* 1/*
2 * include/linux/mmc/tmio.h 2 * include/linux/mmc/tmio.h
3 * 3 *
4 * Copyright (C) 2016 Sang Engineering, Wolfram Sang
5 * Copyright (C) 2015-16 Renesas Electronics Corporation
4 * Copyright (C) 2007 Ian Molton 6 * Copyright (C) 2007 Ian Molton
5 * Copyright (C) 2004 Ian Molton 7 * Copyright (C) 2004 Ian Molton
6 * 8 *
@@ -61,6 +63,9 @@
61#define TMIO_STAT_CMD_BUSY 0x40000000 63#define TMIO_STAT_CMD_BUSY 0x40000000
62#define TMIO_STAT_ILL_ACCESS 0x80000000 64#define TMIO_STAT_ILL_ACCESS 0x80000000
63 65
66#define CLK_CTL_DIV_MASK 0xff
67#define CLK_CTL_SCLKEN BIT(8)
68
64#define TMIO_BBS 512 /* Boot block size */ 69#define TMIO_BBS 512 /* Boot block size */
65 70
66#endif /* LINUX_MMC_TMIO_H */ 71#endif /* LINUX_MMC_TMIO_H */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 053824b0a412..de7be78c6f0e 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -9,8 +9,7 @@ struct vm_area_struct;
9struct mm_struct; 9struct mm_struct;
10 10
11extern void dump_page(struct page *page, const char *reason); 11extern void dump_page(struct page *page, const char *reason);
12extern void dump_page_badflags(struct page *page, const char *reason, 12extern void __dump_page(struct page *page, const char *reason);
13 unsigned long badflags);
14void dump_vma(const struct vm_area_struct *vma); 13void dump_vma(const struct vm_area_struct *vma);
15void dump_mm(const struct mm_struct *mm); 14void dump_mm(const struct mm_struct *mm);
16 15
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7b6c2cfee390..c60df9257cc7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,6 +63,9 @@ enum {
63 MIGRATE_TYPES 63 MIGRATE_TYPES
64}; 64};
65 65
66/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
67extern char * const migratetype_names[MIGRATE_TYPES];
68
66#ifdef CONFIG_CMA 69#ifdef CONFIG_CMA
67# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) 70# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
68#else 71#else
@@ -209,10 +212,12 @@ struct zone_reclaim_stat {
209}; 212};
210 213
211struct lruvec { 214struct lruvec {
212 struct list_head lists[NR_LRU_LISTS]; 215 struct list_head lists[NR_LRU_LISTS];
213 struct zone_reclaim_stat reclaim_stat; 216 struct zone_reclaim_stat reclaim_stat;
217 /* Evictions & activations on the inactive file list */
218 atomic_long_t inactive_age;
214#ifdef CONFIG_MEMCG 219#ifdef CONFIG_MEMCG
215 struct zone *zone; 220 struct zone *zone;
216#endif 221#endif
217}; 222};
218 223
@@ -487,9 +492,6 @@ struct zone {
487 spinlock_t lru_lock; 492 spinlock_t lru_lock;
488 struct lruvec lruvec; 493 struct lruvec lruvec;
489 494
490 /* Evictions & activations on the inactive file list */
491 atomic_long_t inactive_age;
492
493 /* 495 /*
494 * When free pages are below this point, additional steps are taken 496 * When free pages are below this point, additional steps are taken
495 * when reading the number of free pages to avoid per-cpu counter 497 * when reading the number of free pages to avoid per-cpu counter
@@ -520,6 +522,8 @@ struct zone {
520 bool compact_blockskip_flush; 522 bool compact_blockskip_flush;
521#endif 523#endif
522 524
525 bool contiguous;
526
523 ZONE_PADDING(_pad3_) 527 ZONE_PADDING(_pad3_)
524 /* Zone statistics */ 528 /* Zone statistics */
525 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; 529 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
@@ -664,6 +668,12 @@ typedef struct pglist_data {
664 mem_hotplug_begin/end() */ 668 mem_hotplug_begin/end() */
665 int kswapd_max_order; 669 int kswapd_max_order;
666 enum zone_type classzone_idx; 670 enum zone_type classzone_idx;
671#ifdef CONFIG_COMPACTION
672 int kcompactd_max_order;
673 enum zone_type kcompactd_classzone_idx;
674 wait_queue_head_t kcompactd_wait;
675 struct task_struct *kcompactd;
676#endif
667#ifdef CONFIG_NUMA_BALANCING 677#ifdef CONFIG_NUMA_BALANCING
668 /* Lock serializing the migrate rate limiting window */ 678 /* Lock serializing the migrate rate limiting window */
669 spinlock_t numabalancing_migrate_lock; 679 spinlock_t numabalancing_migrate_lock;
@@ -758,6 +768,8 @@ static inline struct zone *lruvec_zone(struct lruvec *lruvec)
758#endif 768#endif
759} 769}
760 770
771extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);
772
761#ifdef CONFIG_HAVE_MEMORY_PRESENT 773#ifdef CONFIG_HAVE_MEMORY_PRESENT
762void memory_present(int nid, unsigned long start, unsigned long end); 774void memory_present(int nid, unsigned long start, unsigned long end);
763#else 775#else
@@ -829,6 +841,8 @@ static inline int is_highmem(struct zone *zone)
829struct ctl_table; 841struct ctl_table;
830int min_free_kbytes_sysctl_handler(struct ctl_table *, int, 842int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
831 void __user *, size_t *, loff_t *); 843 void __user *, size_t *, loff_t *);
844int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
845 void __user *, size_t *, loff_t *);
832extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; 846extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
833int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, 847int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
834 void __user *, size_t *, loff_t *); 848 void __user *, size_t *, loff_t *);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index a2a0068a8387..8b425c66305a 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -33,6 +33,14 @@ struct platform_msi_desc {
33}; 33};
34 34
35/** 35/**
36 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
37 * @msi_index: The index of the MSI descriptor
38 */
39struct fsl_mc_msi_desc {
40 u16 msi_index;
41};
42
43/**
36 * struct msi_desc - Descriptor structure for MSI based interrupts 44 * struct msi_desc - Descriptor structure for MSI based interrupts
37 * @list: List head for management 45 * @list: List head for management
38 * @irq: The base interrupt number 46 * @irq: The base interrupt number
@@ -87,6 +95,7 @@ struct msi_desc {
87 * tree wide cleanup. 95 * tree wide cleanup.
88 */ 96 */
89 struct platform_msi_desc platform; 97 struct platform_msi_desc platform;
98 struct fsl_mc_msi_desc fsl_mc;
90 }; 99 };
91}; 100};
92 101
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 36bb6a503f19..3bf8f954b642 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -166,7 +166,6 @@ struct bbm_info {
166}; 166};
167 167
168/* OneNAND BBT interface */ 168/* OneNAND BBT interface */
169extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
170extern int onenand_default_bbt(struct mtd_info *mtd); 169extern int onenand_default_bbt(struct mtd_info *mtd);
171 170
172#endif /* __LINUX_MTD_BBM_H */ 171#endif /* __LINUX_MTD_BBM_H */
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
index 02cd5f9b79b8..8255118be0f0 100644
--- a/include/linux/mtd/inftl.h
+++ b/include/linux/mtd/inftl.h
@@ -44,7 +44,6 @@ struct INFTLrecord {
44 unsigned int nb_blocks; /* number of physical blocks */ 44 unsigned int nb_blocks; /* number of physical blocks */
45 unsigned int nb_boot_blocks; /* number of blocks used by the bios */ 45 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
46 struct erase_info instr; 46 struct erase_info instr;
47 struct nand_ecclayout oobinfo;
48}; 47};
49 48
50int INFTL_mount(struct INFTLrecord *s); 49int INFTL_mount(struct INFTLrecord *s);
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 58f3ba709ade..5e0eb7ccabd4 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -240,8 +240,11 @@ struct map_info {
240 If there is no cache to care about this can be set to NULL. */ 240 If there is no cache to care about this can be set to NULL. */
241 void (*inval_cache)(struct map_info *, unsigned long, ssize_t); 241 void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
242 242
243 /* set_vpp() must handle being reentered -- enable, enable, disable 243 /* This will be called with 1 as parameter when the first map user
244 must leave it enabled. */ 244 * needs VPP, and called with 0 when the last user exits. The map
245 * core maintains a reference counter, and assumes that VPP is a
246 * global resource applying to all mapped flash chips on the system.
247 */
245 void (*set_vpp)(struct map_info *, int); 248 void (*set_vpp)(struct map_info *, int);
246 249
247 unsigned long pfow_base; 250 unsigned long pfow_base;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cc84923011c0..771272187316 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -105,7 +105,6 @@ struct mtd_oob_ops {
105struct nand_ecclayout { 105struct nand_ecclayout {
106 __u32 eccbytes; 106 __u32 eccbytes;
107 __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE]; 107 __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
108 __u32 oobavail;
109 struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; 108 struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
110}; 109};
111 110
@@ -265,6 +264,11 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
265 return mtd->dev.of_node; 264 return mtd->dev.of_node;
266} 265}
267 266
267static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
268{
269 return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
270}
271
268int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); 272int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
269int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, 273int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
270 void **virt, resource_size_t *phys); 274 void **virt, resource_size_t *phys);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index bdd68e22b5a5..56574ba36555 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -168,6 +168,12 @@ typedef enum {
168/* Device supports subpage reads */ 168/* Device supports subpage reads */
169#define NAND_SUBPAGE_READ 0x00001000 169#define NAND_SUBPAGE_READ 0x00001000
170 170
171/*
172 * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
173 * patterns.
174 */
175#define NAND_NEED_SCRAMBLING 0x00002000
176
171/* Options valid for Samsung large page devices */ 177/* Options valid for Samsung large page devices */
172#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG 178#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
173 179
@@ -666,7 +672,7 @@ struct nand_chip {
666 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); 672 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
667 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); 673 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
668 void (*select_chip)(struct mtd_info *mtd, int chip); 674 void (*select_chip)(struct mtd_info *mtd, int chip);
669 int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); 675 int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
670 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); 676 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
671 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); 677 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
672 int (*dev_ready)(struct mtd_info *mtd); 678 int (*dev_ready)(struct mtd_info *mtd);
@@ -896,7 +902,6 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
896 * @chip_delay: R/B delay value in us 902 * @chip_delay: R/B delay value in us
897 * @options: Option flags, e.g. 16bit buswidth 903 * @options: Option flags, e.g. 16bit buswidth
898 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH 904 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
899 * @ecclayout: ECC layout info structure
900 * @part_probe_types: NULL-terminated array of probe types 905 * @part_probe_types: NULL-terminated array of probe types
901 */ 906 */
902struct platform_nand_chip { 907struct platform_nand_chip {
@@ -904,7 +909,6 @@ struct platform_nand_chip {
904 int chip_offset; 909 int chip_offset;
905 int nr_partitions; 910 int nr_partitions;
906 struct mtd_partition *partitions; 911 struct mtd_partition *partitions;
907 struct nand_ecclayout *ecclayout;
908 int chip_delay; 912 int chip_delay;
909 unsigned int options; 913 unsigned int options;
910 unsigned int bbt_options; 914 unsigned int bbt_options;
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
index fb0bc3420a10..98f20ef05d60 100644
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -32,9 +32,7 @@ int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
32/* 32/*
33 * Initialize BCH encoder/decoder 33 * Initialize BCH encoder/decoder
34 */ 34 */
35struct nand_bch_control * 35struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
36nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
37 unsigned int eccbytes, struct nand_ecclayout **ecclayout);
38/* 36/*
39 * Release BCH encoder/decoder resources 37 * Release BCH encoder/decoder resources
40 */ 38 */
@@ -58,9 +56,7 @@ nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
58 return -ENOTSUPP; 56 return -ENOTSUPP;
59} 57}
60 58
61static inline struct nand_bch_control * 59static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
62nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
63 unsigned int eccbytes, struct nand_ecclayout **ecclayout)
64{ 60{
65 return NULL; 61 return NULL;
66} 62}
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h
index b059629e22bc..044daa02b8ff 100644
--- a/include/linux/mtd/nftl.h
+++ b/include/linux/mtd/nftl.h
@@ -50,7 +50,6 @@ struct NFTLrecord {
50 unsigned int nb_blocks; /* number of physical blocks */ 50 unsigned int nb_blocks; /* number of physical blocks */
51 unsigned int nb_boot_blocks; /* number of blocks used by the bios */ 51 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
52 struct erase_info instr; 52 struct erase_info instr;
53 struct nand_ecclayout oobinfo;
54}; 53};
55 54
56int NFTL_mount(struct NFTLrecord *s); 55int NFTL_mount(struct NFTLrecord *s);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 62356d50815b..3c36113a88e1 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -85,6 +85,7 @@
85#define SR_BP0 BIT(2) /* Block protect 0 */ 85#define SR_BP0 BIT(2) /* Block protect 0 */
86#define SR_BP1 BIT(3) /* Block protect 1 */ 86#define SR_BP1 BIT(3) /* Block protect 1 */
87#define SR_BP2 BIT(4) /* Block protect 2 */ 87#define SR_BP2 BIT(4) /* Block protect 2 */
88#define SR_TB BIT(5) /* Top/Bottom protect */
88#define SR_SRWD BIT(7) /* SR write protect */ 89#define SR_SRWD BIT(7) /* SR write protect */
89 90
90#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ 91#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
@@ -116,6 +117,7 @@ enum spi_nor_ops {
116 117
117enum spi_nor_option_flags { 118enum spi_nor_option_flags {
118 SNOR_F_USE_FSR = BIT(0), 119 SNOR_F_USE_FSR = BIT(0),
120 SNOR_F_HAS_SR_TB = BIT(1),
119}; 121};
120 122
121/** 123/**
diff --git a/include/linux/namei.h b/include/linux/namei.h
index d0f25d81b46a..ec5ec2818a28 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -31,6 +31,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
31#define LOOKUP_PARENT 0x0010 31#define LOOKUP_PARENT 0x0010
32#define LOOKUP_REVAL 0x0020 32#define LOOKUP_REVAL 0x0020
33#define LOOKUP_RCU 0x0040 33#define LOOKUP_RCU 0x0040
34#define LOOKUP_NO_REVAL 0x0080
34 35
35/* 36/*
36 * Intent data 37 * Intent data
@@ -78,6 +79,8 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
78 79
79extern struct dentry *lookup_one_len(const char *, struct dentry *, int); 80extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
80extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); 81extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
82struct qstr;
83extern struct dentry *lookup_hash(const struct qstr *, struct dentry *);
81 84
82extern int follow_down_one(struct path *); 85extern int follow_down_one(struct path *);
83extern int follow_down(struct path *); 86extern int follow_down(struct path *);
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 507e47c86737..5489ab756d1a 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -16,11 +16,16 @@
16#include <linux/ndctl.h> 16#include <linux/ndctl.h>
17#include <linux/device.h> 17#include <linux/device.h>
18 18
19enum nvdimm_event {
20 NVDIMM_REVALIDATE_POISON,
21};
22
19struct nd_device_driver { 23struct nd_device_driver {
20 struct device_driver drv; 24 struct device_driver drv;
21 unsigned long type; 25 unsigned long type;
22 int (*probe)(struct device *dev); 26 int (*probe)(struct device *dev);
23 int (*remove)(struct device *dev); 27 int (*remove)(struct device *dev);
28 void (*notify)(struct device *dev, enum nvdimm_event event);
24}; 29};
25 30
26static inline struct nd_device_driver *to_nd_device_driver( 31static inline struct nd_device_driver *to_nd_device_driver(
@@ -144,6 +149,8 @@ static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
144 MODULE_ALIAS("nd:t" __stringify(type) "*") 149 MODULE_ALIAS("nd:t" __stringify(type) "*")
145#define ND_DEVICE_MODALIAS_FMT "nd:t%d" 150#define ND_DEVICE_MODALIAS_FMT "nd:t%d"
146 151
152struct nd_region;
153void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
147int __must_check __nd_driver_register(struct nd_device_driver *nd_drv, 154int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
148 struct module *module, const char *mod_name); 155 struct module *module, const char *mod_name);
149#define nd_driver_register(driver) \ 156#define nd_driver_register(driver) \
diff --git a/include/linux/net.h b/include/linux/net.h
index 0b4ac7da583a..f840d77c6c31 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -215,6 +215,7 @@ int __sock_create(struct net *net, int family, int type, int proto,
215int sock_create(int family, int type, int proto, struct socket **res); 215int sock_create(int family, int type, int proto, struct socket **res);
216int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res); 216int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
217int sock_create_lite(int family, int type, int proto, struct socket **res); 217int sock_create_lite(int family, int type, int proto, struct socket **res);
218struct socket *sock_alloc(void);
218void sock_release(struct socket *sock); 219void sock_release(struct socket *sock);
219int sock_sendmsg(struct socket *sock, struct msghdr *msg); 220int sock_sendmsg(struct socket *sock, struct msghdr *msg);
220int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 221int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
@@ -245,7 +246,15 @@ do { \
245 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) 246 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
246#define net_info_ratelimited(fmt, ...) \ 247#define net_info_ratelimited(fmt, ...) \
247 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) 248 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
248#if defined(DEBUG) 249#if defined(CONFIG_DYNAMIC_DEBUG)
250#define net_dbg_ratelimited(fmt, ...) \
251do { \
252 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
253 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
254 net_ratelimit()) \
255 __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
256} while (0)
257#elif defined(DEBUG)
249#define net_dbg_ratelimited(fmt, ...) \ 258#define net_dbg_ratelimited(fmt, ...) \
250 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) 259 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
251#else 260#else
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index d9654f0eecb3..a734bf43d190 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -67,6 +67,8 @@ enum {
67 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ 67 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
68 NETIF_F_BUSY_POLL_BIT, /* Busy poll */ 68 NETIF_F_BUSY_POLL_BIT, /* Busy poll */
69 69
70 NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
71
70 /* 72 /*
71 * Add your fresh new feature above and remember to update 73 * Add your fresh new feature above and remember to update
72 * netdev_features_strings[] in net/core/ethtool.c and maybe 74 * netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -124,6 +126,7 @@ enum {
124#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 126#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
125#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) 127#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
126#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) 128#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
129#define NETIF_F_HW_TC __NETIF_F(HW_TC)
127 130
128#define for_each_netdev_feature(mask_addr, bit) \ 131#define for_each_netdev_feature(mask_addr, bit) \
129 for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) 132 for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5440b7b705eb..78181a88903b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -51,6 +51,7 @@
51#include <linux/neighbour.h> 51#include <linux/neighbour.h>
52#include <uapi/linux/netdevice.h> 52#include <uapi/linux/netdevice.h>
53#include <uapi/linux/if_bonding.h> 53#include <uapi/linux/if_bonding.h>
54#include <uapi/linux/pkt_cls.h>
54 55
55struct netpoll_info; 56struct netpoll_info;
56struct device; 57struct device;
@@ -80,8 +81,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
80 * function. Real network devices commonly used with qdiscs should only return 81 * function. Real network devices commonly used with qdiscs should only return
81 * the driver transmit return codes though - when qdiscs are used, the actual 82 * the driver transmit return codes though - when qdiscs are used, the actual
82 * transmission happens asynchronously, so the value is not propagated to 83 * transmission happens asynchronously, so the value is not propagated to
83 * higher layers. Virtual network devices transmit synchronously, in this case 84 * higher layers. Virtual network devices transmit synchronously; in this case
84 * the driver transmit return codes are consumed by dev_queue_xmit(), all 85 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
85 * others are propagated to higher layers. 86 * others are propagated to higher layers.
86 */ 87 */
87 88
@@ -128,7 +129,7 @@ static inline bool dev_xmit_complete(int rc)
128} 129}
129 130
130/* 131/*
131 * Compute the worst case header length according to the protocols 132 * Compute the worst-case header length according to the protocols
132 * used. 133 * used.
133 */ 134 */
134 135
@@ -245,7 +246,7 @@ struct hh_cache {
245 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; 246 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
246}; 247};
247 248
248/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. 249/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
249 * Alternative is: 250 * Alternative is:
250 * dev->hard_header_len ? (dev->hard_header_len + 251 * dev->hard_header_len ? (dev->hard_header_len +
251 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 252 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
@@ -267,10 +268,11 @@ struct header_ops {
267 void (*cache_update)(struct hh_cache *hh, 268 void (*cache_update)(struct hh_cache *hh,
268 const struct net_device *dev, 269 const struct net_device *dev,
269 const unsigned char *haddr); 270 const unsigned char *haddr);
271 bool (*validate)(const char *ll_header, unsigned int len);
270}; 272};
271 273
272/* These flag bits are private to the generic network queueing 274/* These flag bits are private to the generic network queueing
273 * layer, they may not be explicitly referenced by any other 275 * layer; they may not be explicitly referenced by any other
274 * code. 276 * code.
275 */ 277 */
276 278
@@ -284,7 +286,7 @@ enum netdev_state_t {
284 286
285 287
286/* 288/*
287 * This structure holds at boot time configured netdevice settings. They 289 * This structure holds boot-time configured netdevice settings. They
288 * are then used in the device probing. 290 * are then used in the device probing.
289 */ 291 */
290struct netdev_boot_setup { 292struct netdev_boot_setup {
@@ -302,7 +304,7 @@ struct napi_struct {
302 /* The poll_list must only be managed by the entity which 304 /* The poll_list must only be managed by the entity which
303 * changes the state of the NAPI_STATE_SCHED bit. This means 305 * changes the state of the NAPI_STATE_SCHED bit. This means
304 * whoever atomically sets that bit can add this napi_struct 306 * whoever atomically sets that bit can add this napi_struct
305 * to the per-cpu poll_list, and whoever clears that bit 307 * to the per-CPU poll_list, and whoever clears that bit
306 * can remove from the list right before clearing the bit. 308 * can remove from the list right before clearing the bit.
307 */ 309 */
308 struct list_head poll_list; 310 struct list_head poll_list;
@@ -348,7 +350,7 @@ typedef enum gro_result gro_result_t;
348 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in 350 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
349 * case skb->dev was changed by rx_handler. 351 * case skb->dev was changed by rx_handler.
350 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. 352 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
351 * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called. 353 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
352 * 354 *
353 * rx_handlers are functions called from inside __netif_receive_skb(), to do 355 * rx_handlers are functions called from inside __netif_receive_skb(), to do
354 * special processing of the skb, prior to delivery to protocol handlers. 356 * special processing of the skb, prior to delivery to protocol handlers.
@@ -363,19 +365,19 @@ typedef enum gro_result gro_result_t;
363 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to 365 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
364 * do with the skb. 366 * do with the skb.
365 * 367 *
366 * If the rx_handler consumed to skb in some way, it should return 368 * If the rx_handler consumed the skb in some way, it should return
367 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for 369 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
368 * the skb to be delivered in some other ways. 370 * the skb to be delivered in some other way.
369 * 371 *
370 * If the rx_handler changed skb->dev, to divert the skb to another 372 * If the rx_handler changed skb->dev, to divert the skb to another
371 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the 373 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
372 * new device will be called if it exists. 374 * new device will be called if it exists.
373 * 375 *
374 * If the rx_handler consider the skb should be ignored, it should return 376 * If the rx_handler decides the skb should be ignored, it should return
375 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that 377 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
376 * are registered on exact device (ptype->dev == skb->dev). 378 * are registered on exact device (ptype->dev == skb->dev).
377 * 379 *
378 * If the rx_handler didn't changed skb->dev, but want the skb to be normally 380 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
379 * delivered, it should return RX_HANDLER_PASS. 381 * delivered, it should return RX_HANDLER_PASS.
380 * 382 *
381 * A device without a registered rx_handler will behave as if rx_handler 383 * A device without a registered rx_handler will behave as if rx_handler
@@ -400,11 +402,11 @@ static inline bool napi_disable_pending(struct napi_struct *n)
400} 402}
401 403
402/** 404/**
403 * napi_schedule_prep - check if napi can be scheduled 405 * napi_schedule_prep - check if NAPI can be scheduled
404 * @n: napi context 406 * @n: NAPI context
405 * 407 *
406 * Test if NAPI routine is already running, and if not mark 408 * Test if NAPI routine is already running, and if not mark
407 * it as running. This is used as a condition variable 409 * it as running. This is used as a condition variable to
408 * insure only one NAPI poll instance runs. We also make 410 * insure only one NAPI poll instance runs. We also make
409 * sure there is no pending NAPI disable. 411 * sure there is no pending NAPI disable.
410 */ 412 */
@@ -416,7 +418,7 @@ static inline bool napi_schedule_prep(struct napi_struct *n)
416 418
417/** 419/**
418 * napi_schedule - schedule NAPI poll 420 * napi_schedule - schedule NAPI poll
419 * @n: napi context 421 * @n: NAPI context
420 * 422 *
421 * Schedule NAPI poll routine to be called if it is not already 423 * Schedule NAPI poll routine to be called if it is not already
422 * running. 424 * running.
@@ -429,7 +431,7 @@ static inline void napi_schedule(struct napi_struct *n)
429 431
430/** 432/**
431 * napi_schedule_irqoff - schedule NAPI poll 433 * napi_schedule_irqoff - schedule NAPI poll
432 * @n: napi context 434 * @n: NAPI context
433 * 435 *
434 * Variant of napi_schedule(), assuming hard irqs are masked. 436 * Variant of napi_schedule(), assuming hard irqs are masked.
435 */ 437 */
@@ -453,7 +455,7 @@ void __napi_complete(struct napi_struct *n);
453void napi_complete_done(struct napi_struct *n, int work_done); 455void napi_complete_done(struct napi_struct *n, int work_done);
454/** 456/**
455 * napi_complete - NAPI processing complete 457 * napi_complete - NAPI processing complete
456 * @n: napi context 458 * @n: NAPI context
457 * 459 *
458 * Mark NAPI processing as complete. 460 * Mark NAPI processing as complete.
459 * Consider using napi_complete_done() instead. 461 * Consider using napi_complete_done() instead.
@@ -465,32 +467,32 @@ static inline void napi_complete(struct napi_struct *n)
465 467
466/** 468/**
467 * napi_hash_add - add a NAPI to global hashtable 469 * napi_hash_add - add a NAPI to global hashtable
468 * @napi: napi context 470 * @napi: NAPI context
469 * 471 *
470 * generate a new napi_id and store a @napi under it in napi_hash 472 * Generate a new napi_id and store a @napi under it in napi_hash.
471 * Used for busy polling (CONFIG_NET_RX_BUSY_POLL) 473 * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
472 * Note: This is normally automatically done from netif_napi_add(), 474 * Note: This is normally automatically done from netif_napi_add(),
473 * so might disappear in a future linux version. 475 * so might disappear in a future Linux version.
474 */ 476 */
475void napi_hash_add(struct napi_struct *napi); 477void napi_hash_add(struct napi_struct *napi);
476 478
477/** 479/**
478 * napi_hash_del - remove a NAPI from global table 480 * napi_hash_del - remove a NAPI from global table
479 * @napi: napi context 481 * @napi: NAPI context
480 * 482 *
481 * Warning: caller must observe rcu grace period 483 * Warning: caller must observe RCU grace period
482 * before freeing memory containing @napi, if 484 * before freeing memory containing @napi, if
483 * this function returns true. 485 * this function returns true.
484 * Note: core networking stack automatically calls it 486 * Note: core networking stack automatically calls it
485 * from netif_napi_del() 487 * from netif_napi_del().
486 * Drivers might want to call this helper to combine all 488 * Drivers might want to call this helper to combine all
487 * the needed rcu grace periods into a single one. 489 * the needed RCU grace periods into a single one.
488 */ 490 */
489bool napi_hash_del(struct napi_struct *napi); 491bool napi_hash_del(struct napi_struct *napi);
490 492
491/** 493/**
492 * napi_disable - prevent NAPI from scheduling 494 * napi_disable - prevent NAPI from scheduling
493 * @n: napi context 495 * @n: NAPI context
494 * 496 *
495 * Stop NAPI from being scheduled on this context. 497 * Stop NAPI from being scheduled on this context.
496 * Waits till any outstanding processing completes. 498 * Waits till any outstanding processing completes.
@@ -499,7 +501,7 @@ void napi_disable(struct napi_struct *n);
499 501
500/** 502/**
501 * napi_enable - enable NAPI scheduling 503 * napi_enable - enable NAPI scheduling
502 * @n: napi context 504 * @n: NAPI context
503 * 505 *
504 * Resume NAPI from being scheduled on this context. 506 * Resume NAPI from being scheduled on this context.
505 * Must be paired with napi_disable. 507 * Must be paired with napi_disable.
@@ -514,7 +516,7 @@ static inline void napi_enable(struct napi_struct *n)
514 516
515/** 517/**
516 * napi_synchronize - wait until NAPI is not running 518 * napi_synchronize - wait until NAPI is not running
517 * @n: napi context 519 * @n: NAPI context
518 * 520 *
519 * Wait until NAPI is done being scheduled on this context. 521 * Wait until NAPI is done being scheduled on this context.
520 * Waits till any outstanding processing completes but 522 * Waits till any outstanding processing completes but
@@ -557,7 +559,7 @@ enum netdev_queue_state_t {
557 559
558struct netdev_queue { 560struct netdev_queue {
559/* 561/*
560 * read mostly part 562 * read-mostly part
561 */ 563 */
562 struct net_device *dev; 564 struct net_device *dev;
563 struct Qdisc __rcu *qdisc; 565 struct Qdisc __rcu *qdisc;
@@ -569,7 +571,7 @@ struct netdev_queue {
569 int numa_node; 571 int numa_node;
570#endif 572#endif
571/* 573/*
572 * write mostly part 574 * write-mostly part
573 */ 575 */
574 spinlock_t _xmit_lock ____cacheline_aligned_in_smp; 576 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
575 int xmit_lock_owner; 577 int xmit_lock_owner;
@@ -646,11 +648,11 @@ struct rps_dev_flow_table {
646/* 648/*
647 * The rps_sock_flow_table contains mappings of flows to the last CPU 649 * The rps_sock_flow_table contains mappings of flows to the last CPU
648 * on which they were processed by the application (set in recvmsg). 650 * on which they were processed by the application (set in recvmsg).
649 * Each entry is a 32bit value. Upper part is the high order bits 651 * Each entry is a 32bit value. Upper part is the high-order bits
650 * of flow hash, lower part is cpu number. 652 * of flow hash, lower part is CPU number.
651 * rps_cpu_mask is used to partition the space, depending on number of 653 * rps_cpu_mask is used to partition the space, depending on number of
652 * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 654 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
653 * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f, 655 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
654 * meaning we use 32-6=26 bits for the hash. 656 * meaning we use 32-6=26 bits for the hash.
655 */ 657 */
656struct rps_sock_flow_table { 658struct rps_sock_flow_table {
@@ -672,7 +674,7 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
672 unsigned int index = hash & table->mask; 674 unsigned int index = hash & table->mask;
673 u32 val = hash & ~rps_cpu_mask; 675 u32 val = hash & ~rps_cpu_mask;
674 676
675 /* We only give a hint, preemption can change cpu under us */ 677 /* We only give a hint, preemption can change CPU under us */
676 val |= raw_smp_processor_id(); 678 val |= raw_smp_processor_id();
677 679
678 if (table->ents[index] != val) 680 if (table->ents[index] != val)
@@ -778,27 +780,48 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
778typedef u16 (*select_queue_fallback_t)(struct net_device *dev, 780typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
779 struct sk_buff *skb); 781 struct sk_buff *skb);
780 782
783/* These structures hold the attributes of qdisc and classifiers
784 * that are being passed to the netdevice through the setup_tc op.
785 */
786enum {
787 TC_SETUP_MQPRIO,
788 TC_SETUP_CLSU32,
789 TC_SETUP_CLSFLOWER,
790};
791
792struct tc_cls_u32_offload;
793
794struct tc_to_netdev {
795 unsigned int type;
796 union {
797 u8 tc;
798 struct tc_cls_u32_offload *cls_u32;
799 struct tc_cls_flower_offload *cls_flower;
800 };
801};
802
803
781/* 804/*
782 * This structure defines the management hooks for network devices. 805 * This structure defines the management hooks for network devices.
783 * The following hooks can be defined; unless noted otherwise, they are 806 * The following hooks can be defined; unless noted otherwise, they are
784 * optional and can be filled with a null pointer. 807 * optional and can be filled with a null pointer.
785 * 808 *
786 * int (*ndo_init)(struct net_device *dev); 809 * int (*ndo_init)(struct net_device *dev);
787 * This function is called once when network device is registered. 810 * This function is called once when a network device is registered.
788 * The network device can use this to any late stage initializaton 811 * The network device can use this for any late stage initialization
789 * or semantic validattion. It can fail with an error code which will 812 * or semantic validation. It can fail with an error code which will
790 * be propogated back to register_netdev 813 * be propagated back to register_netdev.
791 * 814 *
792 * void (*ndo_uninit)(struct net_device *dev); 815 * void (*ndo_uninit)(struct net_device *dev);
793 * This function is called when device is unregistered or when registration 816 * This function is called when device is unregistered or when registration
794 * fails. It is not called if init fails. 817 * fails. It is not called if init fails.
795 * 818 *
796 * int (*ndo_open)(struct net_device *dev); 819 * int (*ndo_open)(struct net_device *dev);
797 * This function is called when network device transistions to the up 820 * This function is called when a network device transitions to the up
798 * state. 821 * state.
799 * 822 *
800 * int (*ndo_stop)(struct net_device *dev); 823 * int (*ndo_stop)(struct net_device *dev);
801 * This function is called when network device transistions to the down 824 * This function is called when a network device transitions to the down
802 * state. 825 * state.
803 * 826 *
804 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 827 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
@@ -809,7 +832,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
809 * corner cases, but the stack really does a non-trivial amount 832 * corner cases, but the stack really does a non-trivial amount
810 * of useless work if you return NETDEV_TX_BUSY. 833 * of useless work if you return NETDEV_TX_BUSY.
811 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) 834 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
812 * Required can not be NULL. 835 * Required; cannot be NULL.
813 * 836 *
814 * netdev_features_t (*ndo_fix_features)(struct net_device *dev, 837 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
815 * netdev_features_t features); 838 * netdev_features_t features);
@@ -819,34 +842,34 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
819 * 842 *
820 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 843 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
821 * void *accel_priv, select_queue_fallback_t fallback); 844 * void *accel_priv, select_queue_fallback_t fallback);
822 * Called to decide which queue to when device supports multiple 845 * Called to decide which queue to use when device supports multiple
823 * transmit queues. 846 * transmit queues.
824 * 847 *
825 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); 848 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
826 * This function is called to allow device receiver to make 849 * This function is called to allow device receiver to make
827 * changes to configuration when multicast or promiscious is enabled. 850 * changes to configuration when multicast or promiscuous is enabled.
828 * 851 *
829 * void (*ndo_set_rx_mode)(struct net_device *dev); 852 * void (*ndo_set_rx_mode)(struct net_device *dev);
830 * This function is called device changes address list filtering. 853 * This function is called device changes address list filtering.
831 * If driver handles unicast address filtering, it should set 854 * If driver handles unicast address filtering, it should set
832 * IFF_UNICAST_FLT to its priv_flags. 855 * IFF_UNICAST_FLT in its priv_flags.
833 * 856 *
834 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); 857 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
835 * This function is called when the Media Access Control address 858 * This function is called when the Media Access Control address
836 * needs to be changed. If this interface is not defined, the 859 * needs to be changed. If this interface is not defined, the
837 * mac address can not be changed. 860 * MAC address can not be changed.
838 * 861 *
839 * int (*ndo_validate_addr)(struct net_device *dev); 862 * int (*ndo_validate_addr)(struct net_device *dev);
840 * Test if Media Access Control address is valid for the device. 863 * Test if Media Access Control address is valid for the device.
841 * 864 *
842 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); 865 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
843 * Called when a user request an ioctl which can't be handled by 866 * Called when a user requests an ioctl which can't be handled by
844 * the generic interface code. If not defined ioctl's return 867 * the generic interface code. If not defined ioctls return
845 * not supported error code. 868 * not supported error code.
846 * 869 *
847 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); 870 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
848 * Used to set network devices bus interface parameters. This interface 871 * Used to set network devices bus interface parameters. This interface
849 * is retained for legacy reason, new devices should use the bus 872 * is retained for legacy reasons; new devices should use the bus
850 * interface (PCI) for low level management. 873 * interface (PCI) for low level management.
851 * 874 *
852 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); 875 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
@@ -855,7 +878,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
855 * will return an error. 878 * will return an error.
856 * 879 *
857 * void (*ndo_tx_timeout)(struct net_device *dev); 880 * void (*ndo_tx_timeout)(struct net_device *dev);
858 * Callback uses when the transmitter has not made any progress 881 * Callback used when the transmitter has not made any progress
859 * for dev->watchdog ticks. 882 * for dev->watchdog ticks.
860 * 883 *
861 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 884 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
@@ -873,11 +896,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
873 * neither operation. 896 * neither operation.
874 * 897 *
875 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); 898 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
876 * If device support VLAN filtering this function is called when a 899 * If device supports VLAN filtering this function is called when a
877 * VLAN id is registered. 900 * VLAN id is registered.
878 * 901 *
879 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); 902 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
880 * If device support VLAN filtering this function is called when a 903 * If device supports VLAN filtering this function is called when a
881 * VLAN id is unregistered. 904 * VLAN id is unregistered.
882 * 905 *
883 * void (*ndo_poll_controller)(struct net_device *dev); 906 * void (*ndo_poll_controller)(struct net_device *dev);
@@ -897,7 +920,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
897 * 920 *
898 * Enable or disable the VF ability to query its RSS Redirection Table and 921 * Enable or disable the VF ability to query its RSS Redirection Table and
899 * Hash Key. This is needed since on some devices VF share this information 922 * Hash Key. This is needed since on some devices VF share this information
900 * with PF and querying it may adduce a theoretical security risk. 923 * with PF and querying it may introduce a theoretical security risk.
901 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); 924 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
902 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); 925 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
903 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) 926 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
@@ -1007,20 +1030,20 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1007 * 1030 *
1008 * void (*ndo_add_vxlan_port)(struct net_device *dev, 1031 * void (*ndo_add_vxlan_port)(struct net_device *dev,
1009 * sa_family_t sa_family, __be16 port); 1032 * sa_family_t sa_family, __be16 port);
1010 * Called by vxlan to notiy a driver about the UDP port and socket 1033 * Called by vxlan to notify a driver about the UDP port and socket
1011 * address family that vxlan is listnening to. It is called only when 1034 * address family that vxlan is listening to. It is called only when
1012 * a new port starts listening. The operation is protected by the 1035 * a new port starts listening. The operation is protected by the
1013 * vxlan_net->sock_lock. 1036 * vxlan_net->sock_lock.
1014 * 1037 *
1015 * void (*ndo_add_geneve_port)(struct net_device *dev, 1038 * void (*ndo_add_geneve_port)(struct net_device *dev,
1016 * sa_family_t sa_family, __be16 port); 1039 * sa_family_t sa_family, __be16 port);
1017 * Called by geneve to notify a driver about the UDP port and socket 1040 * Called by geneve to notify a driver about the UDP port and socket
1018 * address family that geneve is listnening to. It is called only when 1041 * address family that geneve is listnening to. It is called only when
1019 * a new port starts listening. The operation is protected by the 1042 * a new port starts listening. The operation is protected by the
1020 * geneve_net->sock_lock. 1043 * geneve_net->sock_lock.
1021 * 1044 *
1022 * void (*ndo_del_geneve_port)(struct net_device *dev, 1045 * void (*ndo_del_geneve_port)(struct net_device *dev,
1023 * sa_family_t sa_family, __be16 port); 1046 * sa_family_t sa_family, __be16 port);
1024 * Called by geneve to notify the driver about a UDP port and socket 1047 * Called by geneve to notify the driver about a UDP port and socket
1025 * address family that geneve is not listening to anymore. The operation 1048 * address family that geneve is not listening to anymore. The operation
1026 * is protected by the geneve_net->sock_lock. 1049 * is protected by the geneve_net->sock_lock.
@@ -1049,9 +1072,9 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1049 * Callback to use for xmit over the accelerated station. This 1072 * Callback to use for xmit over the accelerated station. This
1050 * is used in place of ndo_start_xmit on accelerated net 1073 * is used in place of ndo_start_xmit on accelerated net
1051 * devices. 1074 * devices.
1052 * netdev_features_t (*ndo_features_check) (struct sk_buff *skb, 1075 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1053 * struct net_device *dev 1076 * struct net_device *dev
1054 * netdev_features_t features); 1077 * netdev_features_t features);
1055 * Called by core transmit path to determine if device is capable of 1078 * Called by core transmit path to determine if device is capable of
1056 * performing offload operations on a given packet. This is to give 1079 * performing offload operations on a given packet. This is to give
1057 * the device an opportunity to implement any restrictions that cannot 1080 * the device an opportunity to implement any restrictions that cannot
@@ -1065,7 +1088,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1065 * int (*ndo_get_iflink)(const struct net_device *dev); 1088 * int (*ndo_get_iflink)(const struct net_device *dev);
1066 * Called to get the iflink value of this device. 1089 * Called to get the iflink value of this device.
1067 * void (*ndo_change_proto_down)(struct net_device *dev, 1090 * void (*ndo_change_proto_down)(struct net_device *dev,
1068 * bool proto_down); 1091 * bool proto_down);
1069 * This function is used to pass protocol port error state information 1092 * This function is used to pass protocol port error state information
1070 * to the switch driver. The switch driver can react to the proto_down 1093 * to the switch driver. The switch driver can react to the proto_down
1071 * by doing a phys down on the associated switch port. 1094 * by doing a phys down on the associated switch port.
@@ -1073,6 +1096,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1073 * This function is used to get egress tunnel information for given skb. 1096 * This function is used to get egress tunnel information for given skb.
1074 * This is useful for retrieving outer tunnel header parameters while 1097 * This is useful for retrieving outer tunnel header parameters while
1075 * sampling packet. 1098 * sampling packet.
1099 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1100 * This function is used to specify the headroom that the skb must
1101 * consider when allocation skb during packet reception. Setting
1102 * appropriate rx headroom value allows avoiding skb head copy on
1103 * forward. Setting a negative value resets the rx headroom to the
1104 * default value.
1076 * 1105 *
1077 */ 1106 */
1078struct net_device_ops { 1107struct net_device_ops {
@@ -1147,10 +1176,16 @@ struct net_device_ops {
1147 struct nlattr *port[]); 1176 struct nlattr *port[]);
1148 int (*ndo_get_vf_port)(struct net_device *dev, 1177 int (*ndo_get_vf_port)(struct net_device *dev,
1149 int vf, struct sk_buff *skb); 1178 int vf, struct sk_buff *skb);
1179 int (*ndo_set_vf_guid)(struct net_device *dev,
1180 int vf, u64 guid,
1181 int guid_type);
1150 int (*ndo_set_vf_rss_query_en)( 1182 int (*ndo_set_vf_rss_query_en)(
1151 struct net_device *dev, 1183 struct net_device *dev,
1152 int vf, bool setting); 1184 int vf, bool setting);
1153 int (*ndo_setup_tc)(struct net_device *dev, u8 tc); 1185 int (*ndo_setup_tc)(struct net_device *dev,
1186 u32 handle,
1187 __be16 protocol,
1188 struct tc_to_netdev *tc);
1154#if IS_ENABLED(CONFIG_FCOE) 1189#if IS_ENABLED(CONFIG_FCOE)
1155 int (*ndo_fcoe_enable)(struct net_device *dev); 1190 int (*ndo_fcoe_enable)(struct net_device *dev);
1156 int (*ndo_fcoe_disable)(struct net_device *dev); 1191 int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -1255,6 +1290,8 @@ struct net_device_ops {
1255 bool proto_down); 1290 bool proto_down);
1256 int (*ndo_fill_metadata_dst)(struct net_device *dev, 1291 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1257 struct sk_buff *skb); 1292 struct sk_buff *skb);
1293 void (*ndo_set_rx_headroom)(struct net_device *dev,
1294 int needed_headroom);
1258}; 1295};
1259 1296
1260/** 1297/**
@@ -1262,7 +1299,7 @@ struct net_device_ops {
1262 * 1299 *
1263 * These are the &struct net_device, they are only set internally 1300 * These are the &struct net_device, they are only set internally
1264 * by drivers and used in the kernel. These flags are invisible to 1301 * by drivers and used in the kernel. These flags are invisible to
1265 * userspace, this means that the order of these flags can change 1302 * userspace; this means that the order of these flags can change
1266 * during any kernel release. 1303 * during any kernel release.
1267 * 1304 *
1268 * You should have a pretty good reason to be extending these flags. 1305 * You should have a pretty good reason to be extending these flags.
@@ -1286,11 +1323,19 @@ struct net_device_ops {
1286 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address 1323 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1287 * change when it's running 1324 * change when it's running
1288 * @IFF_MACVLAN: Macvlan device 1325 * @IFF_MACVLAN: Macvlan device
1326 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1327 * underlying stacked devices
1328 * @IFF_IPVLAN_MASTER: IPvlan master device
1329 * @IFF_IPVLAN_SLAVE: IPvlan slave device
1289 * @IFF_L3MDEV_MASTER: device is an L3 master device 1330 * @IFF_L3MDEV_MASTER: device is an L3 master device
1290 * @IFF_NO_QUEUE: device can run without qdisc attached 1331 * @IFF_NO_QUEUE: device can run without qdisc attached
1291 * @IFF_OPENVSWITCH: device is a Open vSwitch master 1332 * @IFF_OPENVSWITCH: device is a Open vSwitch master
1292 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device 1333 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1293 * @IFF_TEAM: device is a team device 1334 * @IFF_TEAM: device is a team device
1335 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1336 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1337 * entity (i.e. the master device for bridged veth)
1338 * @IFF_MACSEC: device is a MACsec device
1294 */ 1339 */
1295enum netdev_priv_flags { 1340enum netdev_priv_flags {
1296 IFF_802_1Q_VLAN = 1<<0, 1341 IFF_802_1Q_VLAN = 1<<0,
@@ -1318,6 +1363,9 @@ enum netdev_priv_flags {
1318 IFF_OPENVSWITCH = 1<<22, 1363 IFF_OPENVSWITCH = 1<<22,
1319 IFF_L3MDEV_SLAVE = 1<<23, 1364 IFF_L3MDEV_SLAVE = 1<<23,
1320 IFF_TEAM = 1<<24, 1365 IFF_TEAM = 1<<24,
1366 IFF_RXFH_CONFIGURED = 1<<25,
1367 IFF_PHONY_HEADROOM = 1<<26,
1368 IFF_MACSEC = 1<<27,
1321}; 1369};
1322 1370
1323#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1371#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1345,6 +1393,8 @@ enum netdev_priv_flags {
1345#define IFF_OPENVSWITCH IFF_OPENVSWITCH 1393#define IFF_OPENVSWITCH IFF_OPENVSWITCH
1346#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE 1394#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1347#define IFF_TEAM IFF_TEAM 1395#define IFF_TEAM IFF_TEAM
1396#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1397#define IFF_MACSEC IFF_MACSEC
1348 1398
1349/** 1399/**
1350 * struct net_device - The DEVICE structure. 1400 * struct net_device - The DEVICE structure.
@@ -1367,10 +1417,12 @@ enum netdev_priv_flags {
1367 * 1417 *
1368 * @state: Generic network queuing layer state, see netdev_state_t 1418 * @state: Generic network queuing layer state, see netdev_state_t
1369 * @dev_list: The global list of network devices 1419 * @dev_list: The global list of network devices
1370 * @napi_list: List entry, that is used for polling napi devices 1420 * @napi_list: List entry used for polling NAPI devices
1371 * @unreg_list: List entry, that is used, when we are unregistering the 1421 * @unreg_list: List entry when we are unregistering the
1372 * device, see the function unregister_netdev 1422 * device; see the function unregister_netdev
1373 * @close_list: List entry, that is used, when we are closing the device 1423 * @close_list: List entry used when we are closing the device
1424 * @ptype_all: Device-specific packet handlers for all protocols
1425 * @ptype_specific: Device-specific, protocol-specific packet handlers
1374 * 1426 *
1375 * @adj_list: Directly linked devices, like slaves for bonding 1427 * @adj_list: Directly linked devices, like slaves for bonding
1376 * @all_adj_list: All linked devices, *including* neighbours 1428 * @all_adj_list: All linked devices, *including* neighbours
@@ -1388,7 +1440,7 @@ enum netdev_priv_flags {
1388 * @mpls_features: Mask of features inheritable by MPLS 1440 * @mpls_features: Mask of features inheritable by MPLS
1389 * 1441 *
1390 * @ifindex: interface index 1442 * @ifindex: interface index
1391 * @group: The group, that the device belongs to 1443 * @group: The group the device belongs to
1392 * 1444 *
1393 * @stats: Statistics struct, which was left as a legacy, use 1445 * @stats: Statistics struct, which was left as a legacy, use
1394 * rtnl_link_stats64 instead 1446 * rtnl_link_stats64 instead
@@ -1397,6 +1449,8 @@ enum netdev_priv_flags {
1397 * do not use this in drivers 1449 * do not use this in drivers
1398 * @tx_dropped: Dropped packets by core network, 1450 * @tx_dropped: Dropped packets by core network,
1399 * do not use this in drivers 1451 * do not use this in drivers
1452 * @rx_nohandler: nohandler dropped packets by core network on
1453 * inactive devices, do not use this in drivers
1400 * 1454 *
1401 * @wireless_handlers: List of functions to handle Wireless Extensions, 1455 * @wireless_handlers: List of functions to handle Wireless Extensions,
1402 * instead of ioctl, 1456 * instead of ioctl,
@@ -1420,8 +1474,7 @@ enum netdev_priv_flags {
1420 * @dma: DMA channel 1474 * @dma: DMA channel
1421 * @mtu: Interface MTU value 1475 * @mtu: Interface MTU value
1422 * @type: Interface hardware type 1476 * @type: Interface hardware type
1423 * @hard_header_len: Hardware header length, which means that this is the 1477 * @hard_header_len: Maximum hardware header length.
1424 * minimum size of a packet.
1425 * 1478 *
1426 * @needed_headroom: Extra headroom the hardware may need, but not in all 1479 * @needed_headroom: Extra headroom the hardware may need, but not in all
1427 * cases can this be guaranteed 1480 * cases can this be guaranteed
@@ -1441,7 +1494,7 @@ enum netdev_priv_flags {
1441 * @dev_port: Used to differentiate devices that share 1494 * @dev_port: Used to differentiate devices that share
1442 * the same function 1495 * the same function
1443 * @addr_list_lock: XXX: need comments on this one 1496 * @addr_list_lock: XXX: need comments on this one
1444 * @uc_promisc: Counter, that indicates, that promiscuous mode 1497 * @uc_promisc: Counter that indicates promiscuous mode
1445 * has been enabled due to the need to listen to 1498 * has been enabled due to the need to listen to
1446 * additional unicast addresses in a device that 1499 * additional unicast addresses in a device that
1447 * does not implement ndo_set_rx_mode() 1500 * does not implement ndo_set_rx_mode()
@@ -1449,9 +1502,9 @@ enum netdev_priv_flags {
1449 * @mc: multicast mac addresses 1502 * @mc: multicast mac addresses
1450 * @dev_addrs: list of device hw addresses 1503 * @dev_addrs: list of device hw addresses
1451 * @queues_kset: Group of all Kobjects in the Tx and RX queues 1504 * @queues_kset: Group of all Kobjects in the Tx and RX queues
1452 * @promiscuity: Number of times, the NIC is told to work in 1505 * @promiscuity: Number of times the NIC is told to work in
1453 * Promiscuous mode, if it becomes 0 the NIC will 1506 * promiscuous mode; if it becomes 0 the NIC will
1454 * exit from working in Promiscuous mode 1507 * exit promiscuous mode
1455 * @allmulti: Counter, enables or disables allmulticast mode 1508 * @allmulti: Counter, enables or disables allmulticast mode
1456 * 1509 *
1457 * @vlan_info: VLAN info 1510 * @vlan_info: VLAN info
@@ -1497,7 +1550,7 @@ enum netdev_priv_flags {
1497 * 1550 *
1498 * @trans_start: Time (in jiffies) of last Tx 1551 * @trans_start: Time (in jiffies) of last Tx
1499 * @watchdog_timeo: Represents the timeout that is used by 1552 * @watchdog_timeo: Represents the timeout that is used by
1500 * the watchdog ( see dev_watchdog() ) 1553 * the watchdog (see dev_watchdog())
1501 * @watchdog_timer: List of timers 1554 * @watchdog_timer: List of timers
1502 * 1555 *
1503 * @pcpu_refcnt: Number of references to this device 1556 * @pcpu_refcnt: Number of references to this device
@@ -1611,10 +1664,11 @@ struct net_device {
1611 1664
1612 atomic_long_t rx_dropped; 1665 atomic_long_t rx_dropped;
1613 atomic_long_t tx_dropped; 1666 atomic_long_t tx_dropped;
1667 atomic_long_t rx_nohandler;
1614 1668
1615#ifdef CONFIG_WIRELESS_EXT 1669#ifdef CONFIG_WIRELESS_EXT
1616 const struct iw_handler_def * wireless_handlers; 1670 const struct iw_handler_def *wireless_handlers;
1617 struct iw_public_data * wireless_data; 1671 struct iw_public_data *wireless_data;
1618#endif 1672#endif
1619 const struct net_device_ops *netdev_ops; 1673 const struct net_device_ops *netdev_ops;
1620 const struct ethtool_ops *ethtool_ops; 1674 const struct ethtool_ops *ethtool_ops;
@@ -1667,7 +1721,7 @@ struct net_device {
1667 unsigned int allmulti; 1721 unsigned int allmulti;
1668 1722
1669 1723
1670 /* Protocol specific pointers */ 1724 /* Protocol-specific pointers */
1671 1725
1672#if IS_ENABLED(CONFIG_VLAN_8021Q) 1726#if IS_ENABLED(CONFIG_VLAN_8021Q)
1673 struct vlan_info __rcu *vlan_info; 1727 struct vlan_info __rcu *vlan_info;
@@ -1697,13 +1751,11 @@ struct net_device {
1697 /* Interface address info used in eth_type_trans() */ 1751 /* Interface address info used in eth_type_trans() */
1698 unsigned char *dev_addr; 1752 unsigned char *dev_addr;
1699 1753
1700
1701#ifdef CONFIG_SYSFS 1754#ifdef CONFIG_SYSFS
1702 struct netdev_rx_queue *_rx; 1755 struct netdev_rx_queue *_rx;
1703 1756
1704 unsigned int num_rx_queues; 1757 unsigned int num_rx_queues;
1705 unsigned int real_num_rx_queues; 1758 unsigned int real_num_rx_queues;
1706
1707#endif 1759#endif
1708 1760
1709 unsigned long gro_flush_timeout; 1761 unsigned long gro_flush_timeout;
@@ -1795,7 +1847,7 @@ struct net_device {
1795 struct garp_port __rcu *garp_port; 1847 struct garp_port __rcu *garp_port;
1796 struct mrp_port __rcu *mrp_port; 1848 struct mrp_port __rcu *mrp_port;
1797 1849
1798 struct device dev; 1850 struct device dev;
1799 const struct attribute_group *sysfs_groups[4]; 1851 const struct attribute_group *sysfs_groups[4];
1800 const struct attribute_group *sysfs_rx_queue_group; 1852 const struct attribute_group *sysfs_rx_queue_group;
1801 1853
@@ -1810,9 +1862,9 @@ struct net_device {
1810#ifdef CONFIG_DCB 1862#ifdef CONFIG_DCB
1811 const struct dcbnl_rtnl_ops *dcbnl_ops; 1863 const struct dcbnl_rtnl_ops *dcbnl_ops;
1812#endif 1864#endif
1813 u8 num_tc; 1865 u8 num_tc;
1814 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 1866 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1815 u8 prio_tc_map[TC_BITMASK + 1]; 1867 u8 prio_tc_map[TC_BITMASK + 1];
1816 1868
1817#if IS_ENABLED(CONFIG_FCOE) 1869#if IS_ENABLED(CONFIG_FCOE)
1818 unsigned int fcoe_ddp_xid; 1870 unsigned int fcoe_ddp_xid;
@@ -1820,9 +1872,9 @@ struct net_device {
1820#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 1872#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1821 struct netprio_map __rcu *priomap; 1873 struct netprio_map __rcu *priomap;
1822#endif 1874#endif
1823 struct phy_device *phydev; 1875 struct phy_device *phydev;
1824 struct lock_class_key *qdisc_tx_busylock; 1876 struct lock_class_key *qdisc_tx_busylock;
1825 bool proto_down; 1877 bool proto_down;
1826}; 1878};
1827#define to_net_dev(d) container_of(d, struct net_device, dev) 1879#define to_net_dev(d) container_of(d, struct net_device, dev)
1828 1880
@@ -1908,6 +1960,26 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1908 struct sk_buff *skb, 1960 struct sk_buff *skb,
1909 void *accel_priv); 1961 void *accel_priv);
1910 1962
1963/* returns the headroom that the master device needs to take in account
1964 * when forwarding to this dev
1965 */
1966static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
1967{
1968 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
1969}
1970
1971static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
1972{
1973 if (dev->netdev_ops->ndo_set_rx_headroom)
1974 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
1975}
1976
1977/* set the device rx headroom to the dev's default */
1978static inline void netdev_reset_rx_headroom(struct net_device *dev)
1979{
1980 netdev_set_rx_headroom(dev, -1);
1981}
1982
1911/* 1983/*
1912 * Net namespace inlines 1984 * Net namespace inlines
1913 */ 1985 */
@@ -1950,7 +2022,7 @@ static inline void *netdev_priv(const struct net_device *dev)
1950 2022
1951/* Set the sysfs device type for the network logical device to allow 2023/* Set the sysfs device type for the network logical device to allow
1952 * fine-grained identification of different network device types. For 2024 * fine-grained identification of different network device types. For
1953 * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc. 2025 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1954 */ 2026 */
1955#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 2027#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1956 2028
@@ -1960,22 +2032,22 @@ static inline void *netdev_priv(const struct net_device *dev)
1960#define NAPI_POLL_WEIGHT 64 2032#define NAPI_POLL_WEIGHT 64
1961 2033
1962/** 2034/**
1963 * netif_napi_add - initialize a napi context 2035 * netif_napi_add - initialize a NAPI context
1964 * @dev: network device 2036 * @dev: network device
1965 * @napi: napi context 2037 * @napi: NAPI context
1966 * @poll: polling function 2038 * @poll: polling function
1967 * @weight: default weight 2039 * @weight: default weight
1968 * 2040 *
1969 * netif_napi_add() must be used to initialize a napi context prior to calling 2041 * netif_napi_add() must be used to initialize a NAPI context prior to calling
1970 * *any* of the other napi related functions. 2042 * *any* of the other NAPI-related functions.
1971 */ 2043 */
1972void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 2044void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1973 int (*poll)(struct napi_struct *, int), int weight); 2045 int (*poll)(struct napi_struct *, int), int weight);
1974 2046
1975/** 2047/**
1976 * netif_tx_napi_add - initialize a napi context 2048 * netif_tx_napi_add - initialize a NAPI context
1977 * @dev: network device 2049 * @dev: network device
1978 * @napi: napi context 2050 * @napi: NAPI context
1979 * @poll: polling function 2051 * @poll: polling function
1980 * @weight: default weight 2052 * @weight: default weight
1981 * 2053 *
@@ -1993,22 +2065,22 @@ static inline void netif_tx_napi_add(struct net_device *dev,
1993} 2065}
1994 2066
1995/** 2067/**
1996 * netif_napi_del - remove a napi context 2068 * netif_napi_del - remove a NAPI context
1997 * @napi: napi context 2069 * @napi: NAPI context
1998 * 2070 *
1999 * netif_napi_del() removes a napi context from the network device napi list 2071 * netif_napi_del() removes a NAPI context from the network device NAPI list
2000 */ 2072 */
2001void netif_napi_del(struct napi_struct *napi); 2073void netif_napi_del(struct napi_struct *napi);
2002 2074
2003struct napi_gro_cb { 2075struct napi_gro_cb {
2004 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ 2076 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
2005 void *frag0; 2077 void *frag0;
2006 2078
2007 /* Length of frag0. */ 2079 /* Length of frag0. */
2008 unsigned int frag0_len; 2080 unsigned int frag0_len;
2009 2081
2010 /* This indicates where we are processing relative to skb->data. */ 2082 /* This indicates where we are processing relative to skb->data. */
2011 int data_offset; 2083 int data_offset;
2012 2084
2013 /* This is non-zero if the packet cannot be merged with the new skb. */ 2085 /* This is non-zero if the packet cannot be merged with the new skb. */
2014 u16 flush; 2086 u16 flush;
@@ -2031,8 +2103,8 @@ struct napi_gro_cb {
2031 /* This is non-zero if the packet may be of the same flow. */ 2103 /* This is non-zero if the packet may be of the same flow. */
2032 u8 same_flow:1; 2104 u8 same_flow:1;
2033 2105
2034 /* Used in udp_gro_receive */ 2106 /* Used in tunnel GRO receive */
2035 u8 udp_mark:1; 2107 u8 encap_mark:1;
2036 2108
2037 /* GRO checksum is valid */ 2109 /* GRO checksum is valid */
2038 u8 csum_valid:1; 2110 u8 csum_valid:1;
@@ -2048,7 +2120,10 @@ struct napi_gro_cb {
2048 /* Used in foo-over-udp, set in udp[46]_gro_receive */ 2120 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2049 u8 is_ipv6:1; 2121 u8 is_ipv6:1;
2050 2122
2051 /* 7 bit hole */ 2123 /* Used in GRE, set in fou/gue_gro_receive */
2124 u8 is_fou:1;
2125
2126 /* 6 bit hole */
2052 2127
2053 /* used to support CHECKSUM_COMPLETE for tunneling protocols */ 2128 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2054 __wsum csum; 2129 __wsum csum;
@@ -2089,6 +2164,9 @@ struct packet_offload {
2089 2164
2090struct udp_offload; 2165struct udp_offload;
2091 2166
2167/* 'skb->encapsulation' is set before gro_complete() is called. gro_complete()
2168 * must set 'skb->inner_mac_header' to the beginning of tunnel payload.
2169 */
2092struct udp_offload_callbacks { 2170struct udp_offload_callbacks {
2093 struct sk_buff **(*gro_receive)(struct sk_buff **head, 2171 struct sk_buff **(*gro_receive)(struct sk_buff **head,
2094 struct sk_buff *skb, 2172 struct sk_buff *skb,
@@ -2104,7 +2182,7 @@ struct udp_offload {
2104 struct udp_offload_callbacks callbacks; 2182 struct udp_offload_callbacks callbacks;
2105}; 2183};
2106 2184
2107/* often modified stats are per cpu, other are shared (netdev->stats) */ 2185/* often modified stats are per-CPU, other are shared (netdev->stats) */
2108struct pcpu_sw_netstats { 2186struct pcpu_sw_netstats {
2109 u64 rx_packets; 2187 u64 rx_packets;
2110 u64 rx_bytes; 2188 u64 rx_bytes;
@@ -2201,7 +2279,7 @@ struct netdev_notifier_changeupper_info {
2201 struct netdev_notifier_info info; /* must be first */ 2279 struct netdev_notifier_info info; /* must be first */
2202 struct net_device *upper_dev; /* new upper dev */ 2280 struct net_device *upper_dev; /* new upper dev */
2203 bool master; /* is upper dev master */ 2281 bool master; /* is upper dev master */
2204 bool linking; /* is the nofication for link or unlink */ 2282 bool linking; /* is the notification for link or unlink */
2205 void *upper_info; /* upper dev info */ 2283 void *upper_info; /* upper dev info */
2206}; 2284};
2207 2285
@@ -2627,6 +2705,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
2627 return dev->header_ops->parse(skb, haddr); 2705 return dev->header_ops->parse(skb, haddr);
2628} 2706}
2629 2707
2708/* ll_header must have at least hard_header_len allocated */
2709static inline bool dev_validate_header(const struct net_device *dev,
2710 char *ll_header, int len)
2711{
2712 if (likely(len >= dev->hard_header_len))
2713 return true;
2714
2715 if (capable(CAP_SYS_RAWIO)) {
2716 memset(ll_header + len, 0, dev->hard_header_len - len);
2717 return true;
2718 }
2719
2720 if (dev->header_ops && dev->header_ops->validate)
2721 return dev->header_ops->validate(ll_header, len);
2722
2723 return false;
2724}
2725
2630typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); 2726typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
2631int register_gifconf(unsigned int family, gifconf_func_t *gifconf); 2727int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2632static inline int unregister_gifconf(unsigned int family) 2728static inline int unregister_gifconf(unsigned int family)
@@ -2648,7 +2744,7 @@ extern int netdev_flow_limit_table_len;
2648#endif /* CONFIG_NET_FLOW_LIMIT */ 2744#endif /* CONFIG_NET_FLOW_LIMIT */
2649 2745
2650/* 2746/*
2651 * Incoming packets are placed on per-cpu queues 2747 * Incoming packets are placed on per-CPU queues
2652 */ 2748 */
2653struct softnet_data { 2749struct softnet_data {
2654 struct list_head poll_list; 2750 struct list_head poll_list;
@@ -2818,7 +2914,7 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2818 * @dev_queue: pointer to transmit queue 2914 * @dev_queue: pointer to transmit queue
2819 * 2915 *
2820 * BQL enabled drivers might use this helper in their ndo_start_xmit(), 2916 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
2821 * to give appropriate hint to the cpu. 2917 * to give appropriate hint to the CPU.
2822 */ 2918 */
2823static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) 2919static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
2824{ 2920{
@@ -2832,7 +2928,7 @@ static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_que
2832 * @dev_queue: pointer to transmit queue 2928 * @dev_queue: pointer to transmit queue
2833 * 2929 *
2834 * BQL enabled drivers might use this helper in their TX completion path, 2930 * BQL enabled drivers might use this helper in their TX completion path,
2835 * to give appropriate hint to the cpu. 2931 * to give appropriate hint to the CPU.
2836 */ 2932 */
2837static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) 2933static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
2838{ 2934{
@@ -2971,7 +3067,7 @@ static inline bool netif_running(const struct net_device *dev)
2971} 3067}
2972 3068
2973/* 3069/*
2974 * Routines to manage the subqueues on a device. We only need start 3070 * Routines to manage the subqueues on a device. We only need start,
2975 * stop, and a check if it's stopped. All other device management is 3071 * stop, and a check if it's stopped. All other device management is
2976 * done at the overall netdevice level. 3072 * done at the overall netdevice level.
2977 * Also test the device if we're multiqueue. 3073 * Also test the device if we're multiqueue.
@@ -3255,7 +3351,6 @@ void netif_carrier_off(struct net_device *dev);
3255 * in a "pending" state, waiting for some external event. For "on- 3351 * in a "pending" state, waiting for some external event. For "on-
3256 * demand" interfaces, this new state identifies the situation where the 3352 * demand" interfaces, this new state identifies the situation where the
3257 * interface is waiting for events to place it in the up state. 3353 * interface is waiting for events to place it in the up state.
3258 *
3259 */ 3354 */
3260static inline void netif_dormant_on(struct net_device *dev) 3355static inline void netif_dormant_on(struct net_device *dev)
3261{ 3356{
@@ -3590,7 +3685,7 @@ void dev_uc_init(struct net_device *dev);
3590 * 3685 *
3591 * Add newly added addresses to the interface, and release 3686 * Add newly added addresses to the interface, and release
3592 * addresses that have been deleted. 3687 * addresses that have been deleted.
3593 **/ 3688 */
3594static inline int __dev_uc_sync(struct net_device *dev, 3689static inline int __dev_uc_sync(struct net_device *dev,
3595 int (*sync)(struct net_device *, 3690 int (*sync)(struct net_device *,
3596 const unsigned char *), 3691 const unsigned char *),
@@ -3606,7 +3701,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
3606 * @unsync: function to call if address should be removed 3701 * @unsync: function to call if address should be removed
3607 * 3702 *
3608 * Remove all addresses that were added to the device by dev_uc_sync(). 3703 * Remove all addresses that were added to the device by dev_uc_sync().
3609 **/ 3704 */
3610static inline void __dev_uc_unsync(struct net_device *dev, 3705static inline void __dev_uc_unsync(struct net_device *dev,
3611 int (*unsync)(struct net_device *, 3706 int (*unsync)(struct net_device *,
3612 const unsigned char *)) 3707 const unsigned char *))
@@ -3634,7 +3729,7 @@ void dev_mc_init(struct net_device *dev);
3634 * 3729 *
3635 * Add newly added addresses to the interface, and release 3730 * Add newly added addresses to the interface, and release
3636 * addresses that have been deleted. 3731 * addresses that have been deleted.
3637 **/ 3732 */
3638static inline int __dev_mc_sync(struct net_device *dev, 3733static inline int __dev_mc_sync(struct net_device *dev,
3639 int (*sync)(struct net_device *, 3734 int (*sync)(struct net_device *,
3640 const unsigned char *), 3735 const unsigned char *),
@@ -3650,7 +3745,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
3650 * @unsync: function to call if address should be removed 3745 * @unsync: function to call if address should be removed
3651 * 3746 *
3652 * Remove all addresses that were added to the device by dev_mc_sync(). 3747 * Remove all addresses that were added to the device by dev_mc_sync().
3653 **/ 3748 */
3654static inline void __dev_mc_unsync(struct net_device *dev, 3749static inline void __dev_mc_unsync(struct net_device *dev,
3655 int (*unsync)(struct net_device *, 3750 int (*unsync)(struct net_device *,
3656 const unsigned char *)) 3751 const unsigned char *))
@@ -3741,7 +3836,7 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
3741 3836
3742/* RSS keys are 40 or 52 bytes long */ 3837/* RSS keys are 40 or 52 bytes long */
3743#define NETDEV_RSS_KEY_LEN 52 3838#define NETDEV_RSS_KEY_LEN 52
3744extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; 3839extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
3745void netdev_rss_key_fill(void *buffer, size_t len); 3840void netdev_rss_key_fill(void *buffer, size_t len);
3746 3841
3747int dev_get_nest_level(struct net_device *dev, 3842int dev_get_nest_level(struct net_device *dev,
@@ -3912,7 +4007,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
3912 4007
3913static inline bool net_gso_ok(netdev_features_t features, int gso_type) 4008static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3914{ 4009{
3915 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; 4010 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
3916 4011
3917 /* check flags correspondence */ 4012 /* check flags correspondence */
3918 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 4013 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
@@ -3965,6 +4060,11 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
3965 skb->mac_len = mac_len; 4060 skb->mac_len = mac_len;
3966} 4061}
3967 4062
4063static inline bool netif_is_macsec(const struct net_device *dev)
4064{
4065 return dev->priv_flags & IFF_MACSEC;
4066}
4067
3968static inline bool netif_is_macvlan(const struct net_device *dev) 4068static inline bool netif_is_macvlan(const struct net_device *dev)
3969{ 4069{
3970 return dev->priv_flags & IFF_MACVLAN; 4070 return dev->priv_flags & IFF_MACVLAN;
@@ -4045,6 +4145,11 @@ static inline bool netif_is_lag_port(const struct net_device *dev)
4045 return netif_is_bond_slave(dev) || netif_is_team_port(dev); 4145 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4046} 4146}
4047 4147
4148static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4149{
4150 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4151}
4152
4048/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ 4153/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
4049static inline void netif_keep_dst(struct net_device *dev) 4154static inline void netif_keep_dst(struct net_device *dev)
4050{ 4155{
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 0ad556726181..9230f9aee896 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -141,22 +141,6 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
141 141
142#ifdef HAVE_JUMP_LABEL 142#ifdef HAVE_JUMP_LABEL
143extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 143extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
144
145static inline bool nf_hook_list_active(struct list_head *hook_list,
146 u_int8_t pf, unsigned int hook)
147{
148 if (__builtin_constant_p(pf) &&
149 __builtin_constant_p(hook))
150 return static_key_false(&nf_hooks_needed[pf][hook]);
151
152 return !list_empty(hook_list);
153}
154#else
155static inline bool nf_hook_list_active(struct list_head *hook_list,
156 u_int8_t pf, unsigned int hook)
157{
158 return !list_empty(hook_list);
159}
160#endif 144#endif
161 145
162int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); 146int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
@@ -177,9 +161,18 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
177 int (*okfn)(struct net *, struct sock *, struct sk_buff *), 161 int (*okfn)(struct net *, struct sock *, struct sk_buff *),
178 int thresh) 162 int thresh)
179{ 163{
180 struct list_head *hook_list = &net->nf.hooks[pf][hook]; 164 struct list_head *hook_list;
165
166#ifdef HAVE_JUMP_LABEL
167 if (__builtin_constant_p(pf) &&
168 __builtin_constant_p(hook) &&
169 !static_key_false(&nf_hooks_needed[pf][hook]))
170 return 1;
171#endif
172
173 hook_list = &net->nf.hooks[pf][hook];
181 174
182 if (nf_hook_list_active(hook_list, pf, hook)) { 175 if (!list_empty(hook_list)) {
183 struct nf_hook_state state; 176 struct nf_hook_state state;
184 177
185 nf_hook_state_init(&state, hook_list, hook, thresh, 178 nf_hook_state_init(&state, hook_list, hook, thresh,
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 0e1f433cc4b7..f48b8a664b0f 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -234,6 +234,10 @@ struct ip_set {
234 spinlock_t lock; 234 spinlock_t lock;
235 /* References to the set */ 235 /* References to the set */
236 u32 ref; 236 u32 ref;
237 /* References to the set for netlink events like dump,
238 * ref can be swapped out by ip_set_swap
239 */
240 u32 ref_netlink;
237 /* The core set type */ 241 /* The core set type */
238 struct ip_set_type *type; 242 struct ip_set_type *type;
239 /* The type variant doing the real job */ 243 /* The type variant doing the real job */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index ba0d9789eb6e..1d82dd5e9a08 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -34,8 +34,6 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
34int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); 34int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
35 35
36int nfnetlink_has_listeners(struct net *net, unsigned int group); 36int nfnetlink_has_listeners(struct net *net, unsigned int group);
37struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
38 u32 dst_portid, gfp_t gfp_mask);
39int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, 37int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
40 unsigned int group, int echo, gfp_t flags); 38 unsigned int group, int echo, gfp_t flags);
41int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); 39int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index c5577410c25d..80a305b85323 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -200,6 +200,9 @@ struct xt_table {
200 u_int8_t af; /* address/protocol family */ 200 u_int8_t af; /* address/protocol family */
201 int priority; /* hook order */ 201 int priority; /* hook order */
202 202
203 /* called when table is needed in the given netns */
204 int (*table_init)(struct net *net);
205
203 /* A unique name... */ 206 /* A unique name... */
204 const char name[XT_TABLE_MAXNAMELEN]; 207 const char name[XT_TABLE_MAXNAMELEN];
205}; 208};
@@ -408,8 +411,7 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
408 return cnt; 411 return cnt;
409} 412}
410 413
411struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); 414struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
412void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
413 415
414#ifdef CONFIG_COMPAT 416#ifdef CONFIG_COMPAT
415#include <net/compat.h> 417#include <net/compat.h>
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 6f074db2f23d..029b95e8924e 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -48,10 +48,11 @@ struct arpt_error {
48} 48}
49 49
50extern void *arpt_alloc_initial_table(const struct xt_table *); 50extern void *arpt_alloc_initial_table(const struct xt_table *);
51extern struct xt_table *arpt_register_table(struct net *net, 51int arpt_register_table(struct net *net, const struct xt_table *table,
52 const struct xt_table *table, 52 const struct arpt_replace *repl,
53 const struct arpt_replace *repl); 53 const struct nf_hook_ops *ops, struct xt_table **res);
54extern void arpt_unregister_table(struct xt_table *table); 54void arpt_unregister_table(struct net *net, struct xt_table *table,
55 const struct nf_hook_ops *ops);
55extern unsigned int arpt_do_table(struct sk_buff *skb, 56extern unsigned int arpt_do_table(struct sk_buff *skb,
56 const struct nf_hook_state *state, 57 const struct nf_hook_state *state,
57 struct xt_table *table); 58 struct xt_table *table);
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index aa598f942c01..7bfc5893ec31 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -24,10 +24,11 @@
24 24
25extern void ipt_init(void) __init; 25extern void ipt_init(void) __init;
26 26
27extern struct xt_table *ipt_register_table(struct net *net, 27int ipt_register_table(struct net *net, const struct xt_table *table,
28 const struct xt_table *table, 28 const struct ipt_replace *repl,
29 const struct ipt_replace *repl); 29 const struct nf_hook_ops *ops, struct xt_table **res);
30extern void ipt_unregister_table(struct net *net, struct xt_table *table); 30void ipt_unregister_table(struct net *net, struct xt_table *table,
31 const struct nf_hook_ops *ops);
31 32
32/* Standard entry. */ 33/* Standard entry. */
33struct ipt_standard { 34struct ipt_standard {
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 0f76e5c674f9..b21c392d6012 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -25,10 +25,11 @@
25extern void ip6t_init(void) __init; 25extern void ip6t_init(void) __init;
26 26
27extern void *ip6t_alloc_initial_table(const struct xt_table *); 27extern void *ip6t_alloc_initial_table(const struct xt_table *);
28extern struct xt_table *ip6t_register_table(struct net *net, 28int ip6t_register_table(struct net *net, const struct xt_table *table,
29 const struct xt_table *table, 29 const struct ip6t_replace *repl,
30 const struct ip6t_replace *repl); 30 const struct nf_hook_ops *ops, struct xt_table **res);
31extern void ip6t_unregister_table(struct net *net, struct xt_table *table); 31void ip6t_unregister_table(struct net *net, struct xt_table *table,
32 const struct nf_hook_ops *ops);
32extern unsigned int ip6t_do_table(struct sk_buff *skb, 33extern unsigned int ip6t_do_table(struct sk_buff *skb,
33 const struct nf_hook_state *state, 34 const struct nf_hook_state *state,
34 struct xt_table *table); 35 struct xt_table *table);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 0b41959aab9f..da14ab61f363 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -69,16 +69,6 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group)
69extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); 69extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
70extern int netlink_has_listeners(struct sock *sk, unsigned int group); 70extern int netlink_has_listeners(struct sock *sk, unsigned int group);
71 71
72extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
73 unsigned int ldiff, u32 dst_portid,
74 gfp_t gfp_mask);
75static inline struct sk_buff *
76netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
77 gfp_t gfp_mask)
78{
79 return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
80}
81
82extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); 72extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
83extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, 73extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
84 __u32 group, gfp_t allocation); 74 __u32 group, gfp_t allocation);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index d6f9b4e6006d..011433478a14 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -529,6 +529,7 @@ enum pnfs_layouttype {
529 LAYOUT_OSD2_OBJECTS = 2, 529 LAYOUT_OSD2_OBJECTS = 2,
530 LAYOUT_BLOCK_VOLUME = 3, 530 LAYOUT_BLOCK_VOLUME = 3,
531 LAYOUT_FLEX_FILES = 4, 531 LAYOUT_FLEX_FILES = 4,
532 LAYOUT_SCSI = 5,
532 LAYOUT_TYPE_MAX 533 LAYOUT_TYPE_MAX
533}; 534};
534 535
@@ -555,6 +556,7 @@ enum pnfs_block_volume_type {
555 PNFS_BLOCK_VOLUME_SLICE = 1, 556 PNFS_BLOCK_VOLUME_SLICE = 1,
556 PNFS_BLOCK_VOLUME_CONCAT = 2, 557 PNFS_BLOCK_VOLUME_CONCAT = 2,
557 PNFS_BLOCK_VOLUME_STRIPE = 3, 558 PNFS_BLOCK_VOLUME_STRIPE = 3,
559 PNFS_BLOCK_VOLUME_SCSI = 4,
558}; 560};
559 561
560enum pnfs_block_extent_state { 562enum pnfs_block_extent_state {
@@ -568,6 +570,23 @@ enum pnfs_block_extent_state {
568#define PNFS_BLOCK_EXTENT_SIZE \ 570#define PNFS_BLOCK_EXTENT_SIZE \
569 (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE) 571 (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
570 572
573/* on the wire size of a scsi commit range */
574#define PNFS_SCSI_RANGE_SIZE \
575 (4 * sizeof(__be32))
576
577enum scsi_code_set {
578 PS_CODE_SET_BINARY = 1,
579 PS_CODE_SET_ASCII = 2,
580 PS_CODE_SET_UTF8 = 3
581};
582
583enum scsi_designator_type {
584 PS_DESIGNATOR_T10 = 1,
585 PS_DESIGNATOR_EUI64 = 2,
586 PS_DESIGNATOR_NAA = 3,
587 PS_DESIGNATOR_NAME = 8
588};
589
571#define NFL4_UFLG_MASK 0x0000003F 590#define NFL4_UFLG_MASK 0x0000003F
572#define NFL4_UFLG_DENSE 0x00000001 591#define NFL4_UFLG_DENSE 0x00000001
573#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002 592#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f2f650f136ee..957049f72290 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@ struct nfs_page {
41 struct page *wb_page; /* page to read in/write out */ 41 struct page *wb_page; /* page to read in/write out */
42 struct nfs_open_context *wb_context; /* File state context info */ 42 struct nfs_open_context *wb_context; /* File state context info */
43 struct nfs_lock_context *wb_lock_context; /* lock context info */ 43 struct nfs_lock_context *wb_lock_context; /* lock context info */
44 pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ 44 pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
45 unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ 45 unsigned int wb_offset, /* Offset & ~PAGE_MASK */
46 wb_pgbase, /* Start of page data */ 46 wb_pgbase, /* Start of page data */
47 wb_bytes; /* Length of request */ 47 wb_bytes; /* Length of request */
48 struct kref wb_kref; /* reference count */ 48 struct kref wb_kref; /* reference count */
@@ -184,7 +184,7 @@ nfs_list_entry(struct list_head *head)
184static inline 184static inline
185loff_t req_offset(struct nfs_page *req) 185loff_t req_offset(struct nfs_page *req)
186{ 186{
187 return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset; 187 return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
188} 188}
189 189
190#endif /* _LINUX_NFS_PAGE_H */ 190#endif /* _LINUX_NFS_PAGE_H */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763e4b86..e9fcf90b270d 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
331{ 331{
332 unsigned len = le16_to_cpu(dlen); 332 unsigned len = le16_to_cpu(dlen);
333 333
334#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) 334#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
335 if (len == NILFS_MAX_REC_LEN) 335 if (len == NILFS_MAX_REC_LEN)
336 return 1 << 16; 336 return 1 << 16;
337#endif 337#endif
@@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
340 340
341static inline __le16 nilfs_rec_len_to_disk(unsigned len) 341static inline __le16 nilfs_rec_len_to_disk(unsigned len)
342{ 342{
343#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) 343#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
344 if (len == (1 << 16)) 344 if (len == (1 << 16))
345 return cpu_to_le16(NILFS_MAX_REC_LEN); 345 return cpu_to_le16(NILFS_MAX_REC_LEN);
346 else if (len > (1 << 16)) 346 else if (len > (1 << 16))
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 7ec5b86735f3..4630eeae18e0 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -65,7 +65,6 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
65#endif 65#endif
66 66
67#ifdef CONFIG_LOCKUP_DETECTOR 67#ifdef CONFIG_LOCKUP_DETECTOR
68int hw_nmi_is_cpu_stuck(struct pt_regs *);
69u64 hw_nmi_get_sample_period(int watchdog_thresh); 68u64 hw_nmi_get_sample_period(int watchdog_thresh);
70extern int nmi_watchdog_enabled; 69extern int nmi_watchdog_enabled;
71extern int soft_watchdog_enabled; 70extern int soft_watchdog_enabled;
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index d14a4c362465..4149868de4e6 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -47,6 +47,8 @@
47 * runtime initialization. 47 * runtime initialization.
48 */ 48 */
49 49
50struct notifier_block;
51
50typedef int (*notifier_fn_t)(struct notifier_block *nb, 52typedef int (*notifier_fn_t)(struct notifier_block *nb,
51 unsigned long action, void *data); 53 unsigned long action, void *data);
52 54
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 35fa08fd7739..ac0d65bef5d0 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -8,6 +8,7 @@ struct mnt_namespace;
8struct uts_namespace; 8struct uts_namespace;
9struct ipc_namespace; 9struct ipc_namespace;
10struct pid_namespace; 10struct pid_namespace;
11struct cgroup_namespace;
11struct fs_struct; 12struct fs_struct;
12 13
13/* 14/*
@@ -33,6 +34,7 @@ struct nsproxy {
33 struct mnt_namespace *mnt_ns; 34 struct mnt_namespace *mnt_ns;
34 struct pid_namespace *pid_ns_for_children; 35 struct pid_namespace *pid_ns_for_children;
35 struct net *net_ns; 36 struct net *net_ns;
37 struct cgroup_namespace *cgroup_ns;
36}; 38};
37extern struct nsproxy init_nsproxy; 39extern struct nsproxy init_nsproxy;
38 40
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index f798e2afba88..6f47562d477b 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -284,7 +284,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
284 /* ops->db_read_mask && */ 284 /* ops->db_read_mask && */
285 ops->db_set_mask && 285 ops->db_set_mask &&
286 ops->db_clear_mask && 286 ops->db_clear_mask &&
287 ops->peer_db_addr && 287 /* ops->peer_db_addr && */
288 /* ops->peer_db_read && */ 288 /* ops->peer_db_read && */
289 ops->peer_db_set && 289 ops->peer_db_set &&
290 /* ops->peer_db_clear && */ 290 /* ops->peer_db_clear && */
@@ -295,7 +295,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
295 ops->spad_count && 295 ops->spad_count &&
296 ops->spad_read && 296 ops->spad_read &&
297 ops->spad_write && 297 ops->spad_write &&
298 ops->peer_spad_addr && 298 /* ops->peer_spad_addr && */
299 /* ops->peer_spad_read && */ 299 /* ops->peer_spad_read && */
300 ops->peer_spad_write && 300 ops->peer_spad_write &&
301 1; 301 1;
@@ -757,6 +757,9 @@ static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
757 phys_addr_t *db_addr, 757 phys_addr_t *db_addr,
758 resource_size_t *db_size) 758 resource_size_t *db_size)
759{ 759{
760 if (!ntb->ops->peer_db_addr)
761 return -EINVAL;
762
760 return ntb->ops->peer_db_addr(ntb, db_addr, db_size); 763 return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
761} 764}
762 765
@@ -948,6 +951,9 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
948static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx, 951static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
949 phys_addr_t *spad_addr) 952 phys_addr_t *spad_addr)
950{ 953{
954 if (!ntb->ops->peer_spad_addr)
955 return -EINVAL;
956
951 return ntb->ops->peer_spad_addr(ntb, idx, spad_addr); 957 return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
952} 958}
953 959
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 0b68caff1b3c..a4fcc90b0f20 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -23,6 +23,10 @@ struct nvmem_config {
23 const struct nvmem_cell_info *cells; 23 const struct nvmem_cell_info *cells;
24 int ncells; 24 int ncells;
25 bool read_only; 25 bool read_only;
26 bool root_only;
27 /* To be only used by old driver/misc/eeprom drivers */
28 bool compat;
29 struct device *base_dev;
26}; 30};
27 31
28#if IS_ENABLED(CONFIG_NVMEM) 32#if IS_ENABLED(CONFIG_NVMEM)
@@ -43,5 +47,4 @@ static inline int nvmem_unregister(struct nvmem_device *nvmem)
43} 47}
44 48
45#endif /* CONFIG_NVMEM */ 49#endif /* CONFIG_NVMEM */
46
47#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ 50#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index dc6e39696b64..31758036787c 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -133,7 +133,7 @@ void of_core_init(void);
133 133
134static inline bool is_of_node(struct fwnode_handle *fwnode) 134static inline bool is_of_node(struct fwnode_handle *fwnode)
135{ 135{
136 return fwnode && fwnode->type == FWNODE_OF; 136 return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
137} 137}
138 138
139static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) 139static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
@@ -296,13 +296,13 @@ extern int of_property_read_u64_array(const struct device_node *np,
296 u64 *out_values, 296 u64 *out_values,
297 size_t sz); 297 size_t sz);
298 298
299extern int of_property_read_string(struct device_node *np, 299extern int of_property_read_string(const struct device_node *np,
300 const char *propname, 300 const char *propname,
301 const char **out_string); 301 const char **out_string);
302extern int of_property_match_string(struct device_node *np, 302extern int of_property_match_string(const struct device_node *np,
303 const char *propname, 303 const char *propname,
304 const char *string); 304 const char *string);
305extern int of_property_read_string_helper(struct device_node *np, 305extern int of_property_read_string_helper(const struct device_node *np,
306 const char *propname, 306 const char *propname,
307 const char **out_strs, size_t sz, int index); 307 const char **out_strs, size_t sz, int index);
308extern int of_device_is_compatible(const struct device_node *device, 308extern int of_device_is_compatible(const struct device_node *device,
@@ -538,14 +538,14 @@ static inline int of_property_read_u64_array(const struct device_node *np,
538 return -ENOSYS; 538 return -ENOSYS;
539} 539}
540 540
541static inline int of_property_read_string(struct device_node *np, 541static inline int of_property_read_string(const struct device_node *np,
542 const char *propname, 542 const char *propname,
543 const char **out_string) 543 const char **out_string)
544{ 544{
545 return -ENOSYS; 545 return -ENOSYS;
546} 546}
547 547
548static inline int of_property_read_string_helper(struct device_node *np, 548static inline int of_property_read_string_helper(const struct device_node *np,
549 const char *propname, 549 const char *propname,
550 const char **out_strs, size_t sz, int index) 550 const char **out_strs, size_t sz, int index)
551{ 551{
@@ -571,7 +571,7 @@ static inline int of_property_read_u64(const struct device_node *np,
571 return -ENOSYS; 571 return -ENOSYS;
572} 572}
573 573
574static inline int of_property_match_string(struct device_node *np, 574static inline int of_property_match_string(const struct device_node *np,
575 const char *propname, 575 const char *propname,
576 const char *string) 576 const char *string)
577{ 577{
@@ -773,7 +773,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
773 * 773 *
774 * If @out_strs is NULL, the number of strings in the property is returned. 774 * If @out_strs is NULL, the number of strings in the property is returned.
775 */ 775 */
776static inline int of_property_read_string_array(struct device_node *np, 776static inline int of_property_read_string_array(const struct device_node *np,
777 const char *propname, const char **out_strs, 777 const char *propname, const char **out_strs,
778 size_t sz) 778 size_t sz)
779{ 779{
@@ -792,7 +792,7 @@ static inline int of_property_read_string_array(struct device_node *np,
792 * does not have a value, and -EILSEQ if the string is not null-terminated 792 * does not have a value, and -EILSEQ if the string is not null-terminated
793 * within the length of the property data. 793 * within the length of the property data.
794 */ 794 */
795static inline int of_property_count_strings(struct device_node *np, 795static inline int of_property_count_strings(const struct device_node *np,
796 const char *propname) 796 const char *propname)
797{ 797{
798 return of_property_read_string_helper(np, propname, NULL, 0, 0); 798 return of_property_read_string_helper(np, propname, NULL, 0, 0);
@@ -816,7 +816,7 @@ static inline int of_property_count_strings(struct device_node *np,
816 * 816 *
817 * The out_string pointer is modified only if a valid string can be decoded. 817 * The out_string pointer is modified only if a valid string can be decoded.
818 */ 818 */
819static inline int of_property_read_string_index(struct device_node *np, 819static inline int of_property_read_string_index(const struct device_node *np,
820 const char *propname, 820 const char *propname,
821 int index, const char **output) 821 int index, const char **output)
822{ 822{
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index df9ef3801812..2fbe8682a66f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -88,7 +88,7 @@ extern void unflatten_device_tree(void);
88extern void unflatten_and_copy_device_tree(void); 88extern void unflatten_and_copy_device_tree(void);
89extern void early_init_devtree(void *); 89extern void early_init_devtree(void *);
90extern void early_get_first_memblock_info(void *, phys_addr_t *); 90extern void early_get_first_memblock_info(void *, phys_addr_t *);
91extern u64 fdt_translate_address(const void *blob, int node_offset); 91extern u64 of_flat_dt_translate_address(unsigned long node);
92extern void of_fdt_limit_memory(int limit); 92extern void of_fdt_limit_memory(int limit);
93#else /* CONFIG_OF_FLATTREE */ 93#else /* CONFIG_OF_FLATTREE */
94static inline void early_init_fdt_scan_reserved_mem(void) {} 94static inline void early_init_fdt_scan_reserved_mem(void) {}
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 7dee00143afd..d833eb4dd446 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -51,6 +51,9 @@ struct gpmc_timings {
51 u32 adv_on; /* Assertion time */ 51 u32 adv_on; /* Assertion time */
52 u32 adv_rd_off; /* Read deassertion time */ 52 u32 adv_rd_off; /* Read deassertion time */
53 u32 adv_wr_off; /* Write deassertion time */ 53 u32 adv_wr_off; /* Write deassertion time */
54 u32 adv_aad_mux_on; /* ADV assertion time for AAD */
55 u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */
56 u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */
54 57
55 /* WE signals timings corresponding to GPMC_CONFIG4 */ 58 /* WE signals timings corresponding to GPMC_CONFIG4 */
56 u32 we_on; /* WE assertion time */ 59 u32 we_on; /* WE assertion time */
@@ -59,6 +62,8 @@ struct gpmc_timings {
59 /* OE signals timings corresponding to GPMC_CONFIG4 */ 62 /* OE signals timings corresponding to GPMC_CONFIG4 */
60 u32 oe_on; /* OE assertion time */ 63 u32 oe_on; /* OE assertion time */
61 u32 oe_off; /* OE deassertion time */ 64 u32 oe_off; /* OE deassertion time */
65 u32 oe_aad_mux_on; /* OE assertion time for AAD */
66 u32 oe_aad_mux_off; /* OE deassertion time for AAD */
62 67
63 /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ 68 /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
64 u32 page_burst_access; /* Multiple access word delay */ 69 u32 page_burst_access; /* Multiple access word delay */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 03e6257321f0..628a43242a34 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -76,8 +76,6 @@ extern unsigned long oom_badness(struct task_struct *p,
76 struct mem_cgroup *memcg, const nodemask_t *nodemask, 76 struct mem_cgroup *memcg, const nodemask_t *nodemask,
77 unsigned long totalpages); 77 unsigned long totalpages);
78 78
79extern int oom_kills_count(void);
80extern void note_oom_kill(void);
81extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, 79extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
82 unsigned int points, unsigned long totalpages, 80 unsigned int points, unsigned long totalpages,
83 struct mem_cgroup *memcg, const char *message); 81 struct mem_cgroup *memcg, const char *message);
@@ -91,7 +89,7 @@ extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
91 89
92extern bool out_of_memory(struct oom_control *oc); 90extern bool out_of_memory(struct oom_control *oc);
93 91
94extern void exit_oom_victim(void); 92extern void exit_oom_victim(struct task_struct *tsk);
95 93
96extern int register_oom_notifier(struct notifier_block *nb); 94extern int register_oom_notifier(struct notifier_block *nb);
97extern int unregister_oom_notifier(struct notifier_block *nb); 95extern int unregister_oom_notifier(struct notifier_block *nb);
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index da523661500a..77b078c103b2 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -17,6 +17,8 @@
17#define ZONES_SHIFT 1 17#define ZONES_SHIFT 1
18#elif MAX_NR_ZONES <= 4 18#elif MAX_NR_ZONES <= 4
19#define ZONES_SHIFT 2 19#define ZONES_SHIFT 2
20#elif MAX_NR_ZONES <= 8
21#define ZONES_SHIFT 3
20#else 22#else
21#error ZONES_SHIFT -- too many zones configured adjust calculation 23#error ZONES_SHIFT -- too many zones configured adjust calculation
22#endif 24#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 19724e6ebd26..6b052aa7b5b7 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -144,12 +144,12 @@ static inline struct page *compound_head(struct page *page)
144 return page; 144 return page;
145} 145}
146 146
147static inline int PageTail(struct page *page) 147static __always_inline int PageTail(struct page *page)
148{ 148{
149 return READ_ONCE(page->compound_head) & 1; 149 return READ_ONCE(page->compound_head) & 1;
150} 150}
151 151
152static inline int PageCompound(struct page *page) 152static __always_inline int PageCompound(struct page *page)
153{ 153{
154 return test_bit(PG_head, &page->flags) || PageTail(page); 154 return test_bit(PG_head, &page->flags) || PageTail(page);
155} 155}
@@ -184,31 +184,31 @@ static inline int PageCompound(struct page *page)
184 * Macros to create function definitions for page flags 184 * Macros to create function definitions for page flags
185 */ 185 */
186#define TESTPAGEFLAG(uname, lname, policy) \ 186#define TESTPAGEFLAG(uname, lname, policy) \
187static inline int Page##uname(struct page *page) \ 187static __always_inline int Page##uname(struct page *page) \
188 { return test_bit(PG_##lname, &policy(page, 0)->flags); } 188 { return test_bit(PG_##lname, &policy(page, 0)->flags); }
189 189
190#define SETPAGEFLAG(uname, lname, policy) \ 190#define SETPAGEFLAG(uname, lname, policy) \
191static inline void SetPage##uname(struct page *page) \ 191static __always_inline void SetPage##uname(struct page *page) \
192 { set_bit(PG_##lname, &policy(page, 1)->flags); } 192 { set_bit(PG_##lname, &policy(page, 1)->flags); }
193 193
194#define CLEARPAGEFLAG(uname, lname, policy) \ 194#define CLEARPAGEFLAG(uname, lname, policy) \
195static inline void ClearPage##uname(struct page *page) \ 195static __always_inline void ClearPage##uname(struct page *page) \
196 { clear_bit(PG_##lname, &policy(page, 1)->flags); } 196 { clear_bit(PG_##lname, &policy(page, 1)->flags); }
197 197
198#define __SETPAGEFLAG(uname, lname, policy) \ 198#define __SETPAGEFLAG(uname, lname, policy) \
199static inline void __SetPage##uname(struct page *page) \ 199static __always_inline void __SetPage##uname(struct page *page) \
200 { __set_bit(PG_##lname, &policy(page, 1)->flags); } 200 { __set_bit(PG_##lname, &policy(page, 1)->flags); }
201 201
202#define __CLEARPAGEFLAG(uname, lname, policy) \ 202#define __CLEARPAGEFLAG(uname, lname, policy) \
203static inline void __ClearPage##uname(struct page *page) \ 203static __always_inline void __ClearPage##uname(struct page *page) \
204 { __clear_bit(PG_##lname, &policy(page, 1)->flags); } 204 { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
205 205
206#define TESTSETFLAG(uname, lname, policy) \ 206#define TESTSETFLAG(uname, lname, policy) \
207static inline int TestSetPage##uname(struct page *page) \ 207static __always_inline int TestSetPage##uname(struct page *page) \
208 { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } 208 { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
209 209
210#define TESTCLEARFLAG(uname, lname, policy) \ 210#define TESTCLEARFLAG(uname, lname, policy) \
211static inline int TestClearPage##uname(struct page *page) \ 211static __always_inline int TestClearPage##uname(struct page *page) \
212 { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } 212 { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
213 213
214#define PAGEFLAG(uname, lname, policy) \ 214#define PAGEFLAG(uname, lname, policy) \
@@ -371,7 +371,7 @@ PAGEFLAG(Idle, idle, PF_ANY)
371#define PAGE_MAPPING_KSM 2 371#define PAGE_MAPPING_KSM 2
372#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) 372#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
373 373
374static inline int PageAnon(struct page *page) 374static __always_inline int PageAnon(struct page *page)
375{ 375{
376 page = compound_head(page); 376 page = compound_head(page);
377 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; 377 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -384,7 +384,7 @@ static inline int PageAnon(struct page *page)
384 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any 384 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
385 * anon_vma, but to that page's node of the stable tree. 385 * anon_vma, but to that page's node of the stable tree.
386 */ 386 */
387static inline int PageKsm(struct page *page) 387static __always_inline int PageKsm(struct page *page)
388{ 388{
389 page = compound_head(page); 389 page = compound_head(page);
390 return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == 390 return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
@@ -415,14 +415,14 @@ static inline int PageUptodate(struct page *page)
415 return ret; 415 return ret;
416} 416}
417 417
418static inline void __SetPageUptodate(struct page *page) 418static __always_inline void __SetPageUptodate(struct page *page)
419{ 419{
420 VM_BUG_ON_PAGE(PageTail(page), page); 420 VM_BUG_ON_PAGE(PageTail(page), page);
421 smp_wmb(); 421 smp_wmb();
422 __set_bit(PG_uptodate, &page->flags); 422 __set_bit(PG_uptodate, &page->flags);
423} 423}
424 424
425static inline void SetPageUptodate(struct page *page) 425static __always_inline void SetPageUptodate(struct page *page)
426{ 426{
427 VM_BUG_ON_PAGE(PageTail(page), page); 427 VM_BUG_ON_PAGE(PageTail(page), page);
428 /* 428 /*
@@ -456,12 +456,12 @@ static inline void set_page_writeback_keepwrite(struct page *page)
456 456
457__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) 457__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
458 458
459static inline void set_compound_head(struct page *page, struct page *head) 459static __always_inline void set_compound_head(struct page *page, struct page *head)
460{ 460{
461 WRITE_ONCE(page->compound_head, (unsigned long)head + 1); 461 WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
462} 462}
463 463
464static inline void clear_compound_head(struct page *page) 464static __always_inline void clear_compound_head(struct page *page)
465{ 465{
466 WRITE_ONCE(page->compound_head, 0); 466 WRITE_ONCE(page->compound_head, 0);
467} 467}
@@ -517,6 +517,27 @@ static inline int PageTransCompound(struct page *page)
517} 517}
518 518
519/* 519/*
520 * PageTransCompoundMap is the same as PageTransCompound, but it also
521 * guarantees the primary MMU has the entire compound page mapped
522 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
523 * can also map the entire compound page. This allows the secondary
524 * MMUs to call get_user_pages() only once for each compound page and
525 * to immediately map the entire compound page with a single secondary
526 * MMU fault. If there will be a pmd split later, the secondary MMUs
527 * will get an update through the MMU notifier invalidation through
528 * split_huge_pmd().
529 *
530 * Unlike PageTransCompound, this is safe to be called only while
531 * split_huge_pmd() cannot run from under us, like if protected by the
532 * MMU notifier, otherwise it may result in page->_mapcount < 0 false
533 * positives.
534 */
535static inline int PageTransCompoundMap(struct page *page)
536{
537 return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
538}
539
540/*
520 * PageTransTail returns true for both transparent huge pages 541 * PageTransTail returns true for both transparent huge pages
521 * and hugetlbfs pages, so it should only be called when it's known 542 * and hugetlbfs pages, so it should only be called when it's known
522 * that hugetlbfs pages aren't involved. 543 * that hugetlbfs pages aren't involved.
@@ -559,6 +580,7 @@ static inline int TestClearPageDoubleMap(struct page *page)
559#else 580#else
560TESTPAGEFLAG_FALSE(TransHuge) 581TESTPAGEFLAG_FALSE(TransHuge)
561TESTPAGEFLAG_FALSE(TransCompound) 582TESTPAGEFLAG_FALSE(TransCompound)
583TESTPAGEFLAG_FALSE(TransCompoundMap)
562TESTPAGEFLAG_FALSE(TransTail) 584TESTPAGEFLAG_FALSE(TransTail)
563TESTPAGEFLAG_FALSE(DoubleMap) 585TESTPAGEFLAG_FALSE(DoubleMap)
564 TESTSETFLAG_FALSE(DoubleMap) 586 TESTSETFLAG_FALSE(DoubleMap)
@@ -593,6 +615,8 @@ static inline void __ClearPageBuddy(struct page *page)
593 atomic_set(&page->_mapcount, -1); 615 atomic_set(&page->_mapcount, -1);
594} 616}
595 617
618extern bool is_free_buddy_page(struct page *page);
619
596#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) 620#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
597 621
598static inline int PageBalloon(struct page *page) 622static inline int PageBalloon(struct page *page)
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 17f118a82854..e1fe7cf5bddf 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -45,6 +45,7 @@ struct page_ext {
45 unsigned int order; 45 unsigned int order;
46 gfp_t gfp_mask; 46 gfp_t gfp_mask;
47 unsigned int nr_entries; 47 unsigned int nr_entries;
48 int last_migrate_reason;
48 unsigned long trace_entries[8]; 49 unsigned long trace_entries[8];
49#endif 50#endif
50}; 51};
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index cacaabea8a09..46f1b939948c 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -1,38 +1,54 @@
1#ifndef __LINUX_PAGE_OWNER_H 1#ifndef __LINUX_PAGE_OWNER_H
2#define __LINUX_PAGE_OWNER_H 2#define __LINUX_PAGE_OWNER_H
3 3
4#include <linux/jump_label.h>
5
4#ifdef CONFIG_PAGE_OWNER 6#ifdef CONFIG_PAGE_OWNER
5extern bool page_owner_inited; 7extern struct static_key_false page_owner_inited;
6extern struct page_ext_operations page_owner_ops; 8extern struct page_ext_operations page_owner_ops;
7 9
8extern void __reset_page_owner(struct page *page, unsigned int order); 10extern void __reset_page_owner(struct page *page, unsigned int order);
9extern void __set_page_owner(struct page *page, 11extern void __set_page_owner(struct page *page,
10 unsigned int order, gfp_t gfp_mask); 12 unsigned int order, gfp_t gfp_mask);
11extern gfp_t __get_page_owner_gfp(struct page *page); 13extern gfp_t __get_page_owner_gfp(struct page *page);
14extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
15extern void __set_page_owner_migrate_reason(struct page *page, int reason);
16extern void __dump_page_owner(struct page *page);
12 17
13static inline void reset_page_owner(struct page *page, unsigned int order) 18static inline void reset_page_owner(struct page *page, unsigned int order)
14{ 19{
15 if (likely(!page_owner_inited)) 20 if (static_branch_unlikely(&page_owner_inited))
16 return; 21 __reset_page_owner(page, order);
17
18 __reset_page_owner(page, order);
19} 22}
20 23
21static inline void set_page_owner(struct page *page, 24static inline void set_page_owner(struct page *page,
22 unsigned int order, gfp_t gfp_mask) 25 unsigned int order, gfp_t gfp_mask)
23{ 26{
24 if (likely(!page_owner_inited)) 27 if (static_branch_unlikely(&page_owner_inited))
25 return; 28 __set_page_owner(page, order, gfp_mask);
26
27 __set_page_owner(page, order, gfp_mask);
28} 29}
29 30
30static inline gfp_t get_page_owner_gfp(struct page *page) 31static inline gfp_t get_page_owner_gfp(struct page *page)
31{ 32{
32 if (likely(!page_owner_inited)) 33 if (static_branch_unlikely(&page_owner_inited))
34 return __get_page_owner_gfp(page);
35 else
33 return 0; 36 return 0;
34 37}
35 return __get_page_owner_gfp(page); 38static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
39{
40 if (static_branch_unlikely(&page_owner_inited))
41 __copy_page_owner(oldpage, newpage);
42}
43static inline void set_page_owner_migrate_reason(struct page *page, int reason)
44{
45 if (static_branch_unlikely(&page_owner_inited))
46 __set_page_owner_migrate_reason(page, reason);
47}
48static inline void dump_page_owner(struct page *page)
49{
50 if (static_branch_unlikely(&page_owner_inited))
51 __dump_page_owner(page);
36} 52}
37#else 53#else
38static inline void reset_page_owner(struct page *page, unsigned int order) 54static inline void reset_page_owner(struct page *page, unsigned int order)
@@ -46,6 +62,14 @@ static inline gfp_t get_page_owner_gfp(struct page *page)
46{ 62{
47 return 0; 63 return 0;
48} 64}
49 65static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
66{
67}
68static inline void set_page_owner_migrate_reason(struct page *page, int reason)
69{
70}
71static inline void dump_page_owner(struct page *page)
72{
73}
50#endif /* CONFIG_PAGE_OWNER */ 74#endif /* CONFIG_PAGE_OWNER */
51#endif /* __LINUX_PAGE_OWNER_H */ 75#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
new file mode 100644
index 000000000000..e596d5d9540e
--- /dev/null
+++ b/include/linux/page_ref.h
@@ -0,0 +1,173 @@
1#ifndef _LINUX_PAGE_REF_H
2#define _LINUX_PAGE_REF_H
3
4#include <linux/atomic.h>
5#include <linux/mm_types.h>
6#include <linux/page-flags.h>
7#include <linux/tracepoint-defs.h>
8
9extern struct tracepoint __tracepoint_page_ref_set;
10extern struct tracepoint __tracepoint_page_ref_mod;
11extern struct tracepoint __tracepoint_page_ref_mod_and_test;
12extern struct tracepoint __tracepoint_page_ref_mod_and_return;
13extern struct tracepoint __tracepoint_page_ref_mod_unless;
14extern struct tracepoint __tracepoint_page_ref_freeze;
15extern struct tracepoint __tracepoint_page_ref_unfreeze;
16
17#ifdef CONFIG_DEBUG_PAGE_REF
18
19/*
20 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
21 * functions. But due to include header file issues, that is not
22 * feasible. Instead we have to open code the static key functions.
23 *
24 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
25 */
26#define page_ref_tracepoint_active(t) static_key_false(&(t).key)
27
28extern void __page_ref_set(struct page *page, int v);
29extern void __page_ref_mod(struct page *page, int v);
30extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
31extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
32extern void __page_ref_mod_unless(struct page *page, int v, int u);
33extern void __page_ref_freeze(struct page *page, int v, int ret);
34extern void __page_ref_unfreeze(struct page *page, int v);
35
36#else
37
38#define page_ref_tracepoint_active(t) false
39
40static inline void __page_ref_set(struct page *page, int v)
41{
42}
43static inline void __page_ref_mod(struct page *page, int v)
44{
45}
46static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
47{
48}
49static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
50{
51}
52static inline void __page_ref_mod_unless(struct page *page, int v, int u)
53{
54}
55static inline void __page_ref_freeze(struct page *page, int v, int ret)
56{
57}
58static inline void __page_ref_unfreeze(struct page *page, int v)
59{
60}
61
62#endif
63
64static inline int page_ref_count(struct page *page)
65{
66 return atomic_read(&page->_count);
67}
68
69static inline int page_count(struct page *page)
70{
71 return atomic_read(&compound_head(page)->_count);
72}
73
74static inline void set_page_count(struct page *page, int v)
75{
76 atomic_set(&page->_count, v);
77 if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
78 __page_ref_set(page, v);
79}
80
81/*
82 * Setup the page count before being freed into the page allocator for
83 * the first time (boot or memory hotplug)
84 */
85static inline void init_page_count(struct page *page)
86{
87 set_page_count(page, 1);
88}
89
90static inline void page_ref_add(struct page *page, int nr)
91{
92 atomic_add(nr, &page->_count);
93 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
94 __page_ref_mod(page, nr);
95}
96
97static inline void page_ref_sub(struct page *page, int nr)
98{
99 atomic_sub(nr, &page->_count);
100 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
101 __page_ref_mod(page, -nr);
102}
103
104static inline void page_ref_inc(struct page *page)
105{
106 atomic_inc(&page->_count);
107 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
108 __page_ref_mod(page, 1);
109}
110
111static inline void page_ref_dec(struct page *page)
112{
113 atomic_dec(&page->_count);
114 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
115 __page_ref_mod(page, -1);
116}
117
118static inline int page_ref_sub_and_test(struct page *page, int nr)
119{
120 int ret = atomic_sub_and_test(nr, &page->_count);
121
122 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
123 __page_ref_mod_and_test(page, -nr, ret);
124 return ret;
125}
126
127static inline int page_ref_dec_and_test(struct page *page)
128{
129 int ret = atomic_dec_and_test(&page->_count);
130
131 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
132 __page_ref_mod_and_test(page, -1, ret);
133 return ret;
134}
135
136static inline int page_ref_dec_return(struct page *page)
137{
138 int ret = atomic_dec_return(&page->_count);
139
140 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
141 __page_ref_mod_and_return(page, -1, ret);
142 return ret;
143}
144
145static inline int page_ref_add_unless(struct page *page, int nr, int u)
146{
147 int ret = atomic_add_unless(&page->_count, nr, u);
148
149 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
150 __page_ref_mod_unless(page, nr, ret);
151 return ret;
152}
153
154static inline int page_ref_freeze(struct page *page, int count)
155{
156 int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count);
157
158 if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
159 __page_ref_freeze(page, count, ret);
160 return ret;
161}
162
163static inline void page_ref_unfreeze(struct page *page, int count)
164{
165 VM_BUG_ON_PAGE(page_count(page) != 0, page);
166 VM_BUG_ON(count == 0);
167
168 atomic_set(&page->_count, count);
169 if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
170 __page_ref_unfreeze(page, count);
171}
172
173#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 92395a0a7dc5..7e1ab155c67c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
86 (__force unsigned long)mask; 86 (__force unsigned long)mask;
87} 87}
88 88
89/*
90 * The page cache can be done in larger chunks than
91 * one page, because it allows for more efficient
92 * throughput (it can then be mapped into user
93 * space in smaller chunks for same flexibility).
94 *
95 * Or rather, it _will_ be done in larger chunks.
96 */
97#define PAGE_CACHE_SHIFT PAGE_SHIFT
98#define PAGE_CACHE_SIZE PAGE_SIZE
99#define PAGE_CACHE_MASK PAGE_MASK
100#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
101
102#define page_cache_get(page) get_page(page)
103#define page_cache_release(page) put_page(page)
104void release_pages(struct page **pages, int nr, bool cold); 89void release_pages(struct page **pages, int nr, bool cold);
105 90
106/* 91/*
@@ -165,7 +150,7 @@ static inline int page_cache_get_speculative(struct page *page)
165 * SMP requires. 150 * SMP requires.
166 */ 151 */
167 VM_BUG_ON_PAGE(page_count(page) == 0, page); 152 VM_BUG_ON_PAGE(page_count(page) == 0, page);
168 atomic_inc(&page->_count); 153 page_ref_inc(page);
169 154
170#else 155#else
171 if (unlikely(!get_page_unless_zero(page))) { 156 if (unlikely(!get_page_unless_zero(page))) {
@@ -194,10 +179,10 @@ static inline int page_cache_add_speculative(struct page *page, int count)
194 VM_BUG_ON(!in_atomic()); 179 VM_BUG_ON(!in_atomic());
195# endif 180# endif
196 VM_BUG_ON_PAGE(page_count(page) == 0, page); 181 VM_BUG_ON_PAGE(page_count(page) == 0, page);
197 atomic_add(count, &page->_count); 182 page_ref_add(page, count);
198 183
199#else 184#else
200 if (unlikely(!atomic_add_unless(&page->_count, count, 0))) 185 if (unlikely(!page_ref_add_unless(page, count, 0)))
201 return 0; 186 return 0;
202#endif 187#endif
203 VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); 188 VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
@@ -205,19 +190,6 @@ static inline int page_cache_add_speculative(struct page *page, int count)
205 return 1; 190 return 1;
206} 191}
207 192
208static inline int page_freeze_refs(struct page *page, int count)
209{
210 return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
211}
212
213static inline void page_unfreeze_refs(struct page *page, int count)
214{
215 VM_BUG_ON_PAGE(page_count(page) != 0, page);
216 VM_BUG_ON(count == 0);
217
218 atomic_set(&page->_count, count);
219}
220
221#ifdef CONFIG_NUMA 193#ifdef CONFIG_NUMA
222extern struct page *__page_cache_alloc(gfp_t gfp); 194extern struct page *__page_cache_alloc(gfp_t gfp);
223#else 195#else
@@ -403,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
403 return page->index << compound_order(page); 375 return page->index << compound_order(page);
404 376
405 if (likely(!PageTransTail(page))) 377 if (likely(!PageTransTail(page)))
406 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 378 return page->index;
407 379
408 /* 380 /*
409 * We don't initialize ->index for tail pages: calculate based on 381 * We don't initialize ->index for tail pages: calculate based on
410 * head page 382 * head page
411 */ 383 */
412 pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 384 pgoff = compound_head(page)->index;
413 pgoff += page - compound_head(page); 385 pgoff += page - compound_head(page);
414 return pgoff; 386 return pgoff;
415} 387}
@@ -419,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
419 */ 391 */
420static inline loff_t page_offset(struct page *page) 392static inline loff_t page_offset(struct page *page)
421{ 393{
422 return ((loff_t)page->index) << PAGE_CACHE_SHIFT; 394 return ((loff_t)page->index) << PAGE_SHIFT;
423} 395}
424 396
425static inline loff_t page_file_offset(struct page *page) 397static inline loff_t page_file_offset(struct page *page)
426{ 398{
427 return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; 399 return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
428} 400}
429 401
430extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, 402extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -438,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
438 return linear_hugepage_index(vma, address); 410 return linear_hugepage_index(vma, address);
439 pgoff = (address - vma->vm_start) >> PAGE_SHIFT; 411 pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
440 pgoff += vma->vm_pgoff; 412 pgoff += vma->vm_pgoff;
441 return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT); 413 return pgoff;
442} 414}
443 415
444extern void __lock_page(struct page *page); 416extern void __lock_page(struct page *page);
@@ -548,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
548/* 520/*
549 * Fault a userspace page into pagetables. Return non-zero on a fault. 521 * Fault a userspace page into pagetables. Return non-zero on a fault.
550 * 522 *
551 * This assumes that two userspace pages are always sufficient. That's 523 * This assumes that two userspace pages are always sufficient.
552 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
553 */ 524 */
554static inline int fault_in_pages_writeable(char __user *uaddr, int size) 525static inline int fault_in_pages_writeable(char __user *uaddr, int size)
555{ 526{
@@ -663,8 +634,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
663int add_to_page_cache_lru(struct page *page, struct address_space *mapping, 634int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
664 pgoff_t index, gfp_t gfp_mask); 635 pgoff_t index, gfp_t gfp_mask);
665extern void delete_from_page_cache(struct page *page); 636extern void delete_from_page_cache(struct page *page);
666extern void __delete_from_page_cache(struct page *page, void *shadow, 637extern void __delete_from_page_cache(struct page *page, void *shadow);
667 struct mem_cgroup *memcg);
668int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); 638int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
669 639
670/* 640/*
@@ -685,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
685 655
686static inline unsigned long dir_pages(struct inode *inode) 656static inline unsigned long dir_pages(struct inode *inode)
687{ 657{
688 return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >> 658 return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
689 PAGE_CACHE_SHIFT; 659 PAGE_SHIFT;
690} 660}
691 661
692#endif /* _LINUX_PAGEMAP_H */ 662#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
new file mode 100644
index 000000000000..39726caef5b1
--- /dev/null
+++ b/include/linux/pci-dma-compat.h
@@ -0,0 +1,147 @@
1/* include this file if the platform implements the dma_ DMA Mapping API
2 * and wants to provide the pci_ DMA Mapping API in terms of it */
3
4#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
5#define _ASM_GENERIC_PCI_DMA_COMPAT_H
6
7#include <linux/dma-mapping.h>
8
9/* This defines the direction arg to the DMA mapping routines. */
10#define PCI_DMA_BIDIRECTIONAL 0
11#define PCI_DMA_TODEVICE 1
12#define PCI_DMA_FROMDEVICE 2
13#define PCI_DMA_NONE 3
14
15static inline void *
16pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
17 dma_addr_t *dma_handle)
18{
19 return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
20}
21
22static inline void *
23pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
24 dma_addr_t *dma_handle)
25{
26 return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
27 size, dma_handle, GFP_ATOMIC);
28}
29
30static inline void
31pci_free_consistent(struct pci_dev *hwdev, size_t size,
32 void *vaddr, dma_addr_t dma_handle)
33{
34 dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
35}
36
37static inline dma_addr_t
38pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
39{
40 return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
41}
42
43static inline void
44pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
45 size_t size, int direction)
46{
47 dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
48}
49
50static inline dma_addr_t
51pci_map_page(struct pci_dev *hwdev, struct page *page,
52 unsigned long offset, size_t size, int direction)
53{
54 return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
55}
56
57static inline void
58pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
59 size_t size, int direction)
60{
61 dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
62}
63
64static inline int
65pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
66 int nents, int direction)
67{
68 return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
69}
70
71static inline void
72pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
73 int nents, int direction)
74{
75 dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
76}
77
78static inline void
79pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
80 size_t size, int direction)
81{
82 dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
83}
84
85static inline void
86pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
87 size_t size, int direction)
88{
89 dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
90}
91
92static inline void
93pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
94 int nelems, int direction)
95{
96 dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
97}
98
99static inline void
100pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
101 int nelems, int direction)
102{
103 dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
104}
105
106static inline int
107pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
108{
109 return dma_mapping_error(&pdev->dev, dma_addr);
110}
111
112#ifdef CONFIG_PCI
113static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
114{
115 return dma_set_mask(&dev->dev, mask);
116}
117
118static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
119{
120 return dma_set_coherent_mask(&dev->dev, mask);
121}
122
123static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
124 unsigned int size)
125{
126 return dma_set_max_seg_size(&dev->dev, size);
127}
128
129static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
130 unsigned long mask)
131{
132 return dma_set_seg_boundary(&dev->dev, mask);
133}
134#else
135static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
136{ return -EIO; }
137static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
138{ return -EIO; }
139static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
140 unsigned int size)
141{ return -EIO; }
142static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
143 unsigned long mask)
144{ return -EIO; }
145#endif
146
147#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 27716254dcc5..932ec74909c6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -70,12 +70,6 @@ enum pci_mmap_state {
70 pci_mmap_mem 70 pci_mmap_mem
71}; 71};
72 72
73/* This defines the direction arg to the DMA mapping routines. */
74#define PCI_DMA_BIDIRECTIONAL 0
75#define PCI_DMA_TODEVICE 1
76#define PCI_DMA_FROMDEVICE 2
77#define PCI_DMA_NONE 3
78
79/* 73/*
80 * For PCI devices, the region numbers are assigned this way: 74 * For PCI devices, the region numbers are assigned this way:
81 */ 75 */
@@ -359,6 +353,7 @@ struct pci_dev {
359 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ 353 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
360 unsigned int irq_managed:1; 354 unsigned int irq_managed:1;
361 unsigned int has_secondary_link:1; 355 unsigned int has_secondary_link:1;
356 unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
362 pci_dev_flags_t dev_flags; 357 pci_dev_flags_t dev_flags;
363 atomic_t enable_cnt; /* pci_enable_device has been called */ 358 atomic_t enable_cnt; /* pci_enable_device has been called */
364 359
@@ -578,6 +573,8 @@ static inline int pcibios_err_to_errno(int err)
578/* Low-level architecture-dependent routines */ 573/* Low-level architecture-dependent routines */
579 574
580struct pci_ops { 575struct pci_ops {
576 int (*add_bus)(struct pci_bus *bus);
577 void (*remove_bus)(struct pci_bus *bus);
581 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where); 578 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
582 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); 579 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
583 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); 580 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
@@ -746,9 +743,26 @@ struct pci_driver {
746 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ 743 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
747 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 744 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
748 745
746enum {
747 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */
748 PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */
749 PCI_PROBE_ONLY = 0x00000004, /* use existing setup */
750 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */
751 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */
752 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
753 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */
754};
755
749/* these external functions are only available when PCI support is enabled */ 756/* these external functions are only available when PCI support is enabled */
750#ifdef CONFIG_PCI 757#ifdef CONFIG_PCI
751 758
759extern unsigned int pci_flags;
760
761static inline void pci_set_flags(int flags) { pci_flags = flags; }
762static inline void pci_add_flags(int flags) { pci_flags |= flags; }
763static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
764static inline int pci_has_flag(int flag) { return pci_flags & flag; }
765
752void pcie_bus_configure_settings(struct pci_bus *bus); 766void pcie_bus_configure_settings(struct pci_bus *bus);
753 767
754enum pcie_bus_config_types { 768enum pcie_bus_config_types {
@@ -770,6 +784,7 @@ extern struct list_head pci_root_buses; /* list of all known PCI buses */
770int no_pci_devices(void); 784int no_pci_devices(void);
771 785
772void pcibios_resource_survey_bus(struct pci_bus *bus); 786void pcibios_resource_survey_bus(struct pci_bus *bus);
787void pcibios_bus_add_device(struct pci_dev *pdev);
773void pcibios_add_bus(struct pci_bus *bus); 788void pcibios_add_bus(struct pci_bus *bus);
774void pcibios_remove_bus(struct pci_bus *bus); 789void pcibios_remove_bus(struct pci_bus *bus);
775void pcibios_fixup_bus(struct pci_bus *); 790void pcibios_fixup_bus(struct pci_bus *);
@@ -1004,8 +1019,6 @@ void pci_intx(struct pci_dev *dev, int enable);
1004bool pci_intx_mask_supported(struct pci_dev *dev); 1019bool pci_intx_mask_supported(struct pci_dev *dev);
1005bool pci_check_and_mask_intx(struct pci_dev *dev); 1020bool pci_check_and_mask_intx(struct pci_dev *dev);
1006bool pci_check_and_unmask_intx(struct pci_dev *dev); 1021bool pci_check_and_unmask_intx(struct pci_dev *dev);
1007int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
1008int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
1009int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); 1022int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1010int pci_wait_for_pending_transaction(struct pci_dev *dev); 1023int pci_wait_for_pending_transaction(struct pci_dev *dev);
1011int pcix_get_max_mmrbc(struct pci_dev *dev); 1024int pcix_get_max_mmrbc(struct pci_dev *dev);
@@ -1098,6 +1111,7 @@ void pci_unlock_rescan_remove(void);
1098/* Vital product data routines */ 1111/* Vital product data routines */
1099ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1112ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1100ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1113ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1114int pci_set_vpd_size(struct pci_dev *dev, size_t len);
1101 1115
1102/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 1116/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1103resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); 1117resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
@@ -1221,6 +1235,7 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
1221 1235
1222int pci_set_vga_state(struct pci_dev *pdev, bool decode, 1236int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1223 unsigned int command_bits, u32 flags); 1237 unsigned int command_bits, u32 flags);
1238
1224/* kmem_cache style wrapper around pci_alloc_consistent() */ 1239/* kmem_cache style wrapper around pci_alloc_consistent() */
1225 1240
1226#include <linux/pci-dma.h> 1241#include <linux/pci-dma.h>
@@ -1388,6 +1403,11 @@ void pci_register_set_vga_state(arch_set_vga_state_t func);
1388 1403
1389#else /* CONFIG_PCI is not enabled */ 1404#else /* CONFIG_PCI is not enabled */
1390 1405
1406static inline void pci_set_flags(int flags) { }
1407static inline void pci_add_flags(int flags) { }
1408static inline void pci_clear_flags(int flags) { }
1409static inline int pci_has_flag(int flag) { return 0; }
1410
1391/* 1411/*
1392 * If the system does not have PCI, clearly these return errors. Define 1412 * If the system does not have PCI, clearly these return errors. Define
1393 * these as simple inline functions to avoid hair in drivers. 1413 * these as simple inline functions to avoid hair in drivers.
@@ -1427,16 +1447,6 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
1427static inline void pci_set_master(struct pci_dev *dev) { } 1447static inline void pci_set_master(struct pci_dev *dev) { }
1428static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } 1448static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
1429static inline void pci_disable_device(struct pci_dev *dev) { } 1449static inline void pci_disable_device(struct pci_dev *dev) { }
1430static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
1431{ return -EIO; }
1432static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
1433{ return -EIO; }
1434static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
1435 unsigned int size)
1436{ return -EIO; }
1437static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
1438 unsigned long mask)
1439{ return -EIO; }
1440static inline int pci_assign_resource(struct pci_dev *dev, int i) 1450static inline int pci_assign_resource(struct pci_dev *dev, int i)
1441{ return -EBUSY; } 1451{ return -EBUSY; }
1442static inline int __pci_register_driver(struct pci_driver *drv, 1452static inline int __pci_register_driver(struct pci_driver *drv,
@@ -1498,6 +1508,10 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1498 1508
1499#include <asm/pci.h> 1509#include <asm/pci.h>
1500 1510
1511#ifndef pci_root_bus_fwnode
1512#define pci_root_bus_fwnode(bus) NULL
1513#endif
1514
1501/* these helpers provide future and backwards compatibility 1515/* these helpers provide future and backwards compatibility
1502 * for accessing popular PCI BAR info */ 1516 * for accessing popular PCI BAR info */
1503#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) 1517#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
@@ -1721,6 +1735,8 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
1721 1735
1722int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); 1736int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
1723void pci_disable_sriov(struct pci_dev *dev); 1737void pci_disable_sriov(struct pci_dev *dev);
1738int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
1739void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
1724int pci_num_vf(struct pci_dev *dev); 1740int pci_num_vf(struct pci_dev *dev);
1725int pci_vfs_assigned(struct pci_dev *dev); 1741int pci_vfs_assigned(struct pci_dev *dev);
1726int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); 1742int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
@@ -1737,6 +1753,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
1737} 1753}
1738static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) 1754static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
1739{ return -ENODEV; } 1755{ return -ENODEV; }
1756static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
1757{
1758 return -ENOSYS;
1759}
1760static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
1761 int id, int reset) { }
1740static inline void pci_disable_sriov(struct pci_dev *dev) { } 1762static inline void pci_disable_sriov(struct pci_dev *dev) { }
1741static inline int pci_num_vf(struct pci_dev *dev) { return 0; } 1763static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
1742static inline int pci_vfs_assigned(struct pci_dev *dev) 1764static inline int pci_vfs_assigned(struct pci_dev *dev)
@@ -1817,12 +1839,13 @@ bool pci_acs_path_enabled(struct pci_dev *start,
1817#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) 1839#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
1818 1840
1819/* Small Resource Data Type Tag Item Names */ 1841/* Small Resource Data Type Tag Item Names */
1820#define PCI_VPD_STIN_END 0x78 /* End */ 1842#define PCI_VPD_STIN_END 0x0f /* End */
1821 1843
1822#define PCI_VPD_SRDT_END PCI_VPD_STIN_END 1844#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
1823 1845
1824#define PCI_VPD_SRDT_TIN_MASK 0x78 1846#define PCI_VPD_SRDT_TIN_MASK 0x78
1825#define PCI_VPD_SRDT_LEN_MASK 0x07 1847#define PCI_VPD_SRDT_LEN_MASK 0x07
1848#define PCI_VPD_LRDT_TIN_MASK 0x7f
1826 1849
1827#define PCI_VPD_LRDT_TAG_SIZE 3 1850#define PCI_VPD_LRDT_TAG_SIZE 3
1828#define PCI_VPD_SRDT_TAG_SIZE 1 1851#define PCI_VPD_SRDT_TAG_SIZE 1
@@ -1846,6 +1869,17 @@ static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
1846} 1869}
1847 1870
1848/** 1871/**
1872 * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
1873 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
1874 *
1875 * Returns the extracted Large Resource Data Type Tag item.
1876 */
1877static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
1878{
1879 return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
1880}
1881
1882/**
1849 * pci_vpd_srdt_size - Extracts the Small Resource Data Type length 1883 * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
1850 * @lrdt: Pointer to the beginning of the Small Resource Data Type tag 1884 * @lrdt: Pointer to the beginning of the Small Resource Data Type tag
1851 * 1885 *
@@ -1857,6 +1891,17 @@ static inline u8 pci_vpd_srdt_size(const u8 *srdt)
1857} 1891}
1858 1892
1859/** 1893/**
1894 * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
1895 * @lrdt: Pointer to the beginning of the Small Resource Data Type tag
1896 *
1897 * Returns the extracted Small Resource Data Type Tag Item.
1898 */
1899static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
1900{
1901 return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
1902}
1903
1904/**
1860 * pci_vpd_info_field_size - Extracts the information field length 1905 * pci_vpd_info_field_size - Extracts the information field length
1861 * @lrdt: Pointer to the beginning of an information field header 1906 * @lrdt: Pointer to the beginning of an information field header
1862 * 1907 *
@@ -1972,4 +2017,8 @@ static inline bool pci_ari_enabled(struct pci_bus *bus)
1972{ 2017{
1973 return bus->self && bus->self->ari_enabled; 2018 return bus->self && bus->self->ari_enabled;
1974} 2019}
2020
2021/* provide the legacy pci_dma_* API */
2022#include <linux/pci-dma-compat.h>
2023
1975#endif /* LINUX_PCI_H */ 2024#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 37f05cb1dfd6..247da8c95860 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -110,6 +110,7 @@
110#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 110#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
111#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 111#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
112#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 112#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
113#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
113#define PCI_CLASS_SERIAL_FIBER 0x0c04 114#define PCI_CLASS_SERIAL_FIBER 0x0c04
114#define PCI_CLASS_SERIAL_SMBUS 0x0c05 115#define PCI_CLASS_SERIAL_SMBUS 0x0c05
115 116
@@ -2506,6 +2507,10 @@
2506 2507
2507#define PCI_VENDOR_ID_AZWAVE 0x1a3b 2508#define PCI_VENDOR_ID_AZWAVE 0x1a3b
2508 2509
2510#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
2511#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
2512#define PCI_SUBDEVICE_ID_QEMU 0x1100
2513
2509#define PCI_VENDOR_ID_ASMEDIA 0x1b21 2514#define PCI_VENDOR_ID_ASMEDIA 0x1b21
2510 2515
2511#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 2516#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 83b5e34c6580..4196c90a3c88 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -104,9 +104,11 @@ struct arm_pmu {
104 atomic_t active_events; 104 atomic_t active_events;
105 struct mutex reserve_mutex; 105 struct mutex reserve_mutex;
106 u64 max_period; 106 u64 max_period;
107 bool secure_access; /* 32-bit ARM only */
107 struct platform_device *plat_device; 108 struct platform_device *plat_device;
108 struct pmu_hw_events __percpu *hw_events; 109 struct pmu_hw_events __percpu *hw_events;
109 struct notifier_block hotplug_nb; 110 struct notifier_block hotplug_nb;
111 struct notifier_block cpu_pm_nb;
110}; 112};
111 113
112#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) 114#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f5c5a3fa2c81..f291275ffd71 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -121,6 +121,7 @@ struct hw_perf_event {
121 struct { /* intel_cqm */ 121 struct { /* intel_cqm */
122 int cqm_state; 122 int cqm_state;
123 u32 cqm_rmid; 123 u32 cqm_rmid;
124 int is_group_event;
124 struct list_head cqm_events_entry; 125 struct list_head cqm_events_entry;
125 struct list_head cqm_groups_entry; 126 struct list_head cqm_groups_entry;
126 struct list_head cqm_group_entry; 127 struct list_head cqm_group_entry;
@@ -128,6 +129,10 @@ struct hw_perf_event {
128 struct { /* itrace */ 129 struct { /* itrace */
129 int itrace_started; 130 int itrace_started;
130 }; 131 };
132 struct { /* amd_power */
133 u64 pwr_acc;
134 u64 ptsc;
135 };
131#ifdef CONFIG_HAVE_HW_BREAKPOINT 136#ifdef CONFIG_HAVE_HW_BREAKPOINT
132 struct { /* breakpoint */ 137 struct { /* breakpoint */
133 /* 138 /*
@@ -468,6 +473,7 @@ struct perf_event {
468 int group_flags; 473 int group_flags;
469 struct perf_event *group_leader; 474 struct perf_event *group_leader;
470 struct pmu *pmu; 475 struct pmu *pmu;
476 void *pmu_private;
471 477
472 enum perf_event_active_state state; 478 enum perf_event_active_state state;
473 unsigned int attach_state; 479 unsigned int attach_state;
@@ -965,11 +971,20 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
965 971
966extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); 972extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
967extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); 973extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
974extern struct perf_callchain_entry *
975get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
976 bool crosstask, bool add_mark);
977extern int get_callchain_buffers(void);
978extern void put_callchain_buffers(void);
968 979
969static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) 980static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
970{ 981{
971 if (entry->nr < PERF_MAX_STACK_DEPTH) 982 if (entry->nr < PERF_MAX_STACK_DEPTH) {
972 entry->ip[entry->nr++] = ip; 983 entry->ip[entry->nr++] = ip;
984 return 0;
985 } else {
986 return -1; /* no more room, stop walking the stack */
987 }
973} 988}
974 989
975extern int sysctl_perf_event_paranoid; 990extern int sysctl_perf_event_paranoid;
@@ -1109,12 +1124,6 @@ static inline void perf_event_task_tick(void) { }
1109static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } 1124static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
1110#endif 1125#endif
1111 1126
1112#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
1113extern bool perf_event_can_stop_tick(void);
1114#else
1115static inline bool perf_event_can_stop_tick(void) { return true; }
1116#endif
1117
1118#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) 1127#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1119extern void perf_restore_debug_store(void); 1128extern void perf_restore_debug_store(void);
1120#else 1129#else
diff --git a/include/linux/phy.h b/include/linux/phy.h
index d6f3641e7933..2abd7918f64f 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,8 +327,6 @@ struct phy_c45_device_ids {
327/* phy_device: An instance of a PHY 327/* phy_device: An instance of a PHY
328 * 328 *
329 * drv: Pointer to the driver for this PHY instance 329 * drv: Pointer to the driver for this PHY instance
330 * bus: Pointer to the bus this PHY is on
331 * dev: driver model device structure for this PHY
332 * phy_id: UID for this device found during discovery 330 * phy_id: UID for this device found during discovery
333 * c45_ids: 802.3-c45 Device Identifers if is_c45. 331 * c45_ids: 802.3-c45 Device Identifers if is_c45.
334 * is_c45: Set to true if this phy uses clause 45 addressing. 332 * is_c45: Set to true if this phy uses clause 45 addressing.
@@ -338,7 +336,6 @@ struct phy_c45_device_ids {
338 * suspended: Set to true if this phy has been suspended successfully. 336 * suspended: Set to true if this phy has been suspended successfully.
339 * state: state of the PHY for management purposes 337 * state: state of the PHY for management purposes
340 * dev_flags: Device-specific flags used by the PHY driver. 338 * dev_flags: Device-specific flags used by the PHY driver.
341 * addr: Bus address of PHY
342 * link_timeout: The number of timer firings to wait before the 339 * link_timeout: The number of timer firings to wait before the
343 * giving up on the current attempt at acquiring a link 340 * giving up on the current attempt at acquiring a link
344 * irq: IRQ number of the PHY's interrupt (-1 if none) 341 * irq: IRQ number of the PHY's interrupt (-1 if none)
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 2400d2ea4f34..1d41ec44e39d 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -19,7 +19,7 @@ extern struct phy_device *fixed_phy_register(unsigned int irq,
19 struct fixed_phy_status *status, 19 struct fixed_phy_status *status,
20 int link_gpio, 20 int link_gpio,
21 struct device_node *np); 21 struct device_node *np);
22extern void fixed_phy_del(int phy_addr); 22extern void fixed_phy_unregister(struct phy_device *phydev);
23extern int fixed_phy_set_link_update(struct phy_device *phydev, 23extern int fixed_phy_set_link_update(struct phy_device *phydev,
24 int (*link_update)(struct net_device *, 24 int (*link_update)(struct net_device *,
25 struct fixed_phy_status *)); 25 struct fixed_phy_status *));
@@ -40,9 +40,8 @@ static inline struct phy_device *fixed_phy_register(unsigned int irq,
40{ 40{
41 return ERR_PTR(-ENODEV); 41 return ERR_PTR(-ENODEV);
42} 42}
43static inline int fixed_phy_del(int phy_addr) 43static inline void fixed_phy_unregister(struct phy_device *phydev)
44{ 44{
45 return -ENODEV;
46} 45}
47static inline int fixed_phy_set_link_update(struct phy_device *phydev, 46static inline int fixed_phy_set_link_update(struct phy_device *phydev,
48 int (*link_update)(struct net_device *, 47 int (*link_update)(struct net_device *,
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
new file mode 100644
index 000000000000..1d405a2b7272
--- /dev/null
+++ b/include/linux/pkeys.h
@@ -0,0 +1,33 @@
1#ifndef _LINUX_PKEYS_H
2#define _LINUX_PKEYS_H
3
4#include <linux/mm_types.h>
5#include <asm/mmu_context.h>
6
7#define PKEY_DISABLE_ACCESS 0x1
8#define PKEY_DISABLE_WRITE 0x2
9#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
10 PKEY_DISABLE_WRITE)
11
12#ifdef CONFIG_ARCH_HAS_PKEYS
13#include <asm/pkeys.h>
14#else /* ! CONFIG_ARCH_HAS_PKEYS */
15#define arch_max_pkey() (1)
16#define execute_only_pkey(mm) (0)
17#define arch_override_mprotect_pkey(vma, prot, pkey) (0)
18#define PKEY_DEDICATED_EXECUTE_ONLY 0
19#endif /* ! CONFIG_ARCH_HAS_PKEYS */
20
21/*
22 * This is called from mprotect_pkey().
23 *
24 * Returns true if the protection keys is valid.
25 */
26static inline bool validate_pkey(int pkey)
27{
28 if (pkey < 0)
29 return false;
30 return (pkey < arch_max_pkey());
31}
32
33#endif /* _LINUX_PKEYS_H */
diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h
new file mode 100644
index 000000000000..7bd8ed7d978e
--- /dev/null
+++ b/include/linux/platform_data/ad5761.h
@@ -0,0 +1,44 @@
1/*
2 * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
3 *
4 * Copyright 2016 Qtechnology A/S
5 * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
6 *
7 * Licensed under the GPL-2.
8 */
9#ifndef __LINUX_PLATFORM_DATA_AD5761_H__
10#define __LINUX_PLATFORM_DATA_AD5761_H__
11
12/**
13 * enum ad5761_voltage_range - Voltage range the AD5761 is configured for.
14 * @AD5761_VOLTAGE_RANGE_M10V_10V: -10V to 10V
15 * @AD5761_VOLTAGE_RANGE_0V_10V: 0V to 10V
16 * @AD5761_VOLTAGE_RANGE_M5V_5V: -5V to 5V
17 * @AD5761_VOLTAGE_RANGE_0V_5V: 0V to 5V
18 * @AD5761_VOLTAGE_RANGE_M2V5_7V5: -2.5V to 7.5V
19 * @AD5761_VOLTAGE_RANGE_M3V_3V: -3V to 3V
20 * @AD5761_VOLTAGE_RANGE_0V_16V: 0V to 16V
21 * @AD5761_VOLTAGE_RANGE_0V_20V: 0V to 20V
22 */
23
24enum ad5761_voltage_range {
25 AD5761_VOLTAGE_RANGE_M10V_10V,
26 AD5761_VOLTAGE_RANGE_0V_10V,
27 AD5761_VOLTAGE_RANGE_M5V_5V,
28 AD5761_VOLTAGE_RANGE_0V_5V,
29 AD5761_VOLTAGE_RANGE_M2V5_7V5,
30 AD5761_VOLTAGE_RANGE_M3V_3V,
31 AD5761_VOLTAGE_RANGE_0V_16V,
32 AD5761_VOLTAGE_RANGE_0V_20V,
33};
34
35/**
36 * struct ad5761_platform_data - AD5761 DAC driver platform data
37 * @voltage_range: Voltage range the AD5761 is configured for
38 */
39
40struct ad5761_platform_data {
41 enum ad5761_voltage_range voltage_range;
42};
43
44#endif
diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h
index a81766cae230..9db1b905df24 100644
--- a/include/linux/platform_data/adau17x1.h
+++ b/include/linux/platform_data/adau17x1.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Driver for ADAU1761/ADAU1461/ADAU1761/ADAU1961/ADAU1781/ADAU1781 codecs 2 * Driver for ADAU1361/ADAU1461/ADAU1761/ADAU1961/ADAU1381/ADAU1781 codecs
3 * 3 *
4 * Copyright 2011-2014 Analog Devices Inc. 4 * Copyright 2011-2014 Analog Devices Inc.
5 * Author: Lars-Peter Clausen <lars@metafoo.de> 5 * Author: Lars-Peter Clausen <lars@metafoo.de>
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index c42aa89d34ee..dc9a13e5acda 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -9,7 +9,7 @@
9#define _LINUX_AT24_H 9#define _LINUX_AT24_H
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/memory.h> 12#include <linux/nvmem-consumer.h>
13 13
14/** 14/**
15 * struct at24_platform_data - data to set up at24 (generic eeprom) driver 15 * struct at24_platform_data - data to set up at24 (generic eeprom) driver
@@ -17,7 +17,7 @@
17 * @page_size: number of byte which can be written in one go 17 * @page_size: number of byte which can be written in one go
18 * @flags: tunable options, check AT24_FLAG_* defines 18 * @flags: tunable options, check AT24_FLAG_* defines
19 * @setup: an optional callback invoked after eeprom is probed; enables kernel 19 * @setup: an optional callback invoked after eeprom is probed; enables kernel
20 code to access eeprom via memory_accessor, see example 20 code to access eeprom via nvmem, see example
21 * @context: optional parameter passed to setup() 21 * @context: optional parameter passed to setup()
22 * 22 *
23 * If you set up a custom eeprom type, please double-check the parameters. 23 * If you set up a custom eeprom type, please double-check the parameters.
@@ -26,13 +26,13 @@
26 * 26 *
27 * An example in pseudo code for a setup() callback: 27 * An example in pseudo code for a setup() callback:
28 * 28 *
29 * void get_mac_addr(struct memory_accessor *mem_acc, void *context) 29 * void get_mac_addr(struct nvmem_device *nvmem, void *context)
30 * { 30 * {
31 * u8 *mac_addr = ethernet_pdata->mac_addr; 31 * u8 *mac_addr = ethernet_pdata->mac_addr;
32 * off_t offset = context; 32 * off_t offset = context;
33 * 33 *
34 * // Read MAC addr from EEPROM 34 * // Read MAC addr from EEPROM
35 * if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN) 35 * if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
36 * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); 36 * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
37 * } 37 * }
38 * 38 *
@@ -48,7 +48,7 @@ struct at24_platform_data {
48#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ 48#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
49#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ 49#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */
50 50
51 void (*setup)(struct memory_accessor *, void *context); 51 void (*setup)(struct nvmem_device *nvmem, void *context);
52 void *context; 52 void *context;
53}; 53};
54 54
diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h
deleted file mode 100644
index e75dcbf2b230..000000000000
--- a/include/linux/platform_data/brcmfmac-sdio.h
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _LINUX_BRCMFMAC_PLATFORM_H
18#define _LINUX_BRCMFMAC_PLATFORM_H
19
20/*
21 * Platform specific driver functions and data. Through the platform specific
22 * device data functions can be provided to help the brcmfmac driver to
23 * operate with the device in combination with the used platform.
24 *
25 * Use the platform data in the following (similar) way:
26 *
27 *
28#include <brcmfmac_platform.h>
29
30
31static void brcmfmac_power_on(void)
32{
33}
34
35static void brcmfmac_power_off(void)
36{
37}
38
39static void brcmfmac_reset(void)
40{
41}
42
43static struct brcmfmac_sdio_platform_data brcmfmac_sdio_pdata = {
44 .power_on = brcmfmac_power_on,
45 .power_off = brcmfmac_power_off,
46 .reset = brcmfmac_reset
47};
48
49static struct platform_device brcmfmac_device = {
50 .name = BRCMFMAC_SDIO_PDATA_NAME,
51 .id = PLATFORM_DEVID_NONE,
52 .dev.platform_data = &brcmfmac_sdio_pdata
53};
54
55void __init brcmfmac_init_pdata(void)
56{
57 brcmfmac_sdio_pdata.oob_irq_supported = true;
58 brcmfmac_sdio_pdata.oob_irq_nr = gpio_to_irq(GPIO_BRCMF_SDIO_OOB);
59 brcmfmac_sdio_pdata.oob_irq_flags = IORESOURCE_IRQ |
60 IORESOURCE_IRQ_HIGHLEVEL;
61 platform_device_register(&brcmfmac_device);
62}
63 *
64 *
65 * Note: the brcmfmac can be loaded as module or be statically built-in into
66 * the kernel. If built-in then do note that it uses module_init (and
67 * module_exit) routines which equal device_initcall. So if you intend to
68 * create a module with the platform specific data for the brcmfmac and have
69 * it built-in to the kernel then use a higher initcall than device_initcall
70 * (see init.h). If this is not done then brcmfmac will load without problems
71 * but will not pickup the platform data.
72 *
73 * When the driver does not "detect" platform driver data then it will continue
74 * without reporting anything and just assume there is no data needed. Which is
75 * probably true for most platforms.
76 *
77 * Explanation of the platform_data fields:
78 *
79 * drive_strength: is the preferred drive_strength to be used for the SDIO
80 * pins. If 0 then a default value will be used. This is the target drive
81 * strength, the exact drive strength which will be used depends on the
82 * capabilities of the device.
83 *
84 * oob_irq_supported: does the board have support for OOB interrupts. SDIO
85 * in-band interrupts are relatively slow and for having less overhead on
86 * interrupt processing an out of band interrupt can be used. If the HW
87 * supports this then enable this by setting this field to true and configure
88 * the oob related fields.
89 *
90 * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are
91 * used for registering the irq using request_irq function.
92 *
93 * broken_sg_support: flag for broken sg list support of SDIO host controller.
94 * Set this to true if the SDIO host controller has higher align requirement
95 * than 32 bytes for each scatterlist item.
96 *
97 * sd_head_align: alignment requirement for start of data buffer
98 *
99 * sd_sgentry_align: length alignment requirement for each sg entry
100 *
101 * power_on: This function is called by the brcmfmac when the module gets
102 * loaded. This can be particularly useful for low power devices. The platform
103 * specific routine may for example decide to power up the complete device.
104 * If there is no use-case for this function then provide NULL.
105 *
106 * power_off: This function is called by the brcmfmac when the module gets
107 * unloaded. At this point the device can be powered down or otherwise be reset.
108 * So if an actual power_off is not supported but reset is then reset the device
109 * when this function gets called. This can be particularly useful for low power
110 * devices. If there is no use-case for this function (either power-down or
111 * reset) then provide NULL.
112 *
113 * reset: This function can get called if the device communication broke down.
114 * This functionality is particularly useful in case of SDIO type devices. It is
115 * possible to reset a dongle via sdio data interface, but it requires that
116 * this is fully functional. This function is chip/module specific and this
117 * function should return only after the complete reset has completed.
118 */
119
120#define BRCMFMAC_SDIO_PDATA_NAME "brcmfmac_sdio"
121
122struct brcmfmac_sdio_platform_data {
123 unsigned int drive_strength;
124 bool oob_irq_supported;
125 unsigned int oob_irq_nr;
126 unsigned long oob_irq_flags;
127 bool broken_sg_support;
128 unsigned short sd_head_align;
129 unsigned short sd_sgentry_align;
130 void (*power_on)(void);
131 void (*power_off)(void);
132 void (*reset)(void);
133};
134
135#endif /* _LINUX_BRCMFMAC_PLATFORM_H */
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
new file mode 100644
index 000000000000..1d30bf278231
--- /dev/null
+++ b/include/linux/platform_data/brcmfmac.h
@@ -0,0 +1,185 @@
1/*
2 * Copyright (c) 2016 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _LINUX_BRCMFMAC_PLATFORM_H
18#define _LINUX_BRCMFMAC_PLATFORM_H
19
20
21#define BRCMFMAC_PDATA_NAME "brcmfmac"
22
23#define BRCMFMAC_COUNTRY_BUF_SZ 4
24
25
26/*
27 * Platform specific driver functions and data. Through the platform specific
28 * device data functions and data can be provided to help the brcmfmac driver to
29 * operate with the device in combination with the used platform.
30 */
31
32
33/**
34 * Note: the brcmfmac can be loaded as module or be statically built-in into
35 * the kernel. If built-in then do note that it uses module_init (and
36 * module_exit) routines which equal device_initcall. So if you intend to
37 * create a module with the platform specific data for the brcmfmac and have
38 * it built-in to the kernel then use a higher initcall than device_initcall
39 * (see init.h). If this is not done then brcmfmac will load without problems
40 * but will not pickup the platform data.
41 *
42 * When the driver does not "detect" platform driver data then it will continue
43 * without reporting anything and just assume there is no data needed. Which is
44 * probably true for most platforms.
45 */
46
47/**
48 * enum brcmf_bus_type - Bus type identifier. Currently SDIO, USB and PCIE are
49 * supported.
50 */
51enum brcmf_bus_type {
52 BRCMF_BUSTYPE_SDIO,
53 BRCMF_BUSTYPE_USB,
54 BRCMF_BUSTYPE_PCIE
55};
56
57
58/**
59 * struct brcmfmac_sdio_pd - SDIO Device specific platform data.
60 *
61 * @txglomsz: SDIO txglom size. Use 0 if default of driver is to be
62 * used.
63 * @drive_strength: is the preferred drive_strength to be used for the SDIO
64 * pins. If 0 then a default value will be used. This is
65 * the target drive strength, the exact drive strength
66 * which will be used depends on the capabilities of the
67 * device.
68 * @oob_irq_supported: does the board have support for OOB interrupts. SDIO
69 * in-band interrupts are relatively slow and for having
70 * less overhead on interrupt processing an out of band
71 * interrupt can be used. If the HW supports this then
72 * enable this by setting this field to true and configure
73 * the oob related fields.
74 * @oob_irq_nr,
75 * @oob_irq_flags: the OOB interrupt information. The values are used for
76 * registering the irq using request_irq function.
77 * @broken_sg_support: flag for broken sg list support of SDIO host controller.
78 * Set this to true if the SDIO host controller has higher
79 * align requirement than 32 bytes for each scatterlist
80 * item.
81 * @sd_head_align: alignment requirement for start of data buffer.
82 * @sd_sgentry_align: length alignment requirement for each sg entry.
83 * @reset: This function can get called if the device communication
84 * broke down. This functionality is particularly useful in
85 * case of SDIO type devices. It is possible to reset a
86 * dongle via sdio data interface, but it requires that
87 * this is fully functional. This function is chip/module
88 * specific and this function should return only after the
89 * complete reset has completed.
90 */
91struct brcmfmac_sdio_pd {
92 int txglomsz;
93 unsigned int drive_strength;
94 bool oob_irq_supported;
95 unsigned int oob_irq_nr;
96 unsigned long oob_irq_flags;
97 bool broken_sg_support;
98 unsigned short sd_head_align;
99 unsigned short sd_sgentry_align;
100 void (*reset)(void);
101};
102
103/**
104 * struct brcmfmac_pd_cc_entry - Struct for translating user space country code
105 * (iso3166) to firmware country code and
106 * revision.
107 *
108 * @iso3166: iso3166 alpha 2 country code string.
109 * @cc: firmware country code string.
110 * @rev: firmware country code revision.
111 */
112struct brcmfmac_pd_cc_entry {
113 char iso3166[BRCMFMAC_COUNTRY_BUF_SZ];
114 char cc[BRCMFMAC_COUNTRY_BUF_SZ];
115 s32 rev;
116};
117
118/**
119 * struct brcmfmac_pd_cc - Struct for translating country codes as set by user
120 * space to a country code and rev which can be used by
121 * firmware.
122 *
123 * @table_size: number of entries in table (> 0)
124 * @table: array of 1 or more elements with translation information.
125 */
126struct brcmfmac_pd_cc {
127 int table_size;
128 struct brcmfmac_pd_cc_entry table[0];
129};
130
131/**
132 * struct brcmfmac_pd_device - Device specific platform data. (id/rev/bus_type)
133 * is the unique identifier of the device.
134 *
135 * @id: ID of the device for which this data is. In case of SDIO
136 * or PCIE this is the chipid as identified by chip.c In
137 * case of USB this is the chipid as identified by the
138 * device query.
139 * @rev: chip revision, see id.
140 * @bus_type: The type of bus. Some chipid/rev exist for different bus
141 * types. Each bus type has its own set of settings.
142 * @feature_disable: Bitmask of features to disable (override), See feature.c
143 * in brcmfmac for details.
144 * @country_codes: If available, pointer to struct for translating country
145 * codes.
146 * @bus: Bus specific (union) device settings. Currently only
147 * SDIO.
148 */
149struct brcmfmac_pd_device {
150 unsigned int id;
151 unsigned int rev;
152 enum brcmf_bus_type bus_type;
153 unsigned int feature_disable;
154 struct brcmfmac_pd_cc *country_codes;
155 union {
156 struct brcmfmac_sdio_pd sdio;
157 } bus;
158};
159
160/**
161 * struct brcmfmac_platform_data - BRCMFMAC specific platform data.
162 *
163 * @power_on: This function is called by the brcmfmac driver when the module
164 * gets loaded. This can be particularly useful for low power
165 * devices. The platform specific routine may for example decide to
166 * power up the complete device. If there is no use-case for this
167 * function then provide NULL.
168 * @power_off: This function is called by the brcmfmac when the module gets
169 * unloaded. At this point the devices can be powered down or
170 * otherwise be reset. So if an actual power_off is not supported
171 * but reset is supported by the devices then reset the devices
172 * when this function gets called. This can be particularly useful
173 * for low power devices. If there is no use-case for this
174 * function then provide NULL.
175 */
176struct brcmfmac_platform_data {
177 void (*power_on)(void);
178 void (*power_off)(void);
179 char *fw_alternative_path;
180 int device_count;
181 struct brcmfmac_pd_device devices[0];
182};
183
184
185#endif /* _LINUX_BRCMFMAC_PLATFORM_H */
diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h
deleted file mode 100644
index ca13992089b8..000000000000
--- a/include/linux/platform_data/microread.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Driver include for the Inside Secure microread NFC Chip.
3 *
4 * Copyright (C) 2011 Tieto Poland
5 * Copyright (C) 2012 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef _MICROREAD_H
22#define _MICROREAD_H
23
24#include <linux/i2c.h>
25
26#define MICROREAD_DRIVER_NAME "microread"
27
28/* board config platform data for microread */
29struct microread_nfc_platform_data {
30 unsigned int rst_gpio;
31 unsigned int irq_gpio;
32 unsigned int ioh_gpio;
33};
34
35#endif /* _MICROREAD_H */
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
index 2a330ec9e2af..d1397c8ed94e 100644
--- a/include/linux/platform_data/mmp_dma.h
+++ b/include/linux/platform_data/mmp_dma.h
@@ -14,6 +14,7 @@
14 14
15struct mmp_dma_platdata { 15struct mmp_dma_platdata {
16 int dma_channels; 16 int dma_channels;
17 int nb_requestors;
17}; 18};
18 19
19#endif /* MMP_DMA_H */ 20#endif /* MMP_DMA_H */
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index 36bb92172f47..c55e42ee57fa 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -40,7 +40,6 @@ struct s3c2410_nand_set {
40 char *name; 40 char *name;
41 int *nr_map; 41 int *nr_map;
42 struct mtd_partition *partitions; 42 struct mtd_partition *partitions;
43 struct nand_ecclayout *ecc_layout;
44}; 43};
45 44
46struct s3c2410_platform_nand { 45struct s3c2410_platform_nand {
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index aed170588b74..698d0d59db76 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -28,6 +28,7 @@ enum ntc_thermistor_type {
28 TYPE_NCPXXWL333, 28 TYPE_NCPXXWL333,
29 TYPE_B57330V2103, 29 TYPE_B57330V2103,
30 TYPE_NCPXXWF104, 30 TYPE_NCPXXWF104,
31 TYPE_NCPXXXH103,
31}; 32};
32 33
33struct ntc_thermistor_platform_data { 34struct ntc_thermistor_platform_data {
diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h
index 4504d5d592f0..009e1d83fe39 100644
--- a/include/linux/platform_data/sa11x0-serial.h
+++ b/include/linux/platform_data/sa11x0-serial.h
@@ -26,8 +26,12 @@ struct sa1100_port_fns {
26void sa1100_register_uart_fns(struct sa1100_port_fns *fns); 26void sa1100_register_uart_fns(struct sa1100_port_fns *fns);
27void sa1100_register_uart(int idx, int port); 27void sa1100_register_uart(int idx, int port);
28#else 28#else
29#define sa1100_register_uart_fns(fns) do { } while (0) 29static inline void sa1100_register_uart_fns(struct sa1100_port_fns *fns)
30#define sa1100_register_uart(idx,port) do { } while (0) 30{
31}
32static inline void sa1100_register_uart(int idx, int port)
33{
34}
31#endif 35#endif
32 36
33#endif 37#endif
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h
index d09275f3cde3..2ba2c34ca3d3 100644
--- a/include/linux/platform_data/serial-omap.h
+++ b/include/linux/platform_data/serial-omap.h
@@ -21,7 +21,7 @@
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/pm_qos.h> 22#include <linux/pm_qos.h>
23 23
24#define DRIVER_NAME "omap_uart" 24#define OMAP_SERIAL_DRIVER_NAME "omap_uart"
25 25
26/* 26/*
27 * Use tty device name as ttyO, [O -> OMAP] 27 * Use tty device name as ttyO, [O -> OMAP]
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 25266c600021..308d6044f153 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -42,7 +42,9 @@ extern int pm_clk_create(struct device *dev);
42extern void pm_clk_destroy(struct device *dev); 42extern void pm_clk_destroy(struct device *dev);
43extern int pm_clk_add(struct device *dev, const char *con_id); 43extern int pm_clk_add(struct device *dev, const char *con_id);
44extern int pm_clk_add_clk(struct device *dev, struct clk *clk); 44extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
45extern int of_pm_clk_add_clks(struct device *dev);
45extern void pm_clk_remove(struct device *dev, const char *con_id); 46extern void pm_clk_remove(struct device *dev, const char *con_id);
47extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);
46extern int pm_clk_suspend(struct device *dev); 48extern int pm_clk_suspend(struct device *dev);
47extern int pm_clk_resume(struct device *dev); 49extern int pm_clk_resume(struct device *dev);
48#else 50#else
@@ -69,11 +71,18 @@ static inline int pm_clk_add_clk(struct device *dev, struct clk *clk)
69{ 71{
70 return -EINVAL; 72 return -EINVAL;
71} 73}
74static inline int of_pm_clk_add_clks(struct device *dev)
75{
76 return -EINVAL;
77}
72static inline void pm_clk_remove(struct device *dev, const char *con_id) 78static inline void pm_clk_remove(struct device *dev, const char *con_id)
73{ 79{
74} 80}
75#define pm_clk_suspend NULL 81#define pm_clk_suspend NULL
76#define pm_clk_resume NULL 82#define pm_clk_resume NULL
83static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk)
84{
85}
77#endif 86#endif
78 87
79#ifdef CONFIG_HAVE_CLK 88#ifdef CONFIG_HAVE_CLK
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index db21d3995f7e..49cd8890b873 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -19,6 +19,8 @@
19/* Defines used for the flags field in the struct generic_pm_domain */ 19/* Defines used for the flags field in the struct generic_pm_domain */
20#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ 20#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
21 21
22#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */
23
22enum gpd_status { 24enum gpd_status {
23 GPD_STATE_ACTIVE = 0, /* PM domain is active */ 25 GPD_STATE_ACTIVE = 0, /* PM domain is active */
24 GPD_STATE_POWER_OFF, /* PM domain is off */ 26 GPD_STATE_POWER_OFF, /* PM domain is off */
@@ -37,6 +39,11 @@ struct gpd_dev_ops {
37 bool (*active_wakeup)(struct device *dev); 39 bool (*active_wakeup)(struct device *dev);
38}; 40};
39 41
42struct genpd_power_state {
43 s64 power_off_latency_ns;
44 s64 power_on_latency_ns;
45};
46
40struct generic_pm_domain { 47struct generic_pm_domain {
41 struct dev_pm_domain domain; /* PM domain operations */ 48 struct dev_pm_domain domain; /* PM domain operations */
42 struct list_head gpd_list_node; /* Node in the global PM domains list */ 49 struct list_head gpd_list_node; /* Node in the global PM domains list */
@@ -54,9 +61,7 @@ struct generic_pm_domain {
54 unsigned int prepared_count; /* Suspend counter of prepared devices */ 61 unsigned int prepared_count; /* Suspend counter of prepared devices */
55 bool suspend_power_off; /* Power status before system suspend */ 62 bool suspend_power_off; /* Power status before system suspend */
56 int (*power_off)(struct generic_pm_domain *domain); 63 int (*power_off)(struct generic_pm_domain *domain);
57 s64 power_off_latency_ns;
58 int (*power_on)(struct generic_pm_domain *domain); 64 int (*power_on)(struct generic_pm_domain *domain);
59 s64 power_on_latency_ns;
60 struct gpd_dev_ops dev_ops; 65 struct gpd_dev_ops dev_ops;
61 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ 66 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
62 bool max_off_time_changed; 67 bool max_off_time_changed;
@@ -66,6 +71,10 @@ struct generic_pm_domain {
66 void (*detach_dev)(struct generic_pm_domain *domain, 71 void (*detach_dev)(struct generic_pm_domain *domain,
67 struct device *dev); 72 struct device *dev);
68 unsigned int flags; /* Bit field of configs for genpd */ 73 unsigned int flags; /* Bit field of configs for genpd */
74 struct genpd_power_state states[GENPD_MAX_NUM_STATES];
75 unsigned int state_count; /* number of states */
76 unsigned int state_idx; /* state that genpd will go to when off */
77
69}; 78};
70 79
71static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) 80static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 95403d2ccaf5..cccaf4a29e9f 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -34,6 +34,8 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
34 34
35int dev_pm_opp_get_opp_count(struct device *dev); 35int dev_pm_opp_get_opp_count(struct device *dev);
36unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); 36unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
37unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
38unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
37struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev); 39struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
38 40
39struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 41struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -60,6 +62,9 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
60void dev_pm_opp_put_supported_hw(struct device *dev); 62void dev_pm_opp_put_supported_hw(struct device *dev);
61int dev_pm_opp_set_prop_name(struct device *dev, const char *name); 63int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
62void dev_pm_opp_put_prop_name(struct device *dev); 64void dev_pm_opp_put_prop_name(struct device *dev);
65int dev_pm_opp_set_regulator(struct device *dev, const char *name);
66void dev_pm_opp_put_regulator(struct device *dev);
67int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
63#else 68#else
64static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) 69static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
65{ 70{
@@ -86,6 +91,16 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
86 return 0; 91 return 0;
87} 92}
88 93
94static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
95{
96 return 0;
97}
98
99static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
100{
101 return 0;
102}
103
89static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) 104static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
90{ 105{
91 return NULL; 106 return NULL;
@@ -151,6 +166,18 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
151 166
152static inline void dev_pm_opp_put_prop_name(struct device *dev) {} 167static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
153 168
169static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
170{
171 return -EINVAL;
172}
173
174static inline void dev_pm_opp_put_regulator(struct device *dev) {}
175
176static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
177{
178 return -EINVAL;
179}
180
154#endif /* CONFIG_PM_OPP */ 181#endif /* CONFIG_PM_OPP */
155 182
156#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) 183#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 7c3d11a6b4ad..57d146fe44dd 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -42,6 +42,13 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
42 BUG(); 42 BUG();
43} 43}
44 44
45static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
46 size_t n)
47{
48 BUG();
49 return -EFAULT;
50}
51
45static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, 52static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
46 struct iov_iter *i) 53 struct iov_iter *i)
47{ 54{
@@ -58,24 +65,42 @@ static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
58{ 65{
59 BUG(); 66 BUG();
60} 67}
61#endif
62 68
63/* 69static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
64 * Architectures that define ARCH_HAS_PMEM_API must provide
65 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
66 * arch_copy_from_iter_pmem(), arch_clear_pmem(), arch_wb_cache_pmem()
67 * and arch_has_wmb_pmem().
68 */
69static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
70{ 70{
71 memcpy(dst, (void __force const *) src, size); 71 BUG();
72} 72}
73#endif
73 74
74static inline bool arch_has_pmem_api(void) 75static inline bool arch_has_pmem_api(void)
75{ 76{
76 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); 77 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
77} 78}
78 79
80static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
81 size_t size)
82{
83 memcpy(dst, (void __force *) src, size);
84 return 0;
85}
86
87/*
88 * memcpy_from_pmem - read from persistent memory with error handling
89 * @dst: destination buffer
90 * @src: source buffer
91 * @size: transfer length
92 *
93 * Returns 0 on success negative error code on failure.
94 */
95static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
96 size_t size)
97{
98 if (arch_has_pmem_api())
99 return arch_memcpy_from_pmem(dst, src, size);
100 else
101 return default_memcpy_from_pmem(dst, src, size);
102}
103
79/** 104/**
80 * arch_has_wmb_pmem - true if wmb_pmem() ensures durability 105 * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
81 * 106 *
@@ -186,6 +211,20 @@ static inline void clear_pmem(void __pmem *addr, size_t size)
186} 211}
187 212
188/** 213/**
214 * invalidate_pmem - flush a pmem range from the cache hierarchy
215 * @addr: virtual start address
216 * @size: bytes to invalidate (internally aligned to cache line size)
217 *
218 * For platforms that support clearing poison this flushes any poisoned
219 * ranges out of the cache
220 */
221static inline void invalidate_pmem(void __pmem *addr, size_t size)
222{
223 if (arch_has_pmem_api())
224 arch_invalidate_pmem(addr, size);
225}
226
227/**
189 * wb_cache_pmem - write back processor cache for PMEM memory range 228 * wb_cache_pmem - write back processor cache for PMEM memory range
190 * @addr: virtual start address 229 * @addr: virtual start address
191 * @size: number of bytes to write back 230 * @size: number of bytes to write back
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 4a27153574e2..51334edec506 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -30,7 +30,11 @@
30#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) 30#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
31 31
32/********** mm/debug-pagealloc.c **********/ 32/********** mm/debug-pagealloc.c **********/
33#ifdef CONFIG_PAGE_POISONING_ZERO
34#define PAGE_POISON 0x00
35#else
33#define PAGE_POISON 0xaa 36#define PAGE_POISON 0xaa
37#endif
34 38
35/********** mm/page_alloc.c ************/ 39/********** mm/page_alloc.c ************/
36 40
diff --git a/include/linux/poll.h b/include/linux/poll.h
index c08386fb3e08..9fb4f40d9a26 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq);
96extern void poll_freewait(struct poll_wqueues *pwq); 96extern void poll_freewait(struct poll_wqueues *pwq);
97extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, 97extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
98 ktime_t *expires, unsigned long slack); 98 ktime_t *expires, unsigned long slack);
99extern long select_estimate_accuracy(struct timespec *tv); 99extern u64 select_estimate_accuracy(struct timespec *tv);
100 100
101 101
102static inline int poll_schedule(struct poll_wqueues *pwq, int state) 102static inline int poll_schedule(struct poll_wqueues *pwq, int state)
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 907f3fd191ac..62d44c176071 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -128,9 +128,6 @@ void posix_cpu_timer_schedule(struct k_itimer *timer);
128void run_posix_cpu_timers(struct task_struct *task); 128void run_posix_cpu_timers(struct task_struct *task);
129void posix_cpu_timers_exit(struct task_struct *task); 129void posix_cpu_timers_exit(struct task_struct *task);
130void posix_cpu_timers_exit_group(struct task_struct *task); 130void posix_cpu_timers_exit_group(struct task_struct *task);
131
132bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk);
133
134void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, 131void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
135 cputime_t *newval, cputime_t *oldval); 132 cputime_t *newval, cputime_t *oldval);
136 133
diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h
index f536164a6069..6b750c1a45fa 100644
--- a/include/linux/power/bq24735-charger.h
+++ b/include/linux/power/bq24735-charger.h
@@ -32,6 +32,8 @@ struct bq24735_platform {
32 int status_gpio_active_low; 32 int status_gpio_active_low;
33 bool status_gpio_valid; 33 bool status_gpio_valid;
34 34
35 bool ext_control;
36
35 char **supplied_to; 37 char **supplied_to;
36 size_t num_supplicants; 38 size_t num_supplicants;
37}; 39};
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index ef9f1592185d..751061790626 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -163,6 +163,9 @@ enum power_supply_type {
163 POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */ 163 POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */
164 POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */ 164 POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */
165 POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */ 165 POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */
166 POWER_SUPPLY_TYPE_USB_TYPE_C, /* Type C Port */
167 POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */
168 POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */
166}; 169};
167 170
168enum power_supply_notifier_events { 171enum power_supply_notifier_events {
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 54bf1484d41f..35ac903956c7 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -111,22 +111,17 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
111 kt->nsec = ts.tv_nsec; 111 kt->nsec = ts.tv_nsec;
112} 112}
113 113
114#ifdef CONFIG_NTP_PPS
115
116static inline void pps_get_ts(struct pps_event_time *ts) 114static inline void pps_get_ts(struct pps_event_time *ts)
117{ 115{
118 ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real); 116 struct system_time_snapshot snap;
119}
120 117
121#else /* CONFIG_NTP_PPS */ 118 ktime_get_snapshot(&snap);
122 119 ts->ts_real = ktime_to_timespec64(snap.real);
123static inline void pps_get_ts(struct pps_event_time *ts) 120#ifdef CONFIG_NTP_PPS
124{ 121 ts->ts_raw = ktime_to_timespec64(snap.raw);
125 ktime_get_real_ts64(&ts->ts_real); 122#endif
126} 123}
127 124
128#endif /* CONFIG_NTP_PPS */
129
130/* Subtract known time delay from PPS event time(s) */ 125/* Subtract known time delay from PPS event time(s) */
131static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) 126static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
132{ 127{
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 42dfc615dbf8..de0e7719d4c5 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -9,6 +9,8 @@
9struct pid_namespace; 9struct pid_namespace;
10struct nsproxy; 10struct nsproxy;
11struct path; 11struct path;
12struct task_struct;
13struct inode;
12 14
13struct proc_ns_operations { 15struct proc_ns_operations {
14 const char *name; 16 const char *name;
@@ -24,6 +26,7 @@ extern const struct proc_ns_operations ipcns_operations;
24extern const struct proc_ns_operations pidns_operations; 26extern const struct proc_ns_operations pidns_operations;
25extern const struct proc_ns_operations userns_operations; 27extern const struct proc_ns_operations userns_operations;
26extern const struct proc_ns_operations mntns_operations; 28extern const struct proc_ns_operations mntns_operations;
29extern const struct proc_ns_operations cgroupns_operations;
27 30
28/* 31/*
29 * We always define these enumerators 32 * We always define these enumerators
@@ -34,6 +37,7 @@ enum {
34 PROC_UTS_INIT_INO = 0xEFFFFFFEU, 37 PROC_UTS_INIT_INO = 0xEFFFFFFEU,
35 PROC_USER_INIT_INO = 0xEFFFFFFDU, 38 PROC_USER_INIT_INO = 0xEFFFFFFDU,
36 PROC_PID_INIT_INO = 0xEFFFFFFCU, 39 PROC_PID_INIT_INO = 0xEFFFFFFCU,
40 PROC_CGROUP_INIT_INO = 0xEFFFFFFBU,
37}; 41};
38 42
39#ifdef CONFIG_PROC_FS 43#ifdef CONFIG_PROC_FS
diff --git a/include/linux/psci.h b/include/linux/psci.h
index 12c4865457ad..393efe2edf9a 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu);
24bool psci_power_state_loses_context(u32 state); 24bool psci_power_state_loses_context(u32 state);
25bool psci_power_state_is_valid(u32 state); 25bool psci_power_state_is_valid(u32 state);
26 26
27int psci_cpu_init_idle(unsigned int cpu);
28int psci_cpu_suspend_enter(unsigned long index);
29
27struct psci_operations { 30struct psci_operations {
28 int (*cpu_suspend)(u32 state, unsigned long entry_point); 31 int (*cpu_suspend)(u32 state, unsigned long entry_point);
29 int (*cpu_off)(u32 state); 32 int (*cpu_off)(u32 state);
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9c9d6c154c8e..4660aaa3195e 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -76,7 +76,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
76 76
77struct ramoops_platform_data { 77struct ramoops_platform_data {
78 unsigned long mem_size; 78 unsigned long mem_size;
79 unsigned long mem_address; 79 phys_addr_t mem_address;
80 unsigned int mem_type; 80 unsigned int mem_type;
81 unsigned long record_size; 81 unsigned long record_size;
82 unsigned long console_size; 82 unsigned long console_size;
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index b8b73066d137..6b15e168148a 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -38,6 +38,7 @@ struct ptp_clock_request {
38 }; 38 };
39}; 39};
40 40
41struct system_device_crosststamp;
41/** 42/**
42 * struct ptp_clock_info - decribes a PTP hardware clock 43 * struct ptp_clock_info - decribes a PTP hardware clock
43 * 44 *
@@ -67,6 +68,11 @@ struct ptp_clock_request {
67 * @gettime64: Reads the current time from the hardware clock. 68 * @gettime64: Reads the current time from the hardware clock.
68 * parameter ts: Holds the result. 69 * parameter ts: Holds the result.
69 * 70 *
71 * @getcrosststamp: Reads the current time from the hardware clock and
72 * system clock simultaneously.
73 * parameter cts: Contains timestamp (device,system) pair,
74 * where system time is realtime and monotonic.
75 *
70 * @settime64: Set the current time on the hardware clock. 76 * @settime64: Set the current time on the hardware clock.
71 * parameter ts: Time value to set. 77 * parameter ts: Time value to set.
72 * 78 *
@@ -105,6 +111,8 @@ struct ptp_clock_info {
105 int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); 111 int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
106 int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); 112 int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
107 int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); 113 int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
114 int (*getcrosststamp)(struct ptp_clock_info *ptp,
115 struct system_device_crosststamp *cts);
108 int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); 116 int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
109 int (*enable)(struct ptp_clock_info *ptp, 117 int (*enable)(struct ptp_clock_info *ptp,
110 struct ptp_clock_request *request, int on); 118 struct ptp_clock_request *request, int on);
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index c2f2574ff61c..2a097d176ba9 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@ enum pxa_ssp_type {
197 QUARK_X1000_SSP, 197 QUARK_X1000_SSP,
198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ 198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
199 LPSS_BYT_SSP, 199 LPSS_BYT_SSP,
200 LPSS_BSW_SSP,
200 LPSS_SPT_SSP, 201 LPSS_SPT_SSP,
201 LPSS_BXT_SSP, 202 LPSS_BXT_SSP,
202}; 203};
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 1d1ba2c5ee7a..53ecb37ae563 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -11,9 +11,11 @@
11 11
12#define CORE_SPQE_PAGE_SIZE_BYTES 4096 12#define CORE_SPQE_PAGE_SIZE_BYTES 4096
13 13
14#define X_FINAL_CLEANUP_AGG_INT 1
15
14#define FW_MAJOR_VERSION 8 16#define FW_MAJOR_VERSION 8
15#define FW_MINOR_VERSION 4 17#define FW_MINOR_VERSION 7
16#define FW_REVISION_VERSION 2 18#define FW_REVISION_VERSION 3
17#define FW_ENGINEERING_VERSION 0 19#define FW_ENGINEERING_VERSION 0
18 20
19/***********************/ 21/***********************/
@@ -152,6 +154,9 @@
152/* number of queues in a PF queue group */ 154/* number of queues in a PF queue group */
153#define QM_PF_QUEUE_GROUP_SIZE 8 155#define QM_PF_QUEUE_GROUP_SIZE 8
154 156
157/* the size of a single queue element in bytes */
158#define QM_PQ_ELEMENT_SIZE 4
159
155/* base number of Tx PQs in the CM PQ representation. 160/* base number of Tx PQs in the CM PQ representation.
156 * should be used when storing PQ IDs in CM PQ registers and context 161 * should be used when storing PQ IDs in CM PQ registers and context
157 */ 162 */
@@ -285,6 +290,16 @@
285#define PXP_NUM_ILT_RECORDS_K2 11000 290#define PXP_NUM_ILT_RECORDS_K2 11000
286#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) 291#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
287 292
293#define SDM_COMP_TYPE_NONE 0
294#define SDM_COMP_TYPE_WAKE_THREAD 1
295#define SDM_COMP_TYPE_AGG_INT 2
296#define SDM_COMP_TYPE_CM 3
297#define SDM_COMP_TYPE_LOADER 4
298#define SDM_COMP_TYPE_PXP 5
299#define SDM_COMP_TYPE_INDICATE_ERROR 6
300#define SDM_COMP_TYPE_RELEASE_THREAD 7
301#define SDM_COMP_TYPE_RAM 8
302
288/******************/ 303/******************/
289/* PBF CONSTANTS */ 304/* PBF CONSTANTS */
290/******************/ 305/******************/
@@ -335,7 +350,7 @@ struct event_ring_entry {
335 350
336/* Multi function mode */ 351/* Multi function mode */
337enum mf_mode { 352enum mf_mode {
338 SF, 353 ERROR_MODE /* Unsupported mode */,
339 MF_OVLAN, 354 MF_OVLAN,
340 MF_NPAR, 355 MF_NPAR,
341 MAX_MF_MODE 356 MAX_MF_MODE
@@ -606,4 +621,19 @@ struct status_block {
606#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 621#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
607}; 622};
608 623
624struct tunnel_parsing_flags {
625 u8 flags;
626#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
627#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
628#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
629#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
630#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
631#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
632#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
633#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
634#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
635#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
636#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
637#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
638};
609#endif /* __COMMON_HSI__ */ 639#endif /* __COMMON_HSI__ */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 320b3373ac1d..092cb0c1afcb 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -17,10 +17,8 @@
17#define ETH_MAX_RAMROD_PER_CON 8 17#define ETH_MAX_RAMROD_PER_CON 8
18#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 18#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
19#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 19#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
20#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096
21#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 20#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
22#define ETH_RX_NUM_NEXT_PAGE_BDS 2 21#define ETH_RX_NUM_NEXT_PAGE_BDS 2
23#define ETH_RX_NUM_NEXT_PAGE_SGES 2
24 22
25#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 23#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
26#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 24#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
@@ -34,7 +32,8 @@
34 32
35#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS 33#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
36 34
37#define ETH_REG_CQE_PBL_SIZE 3 35/* Maximum number of buffers, used for RX packet placement */
36#define ETH_RX_MAX_BUFF_PER_PKT 5
38 37
39/* num of MAC/VLAN filters */ 38/* num of MAC/VLAN filters */
40#define ETH_NUM_MAC_FILTERS 512 39#define ETH_NUM_MAC_FILTERS 512
@@ -54,9 +53,9 @@
54 53
55/* TPA constants */ 54/* TPA constants */
56#define ETH_TPA_MAX_AGGS_NUM 64 55#define ETH_TPA_MAX_AGGS_NUM 64
57#define ETH_TPA_CQE_START_SGL_SIZE 3 56#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
58#define ETH_TPA_CQE_CONT_SGL_SIZE 6 57#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
59#define ETH_TPA_CQE_END_SGL_SIZE 4 58#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
60 59
61/* Queue Zone sizes */ 60/* Queue Zone sizes */
62#define TSTORM_QZONE_SIZE 0 61#define TSTORM_QZONE_SIZE 0
@@ -74,18 +73,18 @@ struct coalescing_timeset {
74 73
75struct eth_tx_1st_bd_flags { 74struct eth_tx_1st_bd_flags {
76 u8 bitfields; 75 u8 bitfields;
76#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
77#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
77#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 78#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
78#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 79#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
79#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 80#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
80#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1 81#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
81#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 82#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
82#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2 83#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
83#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 84#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
84#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3 85#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
85#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 86#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
86#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4 87#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
87#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
88#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5
89#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 88#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
90#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 89#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
91#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 90#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
@@ -97,38 +96,44 @@ struct eth_tx_data_1st_bd {
97 __le16 vlan; 96 __le16 vlan;
98 u8 nbds; 97 u8 nbds;
99 struct eth_tx_1st_bd_flags bd_flags; 98 struct eth_tx_1st_bd_flags bd_flags;
100 __le16 fw_use_only; 99 __le16 bitfields;
100#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1
101#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
102#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
103#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
104#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF
105#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2
101}; 106};
102 107
103/* The parsing information data for the second tx bd of a given packet. */ 108/* The parsing information data for the second tx bd of a given packet. */
104struct eth_tx_data_2nd_bd { 109struct eth_tx_data_2nd_bd {
105 __le16 tunn_ip_size; 110 __le16 tunn_ip_size;
106 __le16 bitfields; 111 __le16 bitfields1;
107#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
108#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
109#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
110#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
111 __le16 bitfields2;
112#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF 112#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
113#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 113#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
114#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 114#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
115#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 115#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
116#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 116#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
117#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 117#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
118#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
119#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
118#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 120#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
119#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8 121#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
120#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 122#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
121#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10 123#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
122#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 124#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
123#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11 125#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
124#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 126#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
125#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12 127#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
126#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 128#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
127#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13 129#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
128#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 130#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
129#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14 131#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
130#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1 132 __le16 bitfields2;
131#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15 133#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
134#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
135#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
136#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
132}; 137};
133 138
134/* Regular ETH Rx FP CQE. */ 139/* Regular ETH Rx FP CQE. */
@@ -145,11 +150,68 @@ struct eth_fast_path_rx_reg_cqe {
145 struct parsing_and_err_flags pars_flags; 150 struct parsing_and_err_flags pars_flags;
146 __le16 vlan_tag; 151 __le16 vlan_tag;
147 __le32 rss_hash; 152 __le32 rss_hash;
148 __le16 len_on_bd; 153 __le16 len_on_first_bd;
149 u8 placement_offset; 154 u8 placement_offset;
150 u8 reserved; 155 struct tunnel_parsing_flags tunnel_pars_flags;
151 __le16 pbl[ETH_REG_CQE_PBL_SIZE]; 156 u8 bd_num;
152 u8 reserved1[10]; 157 u8 reserved[7];
158 u32 fw_debug;
159 u8 reserved1[3];
160 u8 flags;
161#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
162#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
163#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
164#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
165#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
166#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
167};
168
169/* TPA-continue ETH Rx FP CQE. */
170struct eth_fast_path_rx_tpa_cont_cqe {
171 u8 type;
172 u8 tpa_agg_index;
173 __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
174 u8 reserved[5];
175 u8 reserved1;
176 __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
177};
178
179/* TPA-end ETH Rx FP CQE. */
180struct eth_fast_path_rx_tpa_end_cqe {
181 u8 type;
182 u8 tpa_agg_index;
183 __le16 total_packet_len;
184 u8 num_of_bds;
185 u8 end_reason;
186 __le16 num_of_coalesced_segs;
187 __le32 ts_delta;
188 __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
189 u8 reserved1[3];
190 u8 reserved2;
191 __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
192};
193
194/* TPA-start ETH Rx FP CQE. */
195struct eth_fast_path_rx_tpa_start_cqe {
196 u8 type;
197 u8 bitfields;
198#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
199#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
200#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
201#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
202#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
203#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
204 __le16 seg_len;
205 struct parsing_and_err_flags pars_flags;
206 __le16 vlan_tag;
207 __le32 rss_hash;
208 __le16 len_on_first_bd;
209 u8 placement_offset;
210 struct tunnel_parsing_flags tunnel_pars_flags;
211 u8 tpa_agg_index;
212 u8 header_len;
213 __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
214 u32 fw_debug;
153}; 215};
154 216
155/* The L4 pseudo checksum mode for Ethernet */ 217/* The L4 pseudo checksum mode for Ethernet */
@@ -168,13 +230,26 @@ struct eth_slow_path_rx_cqe {
168 u8 type; 230 u8 type;
169 u8 ramrod_cmd_id; 231 u8 ramrod_cmd_id;
170 u8 error_flag; 232 u8 error_flag;
171 u8 reserved[27]; 233 u8 reserved[25];
172 __le16 echo; 234 __le16 echo;
235 u8 reserved1;
236 u8 flags;
237/* for PMD mode - valid indication */
238#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
239#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
240/* for PMD mode - valid toggle indication */
241#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
242#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
243#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
244#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
173}; 245};
174 246
175/* union for all ETH Rx CQE types */ 247/* union for all ETH Rx CQE types */
176union eth_rx_cqe { 248union eth_rx_cqe {
177 struct eth_fast_path_rx_reg_cqe fast_path_regular; 249 struct eth_fast_path_rx_reg_cqe fast_path_regular;
250 struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
251 struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
252 struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
178 struct eth_slow_path_rx_cqe slow_path; 253 struct eth_slow_path_rx_cqe slow_path;
179}; 254};
180 255
@@ -183,15 +258,18 @@ enum eth_rx_cqe_type {
183 ETH_RX_CQE_TYPE_UNUSED, 258 ETH_RX_CQE_TYPE_UNUSED,
184 ETH_RX_CQE_TYPE_REGULAR, 259 ETH_RX_CQE_TYPE_REGULAR,
185 ETH_RX_CQE_TYPE_SLOW_PATH, 260 ETH_RX_CQE_TYPE_SLOW_PATH,
261 ETH_RX_CQE_TYPE_TPA_START,
262 ETH_RX_CQE_TYPE_TPA_CONT,
263 ETH_RX_CQE_TYPE_TPA_END,
186 MAX_ETH_RX_CQE_TYPE 264 MAX_ETH_RX_CQE_TYPE
187}; 265};
188 266
189/* ETH Rx producers data */ 267/* ETH Rx producers data */
190struct eth_rx_prod_data { 268struct eth_rx_prod_data {
191 __le16 bd_prod; 269 __le16 bd_prod;
192 __le16 sge_prod;
193 __le16 cqe_prod; 270 __le16 cqe_prod;
194 __le16 reserved; 271 __le16 reserved;
272 __le16 reserved1;
195}; 273};
196 274
197/* The first tx bd of a given packet */ 275/* The first tx bd of a given packet */
@@ -211,12 +289,17 @@ struct eth_tx_2nd_bd {
211/* The parsing information data for the third tx bd of a given packet. */ 289/* The parsing information data for the third tx bd of a given packet. */
212struct eth_tx_data_3rd_bd { 290struct eth_tx_data_3rd_bd {
213 __le16 lso_mss; 291 __le16 lso_mss;
214 u8 bitfields; 292 __le16 bitfields;
215#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF 293#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
216#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 294#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
217#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF 295#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
218#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 296#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
219 u8 resereved0[3]; 297#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
298#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
299#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
300#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
301 u8 tunn_l4_hdr_start_offset_w;
302 u8 tunn_hdr_size_w;
220}; 303};
221 304
222/* The third tx bd of a given packet */ 305/* The third tx bd of a given packet */
@@ -226,12 +309,24 @@ struct eth_tx_3rd_bd {
226 struct eth_tx_data_3rd_bd data; 309 struct eth_tx_data_3rd_bd data;
227}; 310};
228 311
312/* Complementary information for the regular tx bd of a given packet. */
313struct eth_tx_data_bd {
314 __le16 reserved0;
315 __le16 bitfields;
316#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
317#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
318#define ETH_TX_DATA_BD_START_BD_MASK 0x1
319#define ETH_TX_DATA_BD_START_BD_SHIFT 8
320#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
321#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
322 __le16 reserved3;
323};
324
229/* The common non-special TX BD ring element */ 325/* The common non-special TX BD ring element */
230struct eth_tx_bd { 326struct eth_tx_bd {
231 struct regpair addr; 327 struct regpair addr;
232 __le16 nbytes; 328 __le16 nbytes;
233 __le16 reserved0; 329 struct eth_tx_data_bd data;
234 __le32 reserved1;
235}; 330};
236 331
237union eth_tx_bd_types { 332union eth_tx_bd_types {
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 41b9049b57e2..5f8fcaaa6504 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -19,6 +19,10 @@
19/* dma_addr_t manip */ 19/* dma_addr_t manip */
20#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) 20#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
21#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) 21#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
22#define DMA_REGPAIR_LE(x, val) do { \
23 (x).hi = DMA_HI_LE((val)); \
24 (x).lo = DMA_LO_LE((val)); \
25 } while (0)
22 26
23#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) 27#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
24#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) 28#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 81ab178e31c1..e1d69834a11f 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -33,10 +33,20 @@ struct qed_update_vport_params {
33 u8 vport_id; 33 u8 vport_id;
34 u8 update_vport_active_flg; 34 u8 update_vport_active_flg;
35 u8 vport_active_flg; 35 u8 vport_active_flg;
36 u8 update_accept_any_vlan_flg;
37 u8 accept_any_vlan;
36 u8 update_rss_flg; 38 u8 update_rss_flg;
37 struct qed_update_vport_rss_params rss_params; 39 struct qed_update_vport_rss_params rss_params;
38}; 40};
39 41
42struct qed_start_vport_params {
43 bool remove_inner_vlan;
44 bool gro_enable;
45 bool drop_ttl0;
46 u8 vport_id;
47 u16 mtu;
48};
49
40struct qed_stop_rxq_params { 50struct qed_stop_rxq_params {
41 u8 rss_id; 51 u8 rss_id;
42 u8 rx_queue_id; 52 u8 rx_queue_id;
@@ -116,9 +126,7 @@ struct qed_eth_ops {
116 void *cookie); 126 void *cookie);
117 127
118 int (*vport_start)(struct qed_dev *cdev, 128 int (*vport_start)(struct qed_dev *cdev,
119 u8 vport_id, u16 mtu, 129 struct qed_start_vport_params *params);
120 u8 drop_ttl0_flg,
121 u8 inner_vlan_removal_en_flg);
122 130
123 int (*vport_stop)(struct qed_dev *cdev, 131 int (*vport_stop)(struct qed_dev *cdev,
124 u8 vport_id); 132 u8 vport_id);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index d4a32e878180..1f7599c77cd4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -80,7 +80,7 @@ struct qed_dev_info {
80 u8 num_hwfns; 80 u8 num_hwfns;
81 81
82 u8 hw_mac[ETH_ALEN]; 82 u8 hw_mac[ETH_ALEN];
83 bool is_mf; 83 bool is_mf_default;
84 84
85 /* FW version */ 85 /* FW version */
86 u16 fw_major; 86 u16 fw_major;
@@ -360,6 +360,12 @@ enum DP_MODULE {
360 /* to be added...up to 0x8000000 */ 360 /* to be added...up to 0x8000000 */
361}; 361};
362 362
363enum qed_mf_mode {
364 QED_MF_DEFAULT,
365 QED_MF_OVLAN,
366 QED_MF_NPAR,
367};
368
363struct qed_eth_stats { 369struct qed_eth_stats {
364 u64 no_buff_discards; 370 u64 no_buff_discards;
365 u64 packet_too_big_discard; 371 u64 packet_too_big_discard;
@@ -440,6 +446,12 @@ struct qed_eth_stats {
440#define RX_PI 0 446#define RX_PI 0
441#define TX_PI(tc) (RX_PI + 1 + tc) 447#define TX_PI(tc) (RX_PI + 1 + tc)
442 448
449struct qed_sb_cnt_info {
450 int sb_cnt;
451 int sb_iov_cnt;
452 int sb_free_blk;
453};
454
443static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) 455static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
444{ 456{
445 u32 prod = 0; 457 u32 prod = 0;
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
index bd466439c588..3bdfa70bc642 100644
--- a/include/linux/quicklist.h
+++ b/include/linux/quicklist.h
@@ -5,7 +5,7 @@
5 * as needed after allocation when they are freed. Per cpu lists of pages 5 * as needed after allocation when they are freed. Per cpu lists of pages
6 * are kept that only contain node local pages. 6 * are kept that only contain node local pages.
7 * 7 *
8 * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com> 8 * (C) 2007, SGI. Christoph Lameter <cl@linux.com>
9 */ 9 */
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/gfp.h> 11#include <linux/gfp.h>
diff --git a/include/linux/quota.h b/include/linux/quota.h
index b2505acfd3c0..9dfb6bce8c9e 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -306,6 +306,7 @@ struct quota_format_ops {
306 int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */ 306 int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */
307 int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */ 307 int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */
308 int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */ 308 int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */
309 int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */
309}; 310};
310 311
311/* Operations working with dquots */ 312/* Operations working with dquots */
@@ -321,6 +322,8 @@ struct dquot_operations {
321 * quota code only */ 322 * quota code only */
322 qsize_t *(*get_reserved_space) (struct inode *); 323 qsize_t *(*get_reserved_space) (struct inode *);
323 int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */ 324 int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
325 /* Get next ID with active quota structure */
326 int (*get_next_id) (struct super_block *sb, struct kqid *qid);
324}; 327};
325 328
326struct path; 329struct path;
@@ -425,6 +428,8 @@ struct quotactl_ops {
425 int (*quota_sync)(struct super_block *, int); 428 int (*quota_sync)(struct super_block *, int);
426 int (*set_info)(struct super_block *, int, struct qc_info *); 429 int (*set_info)(struct super_block *, int, struct qc_info *);
427 int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); 430 int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
431 int (*get_nextdqblk)(struct super_block *, struct kqid *,
432 struct qc_dqblk *);
428 int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); 433 int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
429 int (*get_state)(struct super_block *, struct qc_state *); 434 int (*get_state)(struct super_block *, struct qc_state *);
430 int (*rm_xquota)(struct super_block *, unsigned int); 435 int (*rm_xquota)(struct super_block *, unsigned int);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 7a57c28eb5e7..f00fa86ac966 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -82,6 +82,7 @@ int dquot_commit(struct dquot *dquot);
82int dquot_acquire(struct dquot *dquot); 82int dquot_acquire(struct dquot *dquot);
83int dquot_release(struct dquot *dquot); 83int dquot_release(struct dquot *dquot);
84int dquot_commit_info(struct super_block *sb, int type); 84int dquot_commit_info(struct super_block *sb, int type);
85int dquot_get_next_id(struct super_block *sb, struct kqid *qid);
85int dquot_mark_dquot_dirty(struct dquot *dquot); 86int dquot_mark_dquot_dirty(struct dquot *dquot);
86 87
87int dquot_file_open(struct inode *inode, struct file *file); 88int dquot_file_open(struct inode *inode, struct file *file);
@@ -99,6 +100,8 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state);
99int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii); 100int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii);
100int dquot_get_dqblk(struct super_block *sb, struct kqid id, 101int dquot_get_dqblk(struct super_block *sb, struct kqid id,
101 struct qc_dqblk *di); 102 struct qc_dqblk *di);
103int dquot_get_next_dqblk(struct super_block *sb, struct kqid *id,
104 struct qc_dqblk *di);
102int dquot_set_dqblk(struct super_block *sb, struct kqid id, 105int dquot_set_dqblk(struct super_block *sb, struct kqid id,
103 struct qc_dqblk *di); 106 struct qc_dqblk *di);
104 107
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index f54be7082207..51a97ac8bfbf 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -21,6 +21,7 @@
21#ifndef _LINUX_RADIX_TREE_H 21#ifndef _LINUX_RADIX_TREE_H
22#define _LINUX_RADIX_TREE_H 22#define _LINUX_RADIX_TREE_H
23 23
24#include <linux/bitops.h>
24#include <linux/preempt.h> 25#include <linux/preempt.h>
25#include <linux/types.h> 26#include <linux/types.h>
26#include <linux/bug.h> 27#include <linux/bug.h>
@@ -270,8 +271,15 @@ static inline void radix_tree_replace_slot(void **pslot, void *item)
270} 271}
271 272
272int __radix_tree_create(struct radix_tree_root *root, unsigned long index, 273int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
273 struct radix_tree_node **nodep, void ***slotp); 274 unsigned order, struct radix_tree_node **nodep,
274int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); 275 void ***slotp);
276int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
277 unsigned order, void *);
278static inline int radix_tree_insert(struct radix_tree_root *root,
279 unsigned long index, void *entry)
280{
281 return __radix_tree_insert(root, index, 0, entry);
282}
275void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, 283void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
276 struct radix_tree_node **nodep, void ***slotp); 284 struct radix_tree_node **nodep, void ***slotp);
277void *radix_tree_lookup(struct radix_tree_root *, unsigned long); 285void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
@@ -395,6 +403,22 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter)
395} 403}
396 404
397/** 405/**
406 * radix_tree_iter_next - resume iterating when the chunk may be invalid
407 * @iter: iterator state
408 *
409 * If the iterator needs to release then reacquire a lock, the chunk may
410 * have been invalidated by an insertion or deletion. Call this function
411 * to continue the iteration from the next index.
412 */
413static inline __must_check
414void **radix_tree_iter_next(struct radix_tree_iter *iter)
415{
416 iter->next_index = iter->index + 1;
417 iter->tags = 0;
418 return NULL;
419}
420
421/**
398 * radix_tree_chunk_size - get current chunk size 422 * radix_tree_chunk_size - get current chunk size
399 * 423 *
400 * @iter: pointer to radix tree iterator 424 * @iter: pointer to radix tree iterator
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 14ec1652daf4..17d4f849c65e 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -319,6 +319,27 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
319}) 319})
320 320
321/** 321/**
322 * list_next_or_null_rcu - get the first element from a list
323 * @head: the head for the list.
324 * @ptr: the list head to take the next element from.
325 * @type: the type of the struct this is embedded in.
326 * @member: the name of the list_head within the struct.
327 *
328 * Note that if the ptr is at the end of the list, NULL is returned.
329 *
330 * This primitive may safely run concurrently with the _rcu list-mutation
331 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
332 */
333#define list_next_or_null_rcu(head, ptr, type, member) \
334({ \
335 struct list_head *__head = (head); \
336 struct list_head *__ptr = (ptr); \
337 struct list_head *__next = READ_ONCE(__ptr->next); \
338 likely(__next != __head) ? list_entry_rcu(__next, type, \
339 member) : NULL; \
340})
341
342/**
322 * list_for_each_entry_rcu - iterate over rcu list of given type 343 * list_for_each_entry_rcu - iterate over rcu list of given type
323 * @pos: the type * to use as a loop cursor. 344 * @pos: the type * to use as a loop cursor.
324 * @head: the head for your list. 345 * @head: the head for your list.
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 1c33dd7da4a7..4ae95f7e8597 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
98 if (!is_a_nulls(first)) 98 if (!is_a_nulls(first))
99 first->pprev = &n->next; 99 first->pprev = &n->next;
100} 100}
101
102/**
103 * hlist_nulls_add_tail_rcu
104 * @n: the element to add to the hash list.
105 * @h: the list to add to.
106 *
107 * Description:
108 * Adds the specified element to the end of the specified hlist_nulls,
109 * while permitting racing traversals. NOTE: tail insertion requires
110 * list traversal.
111 *
112 * The caller must take whatever precautions are necessary
113 * (such as holding appropriate locks) to avoid racing
114 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
115 * or hlist_nulls_del_rcu(), running on this same list.
116 * However, it is perfectly legal to run concurrently with
117 * the _rcu list-traversal primitives, such as
118 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
119 * problems on Alpha CPUs. Regardless of the type of CPU, the
120 * list-traversal primitive must be guarded by rcu_read_lock().
121 */
122static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
123 struct hlist_nulls_head *h)
124{
125 struct hlist_nulls_node *i, *last = NULL;
126
127 for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
128 i = hlist_nulls_next_rcu(i))
129 last = i;
130
131 if (last) {
132 n->next = last->next;
133 n->pprev = &last->next;
134 rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
135 } else {
136 hlist_nulls_add_head_rcu(n, h);
137 }
138}
139
101/** 140/**
102 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type 141 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
103 * @tpos: the type * to use as a loop cursor. 142 * @tpos: the type * to use as a loop cursor.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 14e6f47ee16f..2657aff2725b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -332,9 +332,7 @@ void rcu_init(void);
332void rcu_sched_qs(void); 332void rcu_sched_qs(void);
333void rcu_bh_qs(void); 333void rcu_bh_qs(void);
334void rcu_check_callbacks(int user); 334void rcu_check_callbacks(int user);
335struct notifier_block; 335void rcu_report_dead(unsigned int cpu);
336int rcu_cpu_notify(struct notifier_block *self,
337 unsigned long action, void *hcpu);
338 336
339#ifndef CONFIG_TINY_RCU 337#ifndef CONFIG_TINY_RCU
340void rcu_end_inkernel_boot(void); 338void rcu_end_inkernel_boot(void);
@@ -360,8 +358,6 @@ void rcu_user_exit(void);
360#else 358#else
361static inline void rcu_user_enter(void) { } 359static inline void rcu_user_enter(void) { }
362static inline void rcu_user_exit(void) { } 360static inline void rcu_user_exit(void) { }
363static inline void rcu_user_hooks_switch(struct task_struct *prev,
364 struct task_struct *next) { }
365#endif /* CONFIG_NO_HZ_FULL */ 361#endif /* CONFIG_NO_HZ_FULL */
366 362
367#ifdef CONFIG_RCU_NOCB_CPU 363#ifdef CONFIG_RCU_NOCB_CPU
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 18394343f489..3dc08ce15426 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -65,6 +65,36 @@ struct reg_sequence {
65 unsigned int delay_us; 65 unsigned int delay_us;
66}; 66};
67 67
68#define regmap_update_bits(map, reg, mask, val) \
69 regmap_update_bits_base(map, reg, mask, val, NULL, false, false)
70#define regmap_update_bits_async(map, reg, mask, val)\
71 regmap_update_bits_base(map, reg, mask, val, NULL, true, false)
72#define regmap_update_bits_check(map, reg, mask, val, change)\
73 regmap_update_bits_base(map, reg, mask, val, change, false, false)
74#define regmap_update_bits_check_async(map, reg, mask, val, change)\
75 regmap_update_bits_base(map, reg, mask, val, change, true, false)
76
77#define regmap_write_bits(map, reg, mask, val) \
78 regmap_update_bits_base(map, reg, mask, val, NULL, false, true)
79
80#define regmap_field_write(field, val) \
81 regmap_field_update_bits_base(field, ~0, val, NULL, false, false)
82#define regmap_field_force_write(field, val) \
83 regmap_field_update_bits_base(field, ~0, val, NULL, false, true)
84#define regmap_field_update_bits(field, mask, val)\
85 regmap_field_update_bits_base(field, mask, val, NULL, false, false)
86#define regmap_field_force_update_bits(field, mask, val) \
87 regmap_field_update_bits_base(field, mask, val, NULL, false, true)
88
89#define regmap_fields_write(field, id, val) \
90 regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false)
91#define regmap_fields_force_write(field, id, val) \
92 regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true)
93#define regmap_fields_update_bits(field, id, mask, val)\
94 regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false)
95#define regmap_fields_force_update_bits(field, id, mask, val) \
96 regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true)
97
68#ifdef CONFIG_REGMAP 98#ifdef CONFIG_REGMAP
69 99
70enum regmap_endian { 100enum regmap_endian {
@@ -162,7 +192,7 @@ typedef void (*regmap_unlock)(void *);
162 * This field is a duplicate of a similar file in 192 * This field is a duplicate of a similar file in
163 * 'struct regmap_bus' and serves exact same purpose. 193 * 'struct regmap_bus' and serves exact same purpose.
164 * Use it only for "no-bus" cases. 194 * Use it only for "no-bus" cases.
165 * @max_register: Optional, specifies the maximum valid register index. 195 * @max_register: Optional, specifies the maximum valid register address.
166 * @wr_table: Optional, points to a struct regmap_access_table specifying 196 * @wr_table: Optional, points to a struct regmap_access_table specifying
167 * valid ranges for write access. 197 * valid ranges for write access.
168 * @rd_table: As above, for read access. 198 * @rd_table: As above, for read access.
@@ -691,18 +721,9 @@ int regmap_raw_read(struct regmap *map, unsigned int reg,
691 void *val, size_t val_len); 721 void *val, size_t val_len);
692int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 722int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
693 size_t val_count); 723 size_t val_count);
694int regmap_update_bits(struct regmap *map, unsigned int reg, 724int regmap_update_bits_base(struct regmap *map, unsigned int reg,
695 unsigned int mask, unsigned int val); 725 unsigned int mask, unsigned int val,
696int regmap_write_bits(struct regmap *map, unsigned int reg, 726 bool *change, bool async, bool force);
697 unsigned int mask, unsigned int val);
698int regmap_update_bits_async(struct regmap *map, unsigned int reg,
699 unsigned int mask, unsigned int val);
700int regmap_update_bits_check(struct regmap *map, unsigned int reg,
701 unsigned int mask, unsigned int val,
702 bool *change);
703int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
704 unsigned int mask, unsigned int val,
705 bool *change);
706int regmap_get_val_bytes(struct regmap *map); 727int regmap_get_val_bytes(struct regmap *map);
707int regmap_get_max_register(struct regmap *map); 728int regmap_get_max_register(struct regmap *map);
708int regmap_get_reg_stride(struct regmap *map); 729int regmap_get_reg_stride(struct regmap *map);
@@ -770,18 +791,14 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
770void devm_regmap_field_free(struct device *dev, struct regmap_field *field); 791void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
771 792
772int regmap_field_read(struct regmap_field *field, unsigned int *val); 793int regmap_field_read(struct regmap_field *field, unsigned int *val);
773int regmap_field_write(struct regmap_field *field, unsigned int val); 794int regmap_field_update_bits_base(struct regmap_field *field,
774int regmap_field_update_bits(struct regmap_field *field, 795 unsigned int mask, unsigned int val,
775 unsigned int mask, unsigned int val); 796 bool *change, bool async, bool force);
776
777int regmap_fields_write(struct regmap_field *field, unsigned int id,
778 unsigned int val);
779int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
780 unsigned int val);
781int regmap_fields_read(struct regmap_field *field, unsigned int id, 797int regmap_fields_read(struct regmap_field *field, unsigned int id,
782 unsigned int *val); 798 unsigned int *val);
783int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, 799int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
784 unsigned int mask, unsigned int val); 800 unsigned int mask, unsigned int val,
801 bool *change, bool async, bool force);
785 802
786/** 803/**
787 * Description of an IRQ for the generic regmap irq_chip. 804 * Description of an IRQ for the generic regmap irq_chip.
@@ -868,6 +885,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
868 int irq_base, const struct regmap_irq_chip *chip, 885 int irq_base, const struct regmap_irq_chip *chip,
869 struct regmap_irq_chip_data **data); 886 struct regmap_irq_chip_data **data);
870void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); 887void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
888
889int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
890 int irq_flags, int irq_base,
891 const struct regmap_irq_chip *chip,
892 struct regmap_irq_chip_data **data);
893void devm_regmap_del_irq_chip(struct device *dev, int irq,
894 struct regmap_irq_chip_data *data);
895
871int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data); 896int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
872int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq); 897int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
873struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data); 898struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data);
@@ -937,42 +962,26 @@ static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
937 return -EINVAL; 962 return -EINVAL;
938} 963}
939 964
940static inline int regmap_update_bits(struct regmap *map, unsigned int reg, 965static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg,
941 unsigned int mask, unsigned int val) 966 unsigned int mask, unsigned int val,
942{ 967 bool *change, bool async, bool force)
943 WARN_ONCE(1, "regmap API is disabled");
944 return -EINVAL;
945}
946
947static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
948 unsigned int mask, unsigned int val)
949{
950 WARN_ONCE(1, "regmap API is disabled");
951 return -EINVAL;
952}
953
954static inline int regmap_update_bits_async(struct regmap *map,
955 unsigned int reg,
956 unsigned int mask, unsigned int val)
957{ 968{
958 WARN_ONCE(1, "regmap API is disabled"); 969 WARN_ONCE(1, "regmap API is disabled");
959 return -EINVAL; 970 return -EINVAL;
960} 971}
961 972
962static inline int regmap_update_bits_check(struct regmap *map, 973static inline int regmap_field_update_bits_base(struct regmap_field *field,
963 unsigned int reg, 974 unsigned int mask, unsigned int val,
964 unsigned int mask, unsigned int val, 975 bool *change, bool async, bool force)
965 bool *change)
966{ 976{
967 WARN_ONCE(1, "regmap API is disabled"); 977 WARN_ONCE(1, "regmap API is disabled");
968 return -EINVAL; 978 return -EINVAL;
969} 979}
970 980
971static inline int regmap_update_bits_check_async(struct regmap *map, 981static inline int regmap_fields_update_bits_base(struct regmap_field *field,
972 unsigned int reg, 982 unsigned int id,
973 unsigned int mask, 983 unsigned int mask, unsigned int val,
974 unsigned int val, 984 bool *change, bool async, bool force)
975 bool *change)
976{ 985{
977 WARN_ONCE(1, "regmap API is disabled"); 986 WARN_ONCE(1, "regmap API is disabled");
978 return -EINVAL; 987 return -EINVAL;
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
index 15fa8f2d35c9..2eb386017fa5 100644
--- a/include/linux/regulator/act8865.h
+++ b/include/linux/regulator/act8865.h
@@ -68,12 +68,12 @@ enum {
68 * act8865_regulator_data - regulator data 68 * act8865_regulator_data - regulator data
69 * @id: regulator id 69 * @id: regulator id
70 * @name: regulator name 70 * @name: regulator name
71 * @platform_data: regulator init data 71 * @init_data: regulator init data
72 */ 72 */
73struct act8865_regulator_data { 73struct act8865_regulator_data {
74 int id; 74 int id;
75 const char *name; 75 const char *name;
76 struct regulator_init_data *platform_data; 76 struct regulator_init_data *init_data;
77}; 77};
78 78
79/** 79/**
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 16ac9e108806..cd271e89a7e6 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -93,6 +93,8 @@ struct regulator_linear_range {
93 * @get_current_limit: Get the configured limit for a current-limited regulator. 93 * @get_current_limit: Get the configured limit for a current-limited regulator.
94 * @set_input_current_limit: Configure an input limit. 94 * @set_input_current_limit: Configure an input limit.
95 * 95 *
96 * @set_active_discharge: Set active discharge enable/disable of regulators.
97 *
96 * @set_mode: Set the configured operating mode for the regulator. 98 * @set_mode: Set the configured operating mode for the regulator.
97 * @get_mode: Get the configured operating mode for the regulator. 99 * @get_mode: Get the configured operating mode for the regulator.
98 * @get_status: Return actual (not as-configured) status of regulator, as a 100 * @get_status: Return actual (not as-configured) status of regulator, as a
@@ -149,6 +151,7 @@ struct regulator_ops {
149 151
150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); 152 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
151 int (*set_over_current_protection) (struct regulator_dev *); 153 int (*set_over_current_protection) (struct regulator_dev *);
154 int (*set_active_discharge) (struct regulator_dev *, bool enable);
152 155
153 /* enable/disable regulator */ 156 /* enable/disable regulator */
154 int (*enable) (struct regulator_dev *); 157 int (*enable) (struct regulator_dev *);
@@ -266,6 +269,14 @@ enum regulator_type {
266 * @bypass_mask: Mask for control when using regmap set_bypass 269 * @bypass_mask: Mask for control when using regmap set_bypass
267 * @bypass_val_on: Enabling value for control when using regmap set_bypass 270 * @bypass_val_on: Enabling value for control when using regmap set_bypass
268 * @bypass_val_off: Disabling value for control when using regmap set_bypass 271 * @bypass_val_off: Disabling value for control when using regmap set_bypass
272 * @active_discharge_off: Enabling value for control when using regmap
273 * set_active_discharge
274 * @active_discharge_on: Disabling value for control when using regmap
275 * set_active_discharge
276 * @active_discharge_mask: Mask for control when using regmap
277 * set_active_discharge
278 * @active_discharge_reg: Register for control when using regmap
279 * set_active_discharge
269 * 280 *
270 * @enable_time: Time taken for initial enable of regulator (in uS). 281 * @enable_time: Time taken for initial enable of regulator (in uS).
271 * @off_on_delay: guard time (in uS), before re-enabling a regulator 282 * @off_on_delay: guard time (in uS), before re-enabling a regulator
@@ -315,6 +326,10 @@ struct regulator_desc {
315 unsigned int bypass_mask; 326 unsigned int bypass_mask;
316 unsigned int bypass_val_on; 327 unsigned int bypass_val_on;
317 unsigned int bypass_val_off; 328 unsigned int bypass_val_off;
329 unsigned int active_discharge_on;
330 unsigned int active_discharge_off;
331 unsigned int active_discharge_mask;
332 unsigned int active_discharge_reg;
318 333
319 unsigned int enable_time; 334 unsigned int enable_time;
320 335
@@ -447,6 +462,8 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
447int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable); 462int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable);
448int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable); 463int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable);
449 464
465int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
466 bool enable);
450void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); 467void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
451 468
452#endif 469#endif
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
index 132e05c46661..6029279f4eed 100644
--- a/include/linux/regulator/lp872x.h
+++ b/include/linux/regulator/lp872x.h
@@ -18,6 +18,9 @@
18 18
19#define LP872X_MAX_REGULATORS 9 19#define LP872X_MAX_REGULATORS 9
20 20
21#define LP8720_ENABLE_DELAY 200
22#define LP8725_ENABLE_DELAY 30000
23
21enum lp872x_regulator_id { 24enum lp872x_regulator_id {
22 LP8720_ID_BASE, 25 LP8720_ID_BASE,
23 LP8720_ID_LDO1 = LP8720_ID_BASE, 26 LP8720_ID_LDO1 = LP8720_ID_BASE,
@@ -79,12 +82,14 @@ struct lp872x_regulator_data {
79 * @update_config : if LP872X_GENERAL_CFG register is updated, set true 82 * @update_config : if LP872X_GENERAL_CFG register is updated, set true
80 * @regulator_data : platform regulator id and init data 83 * @regulator_data : platform regulator id and init data
81 * @dvs : dvs data for buck voltage control 84 * @dvs : dvs data for buck voltage control
85 * @enable_gpio : gpio pin number for enable control
82 */ 86 */
83struct lp872x_platform_data { 87struct lp872x_platform_data {
84 u8 general_config; 88 u8 general_config;
85 bool update_config; 89 bool update_config;
86 struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS]; 90 struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
87 struct lp872x_dvs *dvs; 91 struct lp872x_dvs *dvs;
92 int enable_gpio;
88}; 93};
89 94
90#endif 95#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index a1067d0b3991..5d627c83a630 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -42,6 +42,13 @@ struct regulator;
42#define REGULATOR_CHANGE_DRMS 0x10 42#define REGULATOR_CHANGE_DRMS 0x10
43#define REGULATOR_CHANGE_BYPASS 0x20 43#define REGULATOR_CHANGE_BYPASS 0x20
44 44
45/* Regulator active discharge flags */
46enum regulator_active_discharge {
47 REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
48 REGULATOR_ACTIVE_DISCHARGE_DISABLE,
49 REGULATOR_ACTIVE_DISCHARGE_ENABLE,
50};
51
45/** 52/**
46 * struct regulator_state - regulator state during low power system states 53 * struct regulator_state - regulator state during low power system states
47 * 54 *
@@ -100,6 +107,9 @@ struct regulator_state {
100 * @initial_state: Suspend state to set by default. 107 * @initial_state: Suspend state to set by default.
101 * @initial_mode: Mode to set at startup. 108 * @initial_mode: Mode to set at startup.
102 * @ramp_delay: Time to settle down after voltage change (unit: uV/us) 109 * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
110 * @active_discharge: Enable/disable active discharge. The enum
111 * regulator_active_discharge values are used for
112 * initialisation.
103 * @enable_time: Turn-on time of the rails (unit: microseconds) 113 * @enable_time: Turn-on time of the rails (unit: microseconds)
104 */ 114 */
105struct regulation_constraints { 115struct regulation_constraints {
@@ -140,6 +150,8 @@ struct regulation_constraints {
140 unsigned int ramp_delay; 150 unsigned int ramp_delay;
141 unsigned int enable_time; 151 unsigned int enable_time;
142 152
153 unsigned int active_discharge;
154
143 /* constraint flags */ 155 /* constraint flags */
144 unsigned always_on:1; /* regulator never off when system is on */ 156 unsigned always_on:1; /* regulator never off when system is on */
145 unsigned boot_on:1; /* bootloader/firmware enabled regulator */ 157 unsigned boot_on:1; /* bootloader/firmware enabled regulator */
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index ce6b962ffed4..a3a5bcdb1d02 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -38,7 +38,7 @@ struct of_phandle_args;
38 * @nr_resets: number of reset controls in this reset controller device 38 * @nr_resets: number of reset controls in this reset controller device
39 */ 39 */
40struct reset_controller_dev { 40struct reset_controller_dev {
41 struct reset_control_ops *ops; 41 const struct reset_control_ops *ops;
42 struct module *owner; 42 struct module *owner;
43 struct list_head list; 43 struct list_head list;
44 struct device_node *of_node; 44 struct device_node *of_node;
diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h
deleted file mode 100644
index 20bcb55498cd..000000000000
--- a/include/linux/rfkill-gpio.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (c) 2011, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19
20#ifndef __RFKILL_GPIO_H
21#define __RFKILL_GPIO_H
22
23#include <linux/types.h>
24#include <linux/rfkill.h>
25
26/**
27 * struct rfkill_gpio_platform_data - platform data for rfkill gpio device.
28 * for unused gpio's, the expected value is -1.
29 * @name: name for the gpio rf kill instance
30 */
31
32struct rfkill_gpio_platform_data {
33 char *name;
34 enum rfkill_type type;
35};
36
37#endif /* __RFKILL_GPIO_H */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index d9010789b4e8..e6a0031d1b1f 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -104,7 +104,8 @@ int __must_check rfkill_register(struct rfkill *rfkill);
104 * 104 *
105 * Pause polling -- say transmitter is off for other reasons. 105 * Pause polling -- say transmitter is off for other reasons.
106 * NOTE: not necessary for suspend/resume -- in that case the 106 * NOTE: not necessary for suspend/resume -- in that case the
107 * core stops polling anyway 107 * core stops polling anyway (but will also correctly handle
108 * the case of polling having been paused before suspend.)
108 */ 109 */
109void rfkill_pause_polling(struct rfkill *rfkill); 110void rfkill_pause_polling(struct rfkill *rfkill);
110 111
@@ -212,6 +213,15 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
212 * @rfkill: rfkill struct to query 213 * @rfkill: rfkill struct to query
213 */ 214 */
214bool rfkill_blocked(struct rfkill *rfkill); 215bool rfkill_blocked(struct rfkill *rfkill);
216
217/**
218 * rfkill_find_type - Helpper for finding rfkill type by name
219 * @name: the name of the type
220 *
221 * Returns enum rfkill_type that conrresponds the name.
222 */
223enum rfkill_type rfkill_find_type(const char *name);
224
215#else /* !RFKILL */ 225#else /* !RFKILL */
216static inline struct rfkill * __must_check 226static inline struct rfkill * __must_check
217rfkill_alloc(const char *name, 227rfkill_alloc(const char *name,
@@ -268,6 +278,12 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
268{ 278{
269 return false; 279 return false;
270} 280}
281
282static inline enum rfkill_type rfkill_find_type(const char *name)
283{
284 return RFKILL_TYPE_ALL;
285}
286
271#endif /* RFKILL || RFKILL_MODULE */ 287#endif /* RFKILL || RFKILL_MODULE */
272 288
273 289
diff --git a/include/linux/rio.h b/include/linux/rio.h
index cde976e86b48..aa2323893e8d 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -137,6 +137,13 @@ struct rio_switch_ops {
137 int (*em_handle) (struct rio_dev *dev, u8 swport); 137 int (*em_handle) (struct rio_dev *dev, u8 swport);
138}; 138};
139 139
140enum rio_device_state {
141 RIO_DEVICE_INITIALIZING,
142 RIO_DEVICE_RUNNING,
143 RIO_DEVICE_GONE,
144 RIO_DEVICE_SHUTDOWN,
145};
146
140/** 147/**
141 * struct rio_dev - RIO device info 148 * struct rio_dev - RIO device info
142 * @global_list: Node in list of all RIO devices 149 * @global_list: Node in list of all RIO devices
@@ -165,6 +172,7 @@ struct rio_switch_ops {
165 * @destid: Network destination ID (or associated destid for switch) 172 * @destid: Network destination ID (or associated destid for switch)
166 * @hopcount: Hopcount to this device 173 * @hopcount: Hopcount to this device
167 * @prev: Previous RIO device connected to the current one 174 * @prev: Previous RIO device connected to the current one
175 * @state: device state
168 * @rswitch: struct rio_switch (if valid for this device) 176 * @rswitch: struct rio_switch (if valid for this device)
169 */ 177 */
170struct rio_dev { 178struct rio_dev {
@@ -194,6 +202,7 @@ struct rio_dev {
194 u16 destid; 202 u16 destid;
195 u8 hopcount; 203 u8 hopcount;
196 struct rio_dev *prev; 204 struct rio_dev *prev;
205 atomic_t state;
197 struct rio_switch rswitch[0]; /* RIO switch info */ 206 struct rio_switch rswitch[0]; /* RIO switch info */
198}; 207};
199 208
@@ -202,6 +211,7 @@ struct rio_dev {
202#define to_rio_dev(n) container_of(n, struct rio_dev, dev) 211#define to_rio_dev(n) container_of(n, struct rio_dev, dev)
203#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0]) 212#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0])
204#define to_rio_mport(n) container_of(n, struct rio_mport, dev) 213#define to_rio_mport(n) container_of(n, struct rio_mport, dev)
214#define to_rio_net(n) container_of(n, struct rio_net, dev)
205 215
206/** 216/**
207 * struct rio_msg - RIO message event 217 * struct rio_msg - RIO message event
@@ -235,8 +245,11 @@ enum rio_phy_type {
235/** 245/**
236 * struct rio_mport - RIO master port info 246 * struct rio_mport - RIO master port info
237 * @dbells: List of doorbell events 247 * @dbells: List of doorbell events
248 * @pwrites: List of portwrite events
238 * @node: Node in global list of master ports 249 * @node: Node in global list of master ports
239 * @nnode: Node in network list of master ports 250 * @nnode: Node in network list of master ports
251 * @net: RIO net this mport is attached to
252 * @lock: lock to synchronize lists manipulations
240 * @iores: I/O mem resource that this master port interface owns 253 * @iores: I/O mem resource that this master port interface owns
241 * @riores: RIO resources that this master port interfaces owns 254 * @riores: RIO resources that this master port interfaces owns
242 * @inb_msg: RIO inbound message event descriptors 255 * @inb_msg: RIO inbound message event descriptors
@@ -253,11 +266,16 @@ enum rio_phy_type {
253 * @priv: Master port private data 266 * @priv: Master port private data
254 * @dma: DMA device associated with mport 267 * @dma: DMA device associated with mport
255 * @nscan: RapidIO network enumeration/discovery operations 268 * @nscan: RapidIO network enumeration/discovery operations
269 * @state: mport device state
270 * @pwe_refcnt: port-write enable ref counter to track enable/disable requests
256 */ 271 */
257struct rio_mport { 272struct rio_mport {
258 struct list_head dbells; /* list of doorbell events */ 273 struct list_head dbells; /* list of doorbell events */
274 struct list_head pwrites; /* list of portwrite events */
259 struct list_head node; /* node in global list of ports */ 275 struct list_head node; /* node in global list of ports */
260 struct list_head nnode; /* node in net list of ports */ 276 struct list_head nnode; /* node in net list of ports */
277 struct rio_net *net; /* RIO net this mport is attached to */
278 struct mutex lock;
261 struct resource iores; 279 struct resource iores;
262 struct resource riores[RIO_MAX_MPORT_RESOURCES]; 280 struct resource riores[RIO_MAX_MPORT_RESOURCES];
263 struct rio_msg inb_msg[RIO_MAX_MBOX]; 281 struct rio_msg inb_msg[RIO_MAX_MBOX];
@@ -280,20 +298,20 @@ struct rio_mport {
280 struct dma_device dma; 298 struct dma_device dma;
281#endif 299#endif
282 struct rio_scan *nscan; 300 struct rio_scan *nscan;
301 atomic_t state;
302 unsigned int pwe_refcnt;
283}; 303};
284 304
305static inline int rio_mport_is_running(struct rio_mport *mport)
306{
307 return atomic_read(&mport->state) == RIO_DEVICE_RUNNING;
308}
309
285/* 310/*
286 * Enumeration/discovery control flags 311 * Enumeration/discovery control flags
287 */ 312 */
288#define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */ 313#define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */
289 314
290struct rio_id_table {
291 u16 start; /* logical minimal id */
292 u32 max; /* max number of IDs in table */
293 spinlock_t lock;
294 unsigned long *table;
295};
296
297/** 315/**
298 * struct rio_net - RIO network info 316 * struct rio_net - RIO network info
299 * @node: Node in global list of RIO networks 317 * @node: Node in global list of RIO networks
@@ -302,7 +320,9 @@ struct rio_id_table {
302 * @mports: List of master ports accessing this network 320 * @mports: List of master ports accessing this network
303 * @hport: Default port for accessing this network 321 * @hport: Default port for accessing this network
304 * @id: RIO network ID 322 * @id: RIO network ID
305 * @destid_table: destID allocation table 323 * @dev: Device object
324 * @enum_data: private data specific to a network enumerator
325 * @release: enumerator-specific release callback
306 */ 326 */
307struct rio_net { 327struct rio_net {
308 struct list_head node; /* node in list of networks */ 328 struct list_head node; /* node in list of networks */
@@ -311,7 +331,53 @@ struct rio_net {
311 struct list_head mports; /* list of ports accessing net */ 331 struct list_head mports; /* list of ports accessing net */
312 struct rio_mport *hport; /* primary port for accessing net */ 332 struct rio_mport *hport; /* primary port for accessing net */
313 unsigned char id; /* RIO network ID */ 333 unsigned char id; /* RIO network ID */
314 struct rio_id_table destid_table; /* destID allocation table */ 334 struct device dev;
335 void *enum_data; /* private data for enumerator of the network */
336 void (*release)(struct rio_net *net);
337};
338
339enum rio_link_speed {
340 RIO_LINK_DOWN = 0, /* SRIO Link not initialized */
341 RIO_LINK_125 = 1, /* 1.25 GBaud */
342 RIO_LINK_250 = 2, /* 2.5 GBaud */
343 RIO_LINK_312 = 3, /* 3.125 GBaud */
344 RIO_LINK_500 = 4, /* 5.0 GBaud */
345 RIO_LINK_625 = 5 /* 6.25 GBaud */
346};
347
348enum rio_link_width {
349 RIO_LINK_1X = 0,
350 RIO_LINK_1XR = 1,
351 RIO_LINK_2X = 3,
352 RIO_LINK_4X = 2,
353 RIO_LINK_8X = 4,
354 RIO_LINK_16X = 5
355};
356
357enum rio_mport_flags {
358 RIO_MPORT_DMA = (1 << 0), /* supports DMA data transfers */
359 RIO_MPORT_DMA_SG = (1 << 1), /* DMA supports HW SG mode */
360 RIO_MPORT_IBSG = (1 << 2), /* inbound mapping supports SG */
361};
362
363/**
364 * struct rio_mport_attr - RIO mport device attributes
365 * @flags: mport device capability flags
366 * @link_speed: SRIO link speed value (as defined by RapidIO specification)
367 * @link_width: SRIO link width value (as defined by RapidIO specification)
368 * @dma_max_sge: number of SG list entries that can be handled by DMA channel(s)
369 * @dma_max_size: max number of bytes in single DMA transfer (SG entry)
370 * @dma_align: alignment shift for DMA operations (as for other DMA operations)
371 */
372struct rio_mport_attr {
373 int flags;
374 int link_speed;
375 int link_width;
376
377 /* DMA capability info: valid only if RIO_MPORT_DMA flag is set */
378 int dma_max_sge;
379 int dma_max_size;
380 int dma_align;
315}; 381};
316 382
317/* Low-level architecture-dependent routines */ 383/* Low-level architecture-dependent routines */
@@ -333,6 +399,9 @@ struct rio_net {
333 * @get_inb_message: Callback to get a message from an inbound mailbox queue. 399 * @get_inb_message: Callback to get a message from an inbound mailbox queue.
334 * @map_inb: Callback to map RapidIO address region into local memory space. 400 * @map_inb: Callback to map RapidIO address region into local memory space.
335 * @unmap_inb: Callback to unmap RapidIO address region mapped with map_inb(). 401 * @unmap_inb: Callback to unmap RapidIO address region mapped with map_inb().
402 * @query_mport: Callback to query mport device attributes.
403 * @map_outb: Callback to map outbound address region into local memory space.
404 * @unmap_outb: Callback to unmap outbound RapidIO address region.
336 */ 405 */
337struct rio_ops { 406struct rio_ops {
338 int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len, 407 int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len,
@@ -358,6 +427,11 @@ struct rio_ops {
358 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart, 427 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
359 u64 rstart, u32 size, u32 flags); 428 u64 rstart, u32 size, u32 flags);
360 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart); 429 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
430 int (*query_mport)(struct rio_mport *mport,
431 struct rio_mport_attr *attr);
432 int (*map_outb)(struct rio_mport *mport, u16 destid, u64 rstart,
433 u32 size, u32 flags, dma_addr_t *laddr);
434 void (*unmap_outb)(struct rio_mport *mport, u16 destid, u64 rstart);
361}; 435};
362 436
363#define RIO_RESOURCE_MEM 0x00000100 437#define RIO_RESOURCE_MEM 0x00000100
@@ -376,6 +450,7 @@ struct rio_ops {
376 * @id_table: RIO device ids to be associated with this driver 450 * @id_table: RIO device ids to be associated with this driver
377 * @probe: RIO device inserted 451 * @probe: RIO device inserted
378 * @remove: RIO device removed 452 * @remove: RIO device removed
453 * @shutdown: shutdown notification callback
379 * @suspend: RIO device suspended 454 * @suspend: RIO device suspended
380 * @resume: RIO device awakened 455 * @resume: RIO device awakened
381 * @enable_wake: RIO device enable wake event 456 * @enable_wake: RIO device enable wake event
@@ -390,6 +465,7 @@ struct rio_driver {
390 const struct rio_device_id *id_table; 465 const struct rio_device_id *id_table;
391 int (*probe) (struct rio_dev * dev, const struct rio_device_id * id); 466 int (*probe) (struct rio_dev * dev, const struct rio_device_id * id);
392 void (*remove) (struct rio_dev * dev); 467 void (*remove) (struct rio_dev * dev);
468 void (*shutdown)(struct rio_dev *dev);
393 int (*suspend) (struct rio_dev * dev, u32 state); 469 int (*suspend) (struct rio_dev * dev, u32 state);
394 int (*resume) (struct rio_dev * dev); 470 int (*resume) (struct rio_dev * dev);
395 int (*enable_wake) (struct rio_dev * dev, u32 state, int enable); 471 int (*enable_wake) (struct rio_dev * dev, u32 state, int enable);
@@ -476,10 +552,14 @@ struct rio_scan_node {
476}; 552};
477 553
478/* Architecture and hardware-specific functions */ 554/* Architecture and hardware-specific functions */
555extern int rio_mport_initialize(struct rio_mport *);
479extern int rio_register_mport(struct rio_mport *); 556extern int rio_register_mport(struct rio_mport *);
557extern int rio_unregister_mport(struct rio_mport *);
480extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); 558extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
481extern void rio_close_inb_mbox(struct rio_mport *, int); 559extern void rio_close_inb_mbox(struct rio_mport *, int);
482extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); 560extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int);
483extern void rio_close_outb_mbox(struct rio_mport *, int); 561extern void rio_close_outb_mbox(struct rio_mport *, int);
562extern int rio_query_mport(struct rio_mport *port,
563 struct rio_mport_attr *mport_attr);
484 564
485#endif /* LINUX_RIO_H */ 565#endif /* LINUX_RIO_H */
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 9fc2f213e74f..0834264fb7f2 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -369,12 +369,24 @@ void rio_release_region(struct rio_dev *, int);
369extern int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local, 369extern int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local,
370 u64 rbase, u32 size, u32 rflags); 370 u64 rbase, u32 size, u32 rflags);
371extern void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart); 371extern void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart);
372extern int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase,
373 u32 size, u32 rflags, dma_addr_t *local);
374extern void rio_unmap_outb_region(struct rio_mport *mport,
375 u16 destid, u64 rstart);
372 376
373/* Port-Write management */ 377/* Port-Write management */
374extern int rio_request_inb_pwrite(struct rio_dev *, 378extern int rio_request_inb_pwrite(struct rio_dev *,
375 int (*)(struct rio_dev *, union rio_pw_msg*, int)); 379 int (*)(struct rio_dev *, union rio_pw_msg*, int));
376extern int rio_release_inb_pwrite(struct rio_dev *); 380extern int rio_release_inb_pwrite(struct rio_dev *);
377extern int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg); 381extern int rio_add_mport_pw_handler(struct rio_mport *mport, void *dev_id,
382 int (*pwcback)(struct rio_mport *mport, void *dev_id,
383 union rio_pw_msg *msg, int step));
384extern int rio_del_mport_pw_handler(struct rio_mport *mport, void *dev_id,
385 int (*pwcback)(struct rio_mport *mport, void *dev_id,
386 union rio_pw_msg *msg, int step));
387extern int rio_inb_pwrite_handler(struct rio_mport *mport,
388 union rio_pw_msg *pw_msg);
389extern void rio_pw_enable(struct rio_mport *mport, int enable);
378 390
379/* LDM support */ 391/* LDM support */
380int rio_register_driver(struct rio_driver *); 392int rio_register_driver(struct rio_driver *);
@@ -435,6 +447,7 @@ static inline void rio_set_drvdata(struct rio_dev *rdev, void *data)
435 447
436/* Misc driver helpers */ 448/* Misc driver helpers */
437extern u16 rio_local_get_device_id(struct rio_mport *port); 449extern u16 rio_local_get_device_id(struct rio_mport *port);
450extern void rio_local_set_device_id(struct rio_mport *port, u16 did);
438extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); 451extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
439extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did, 452extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
440 struct rio_dev *from); 453 struct rio_dev *from);
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 218168a2b5e9..1063ae382bc2 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -238,6 +238,8 @@
238#define RIO_PORT_N_ACK_INBOUND 0x3f000000 238#define RIO_PORT_N_ACK_INBOUND 0x3f000000
239#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00 239#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00
240#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f 240#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f
241#define RIO_PORT_N_CTL2_CSR(x) (0x0054 + x*0x20)
242#define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000
241#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20) 243#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20)
242#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */ 244#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */
243#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */ 245#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */
@@ -249,6 +251,7 @@
249#define RIO_PORT_N_CTL_PWIDTH 0xc0000000 251#define RIO_PORT_N_CTL_PWIDTH 0xc0000000
250#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000 252#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000
251#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000 253#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000
254#define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */
252#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001 255#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001
253#define RIO_PORT_N_CTL_LOCKOUT 0x00000002 256#define RIO_PORT_N_CTL_LOCKOUT 0x00000002
254#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000 257#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index a07f42bedda3..49eb4f8ebac9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -86,6 +86,7 @@ enum ttu_flags {
86 TTU_MIGRATION = 2, /* migration mode */ 86 TTU_MIGRATION = 2, /* migration mode */
87 TTU_MUNLOCK = 4, /* munlock mode */ 87 TTU_MUNLOCK = 4, /* munlock mode */
88 TTU_LZFREE = 8, /* lazy free mode */ 88 TTU_LZFREE = 8, /* lazy free mode */
89 TTU_SPLIT_HUGE_PMD = 16, /* split huge PMD if any */
89 90
90 TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ 91 TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
91 TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ 92 TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
@@ -93,6 +94,8 @@ enum ttu_flags {
93 TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible 94 TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
94 * and caller guarantees they will 95 * and caller guarantees they will
95 * do a final flush if necessary */ 96 * do a final flush if necessary */
97 TTU_RMAP_LOCKED = (1 << 12) /* do not grab rmap lock:
98 * caller holds it */
96}; 99};
97 100
98#ifdef CONFIG_MMU 101#ifdef CONFIG_MMU
@@ -240,6 +243,8 @@ int page_mkclean(struct page *);
240 */ 243 */
241int try_to_munlock(struct page *); 244int try_to_munlock(struct page *);
242 245
246void remove_migration_ptes(struct page *old, struct page *new, bool locked);
247
243/* 248/*
244 * Called by memory-failure.c to kill processes. 249 * Called by memory-failure.c to kill processes.
245 */ 250 */
@@ -266,6 +271,7 @@ struct rmap_walk_control {
266}; 271};
267 272
268int rmap_walk(struct page *page, struct rmap_walk_control *rwc); 273int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
274int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
269 275
270#else /* !CONFIG_MMU */ 276#else /* !CONFIG_MMU */
271 277
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 3359f0422c6b..b693adac853b 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -89,6 +89,8 @@ struct rtc_class_ops {
89 int (*set_mmss)(struct device *, unsigned long secs); 89 int (*set_mmss)(struct device *, unsigned long secs);
90 int (*read_callback)(struct device *, int data); 90 int (*read_callback)(struct device *, int data);
91 int (*alarm_irq_enable)(struct device *, unsigned int enabled); 91 int (*alarm_irq_enable)(struct device *, unsigned int enabled);
92 int (*read_offset)(struct device *, long *offset);
93 int (*set_offset)(struct device *, long offset);
92}; 94};
93 95
94#define RTC_DEVICE_NAME_SIZE 20 96#define RTC_DEVICE_NAME_SIZE 20
@@ -208,6 +210,8 @@ void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
208int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, 210int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
209 ktime_t expires, ktime_t period); 211 ktime_t expires, ktime_t period);
210void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer); 212void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
213int rtc_read_offset(struct rtc_device *rtc, long *offset);
214int rtc_set_offset(struct rtc_device *rtc, long offset);
211void rtc_timer_do_work(struct work_struct *work); 215void rtc_timer_do_work(struct work_struct *work);
212 216
213static inline bool is_leap_year(unsigned int year) 217static inline bool is_leap_year(unsigned int year)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a10494a94cc3..52c4847b05e2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -51,6 +51,7 @@ struct sched_param {
51#include <linux/resource.h> 51#include <linux/resource.h>
52#include <linux/timer.h> 52#include <linux/timer.h>
53#include <linux/hrtimer.h> 53#include <linux/hrtimer.h>
54#include <linux/kcov.h>
54#include <linux/task_io_accounting.h> 55#include <linux/task_io_accounting.h>
55#include <linux/latencytop.h> 56#include <linux/latencytop.h>
56#include <linux/cred.h> 57#include <linux/cred.h>
@@ -182,8 +183,6 @@ extern void update_cpu_load_nohz(int active);
182static inline void update_cpu_load_nohz(int active) { } 183static inline void update_cpu_load_nohz(int active) { }
183#endif 184#endif
184 185
185extern unsigned long get_parent_ip(unsigned long addr);
186
187extern void dump_cpu_task(int cpu); 186extern void dump_cpu_task(int cpu);
188 187
189struct seq_file; 188struct seq_file;
@@ -427,6 +426,7 @@ extern signed long schedule_timeout(signed long timeout);
427extern signed long schedule_timeout_interruptible(signed long timeout); 426extern signed long schedule_timeout_interruptible(signed long timeout);
428extern signed long schedule_timeout_killable(signed long timeout); 427extern signed long schedule_timeout_killable(signed long timeout);
429extern signed long schedule_timeout_uninterruptible(signed long timeout); 428extern signed long schedule_timeout_uninterruptible(signed long timeout);
429extern signed long schedule_timeout_idle(signed long timeout);
430asmlinkage void schedule(void); 430asmlinkage void schedule(void);
431extern void schedule_preempt_disabled(void); 431extern void schedule_preempt_disabled(void);
432 432
@@ -719,6 +719,10 @@ struct signal_struct {
719 /* Earliest-expiration cache. */ 719 /* Earliest-expiration cache. */
720 struct task_cputime cputime_expires; 720 struct task_cputime cputime_expires;
721 721
722#ifdef CONFIG_NO_HZ_FULL
723 atomic_t tick_dep_mask;
724#endif
725
722 struct list_head cpu_timers[3]; 726 struct list_head cpu_timers[3];
723 727
724 struct pid *tty_old_pgrp; 728 struct pid *tty_old_pgrp;
@@ -775,7 +779,6 @@ struct signal_struct {
775#endif 779#endif
776#ifdef CONFIG_AUDIT 780#ifdef CONFIG_AUDIT
777 unsigned audit_tty; 781 unsigned audit_tty;
778 unsigned audit_tty_log_passwd;
779 struct tty_audit_buf *tty_audit_buf; 782 struct tty_audit_buf *tty_audit_buf;
780#endif 783#endif
781 784
@@ -920,6 +923,10 @@ static inline int sched_info_on(void)
920#endif 923#endif
921} 924}
922 925
926#ifdef CONFIG_SCHEDSTATS
927void force_schedstat_enabled(void);
928#endif
929
923enum cpu_idle_type { 930enum cpu_idle_type {
924 CPU_IDLE, 931 CPU_IDLE,
925 CPU_NOT_IDLE, 932 CPU_NOT_IDLE,
@@ -1289,6 +1296,8 @@ struct sched_rt_entity {
1289 unsigned long timeout; 1296 unsigned long timeout;
1290 unsigned long watchdog_stamp; 1297 unsigned long watchdog_stamp;
1291 unsigned int time_slice; 1298 unsigned int time_slice;
1299 unsigned short on_rq;
1300 unsigned short on_list;
1292 1301
1293 struct sched_rt_entity *back; 1302 struct sched_rt_entity *back;
1294#ifdef CONFIG_RT_GROUP_SCHED 1303#ifdef CONFIG_RT_GROUP_SCHED
@@ -1329,10 +1338,6 @@ struct sched_dl_entity {
1329 * task has to wait for a replenishment to be performed at the 1338 * task has to wait for a replenishment to be performed at the
1330 * next firing of dl_timer. 1339 * next firing of dl_timer.
1331 * 1340 *
1332 * @dl_new tells if a new instance arrived. If so we must
1333 * start executing it with full runtime and reset its absolute
1334 * deadline;
1335 *
1336 * @dl_boosted tells if we are boosted due to DI. If so we are 1341 * @dl_boosted tells if we are boosted due to DI. If so we are
1337 * outside bandwidth enforcement mechanism (but only until we 1342 * outside bandwidth enforcement mechanism (but only until we
1338 * exit the critical section); 1343 * exit the critical section);
@@ -1340,7 +1345,7 @@ struct sched_dl_entity {
1340 * @dl_yielded tells if task gave up the cpu before consuming 1345 * @dl_yielded tells if task gave up the cpu before consuming
1341 * all its available runtime during the last job. 1346 * all its available runtime during the last job.
1342 */ 1347 */
1343 int dl_throttled, dl_new, dl_boosted, dl_yielded; 1348 int dl_throttled, dl_boosted, dl_yielded;
1344 1349
1345 /* 1350 /*
1346 * Bandwidth enforcement timer. Each -deadline task has its 1351 * Bandwidth enforcement timer. Each -deadline task has its
@@ -1542,6 +1547,10 @@ struct task_struct {
1542 VTIME_SYS, 1547 VTIME_SYS,
1543 } vtime_snap_whence; 1548 } vtime_snap_whence;
1544#endif 1549#endif
1550
1551#ifdef CONFIG_NO_HZ_FULL
1552 atomic_t tick_dep_mask;
1553#endif
1545 unsigned long nvcsw, nivcsw; /* context switch counts */ 1554 unsigned long nvcsw, nivcsw; /* context switch counts */
1546 u64 start_time; /* monotonic time in nsec */ 1555 u64 start_time; /* monotonic time in nsec */
1547 u64 real_start_time; /* boot based time in nsec */ 1556 u64 real_start_time; /* boot based time in nsec */
@@ -1784,8 +1793,8 @@ struct task_struct {
1784 * time slack values; these are used to round up poll() and 1793 * time slack values; these are used to round up poll() and
1785 * select() etc timeout values. These are in nanoseconds. 1794 * select() etc timeout values. These are in nanoseconds.
1786 */ 1795 */
1787 unsigned long timer_slack_ns; 1796 u64 timer_slack_ns;
1788 unsigned long default_timer_slack_ns; 1797 u64 default_timer_slack_ns;
1789 1798
1790#ifdef CONFIG_KASAN 1799#ifdef CONFIG_KASAN
1791 unsigned int kasan_depth; 1800 unsigned int kasan_depth;
@@ -1811,6 +1820,16 @@ struct task_struct {
1811 /* bitmask and counter of trace recursion */ 1820 /* bitmask and counter of trace recursion */
1812 unsigned long trace_recursion; 1821 unsigned long trace_recursion;
1813#endif /* CONFIG_TRACING */ 1822#endif /* CONFIG_TRACING */
1823#ifdef CONFIG_KCOV
1824 /* Coverage collection mode enabled for this task (0 if disabled). */
1825 enum kcov_mode kcov_mode;
1826 /* Size of the kcov_area. */
1827 unsigned kcov_size;
1828 /* Buffer for coverage collection. */
1829 void *kcov_area;
1830 /* kcov desciptor wired with this task or NULL. */
1831 struct kcov *kcov;
1832#endif
1814#ifdef CONFIG_MEMCG 1833#ifdef CONFIG_MEMCG
1815 struct mem_cgroup *memcg_in_oom; 1834 struct mem_cgroup *memcg_in_oom;
1816 gfp_t memcg_oom_gfp_mask; 1835 gfp_t memcg_oom_gfp_mask;
@@ -1830,6 +1849,9 @@ struct task_struct {
1830 unsigned long task_state_change; 1849 unsigned long task_state_change;
1831#endif 1850#endif
1832 int pagefault_disabled; 1851 int pagefault_disabled;
1852#ifdef CONFIG_MMU
1853 struct task_struct *oom_reaper_list;
1854#endif
1833/* CPU-specific state of this task */ 1855/* CPU-specific state of this task */
1834 struct thread_struct thread; 1856 struct thread_struct thread;
1835/* 1857/*
@@ -2356,10 +2378,7 @@ static inline void wake_up_nohz_cpu(int cpu) { }
2356#endif 2378#endif
2357 2379
2358#ifdef CONFIG_NO_HZ_FULL 2380#ifdef CONFIG_NO_HZ_FULL
2359extern bool sched_can_stop_tick(void);
2360extern u64 scheduler_tick_max_deferment(void); 2381extern u64 scheduler_tick_max_deferment(void);
2361#else
2362static inline bool sched_can_stop_tick(void) { return false; }
2363#endif 2382#endif
2364 2383
2365#ifdef CONFIG_SCHED_AUTOGROUP 2384#ifdef CONFIG_SCHED_AUTOGROUP
@@ -2855,10 +2874,18 @@ static inline unsigned long stack_not_used(struct task_struct *p)
2855 unsigned long *n = end_of_stack(p); 2874 unsigned long *n = end_of_stack(p);
2856 2875
2857 do { /* Skip over canary */ 2876 do { /* Skip over canary */
2877# ifdef CONFIG_STACK_GROWSUP
2878 n--;
2879# else
2858 n++; 2880 n++;
2881# endif
2859 } while (!*n); 2882 } while (!*n);
2860 2883
2884# ifdef CONFIG_STACK_GROWSUP
2885 return (unsigned long)end_of_stack(p) - (unsigned long)n;
2886# else
2861 return (unsigned long)n - (unsigned long)end_of_stack(p); 2887 return (unsigned long)n - (unsigned long)end_of_stack(p);
2888# endif
2862} 2889}
2863#endif 2890#endif
2864extern void set_task_stack_end_magic(struct task_struct *tsk); 2891extern void set_task_stack_end_magic(struct task_struct *tsk);
@@ -3207,4 +3234,13 @@ static inline unsigned long rlimit_max(unsigned int limit)
3207 return task_rlimit_max(current, limit); 3234 return task_rlimit_max(current, limit);
3208} 3235}
3209 3236
3237#ifdef CONFIG_CPU_FREQ
3238struct update_util_data {
3239 void (*func)(struct update_util_data *data,
3240 u64 time, unsigned long util, unsigned long max);
3241};
3242
3243void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
3244#endif /* CONFIG_CPU_FREQ */
3245
3210#endif 3246#endif
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index c9e4731cf10b..22db1e63707e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -14,27 +14,6 @@ extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
14enum { sysctl_hung_task_timeout_secs = 0 }; 14enum { sysctl_hung_task_timeout_secs = 0 };
15#endif 15#endif
16 16
17/*
18 * Default maximum number of active map areas, this limits the number of vmas
19 * per mm struct. Users can overwrite this number by sysctl but there is a
20 * problem.
21 *
22 * When a program's coredump is generated as ELF format, a section is created
23 * per a vma. In ELF, the number of sections is represented in unsigned short.
24 * This means the number of sections should be smaller than 65535 at coredump.
25 * Because the kernel adds some informative sections to a image of program at
26 * generating coredump, we need some margin. The number of extra sections is
27 * 1-3 now and depends on arch. We use "5" as safe margin, here.
28 *
29 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
30 * not a hard limit any more. Although some userspace tools can be surprised by
31 * that.
32 */
33#define MAPCOUNT_ELF_CORE_MARGIN (5)
34#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
35
36extern int sysctl_max_map_count;
37
38extern unsigned int sysctl_sched_latency; 17extern unsigned int sysctl_sched_latency;
39extern unsigned int sysctl_sched_min_granularity; 18extern unsigned int sysctl_sched_min_granularity;
40extern unsigned int sysctl_sched_wakeup_granularity; 19extern unsigned int sysctl_sched_wakeup_granularity;
@@ -95,4 +74,8 @@ extern int sysctl_numa_balancing(struct ctl_table *table, int write,
95 void __user *buffer, size_t *lenp, 74 void __user *buffer, size_t *lenp,
96 loff_t *ppos); 75 loff_t *ppos);
97 76
77extern int sysctl_schedstats(struct ctl_table *table, int write,
78 void __user *buffer, size_t *lenp,
79 loff_t *ppos);
80
98#endif /* _SCHED_SYSCTL_H */ 81#endif /* _SCHED_SYSCTL_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index 72ce932c69b2..35de50a65665 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -33,6 +33,7 @@ enum scpi_sensor_class {
33 VOLTAGE, 33 VOLTAGE,
34 CURRENT, 34 CURRENT,
35 POWER, 35 POWER,
36 ENERGY,
36}; 37};
37 38
38struct scpi_sensor_info { 39struct scpi_sensor_info {
@@ -68,7 +69,7 @@ struct scpi_ops {
68 struct scpi_dvfs_info *(*dvfs_get_info)(u8); 69 struct scpi_dvfs_info *(*dvfs_get_info)(u8);
69 int (*sensor_get_capability)(u16 *sensors); 70 int (*sensor_get_capability)(u16 *sensors);
70 int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *); 71 int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *);
71 int (*sensor_get_value)(u16, u32 *); 72 int (*sensor_get_value)(u16, u64 *);
72}; 73};
73 74
74#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL) 75#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
diff --git a/include/linux/security.h b/include/linux/security.h
index 4824a4ccaf1c..157f0cb1e4d2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -24,10 +24,12 @@
24 24
25#include <linux/key.h> 25#include <linux/key.h>
26#include <linux/capability.h> 26#include <linux/capability.h>
27#include <linux/fs.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/err.h> 29#include <linux/err.h>
29#include <linux/string.h> 30#include <linux/string.h>
30#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/fs.h>
31 33
32struct linux_binprm; 34struct linux_binprm;
33struct cred; 35struct cred;
@@ -298,9 +300,11 @@ int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
298void security_transfer_creds(struct cred *new, const struct cred *old); 300void security_transfer_creds(struct cred *new, const struct cred *old);
299int security_kernel_act_as(struct cred *new, u32 secid); 301int security_kernel_act_as(struct cred *new, u32 secid);
300int security_kernel_create_files_as(struct cred *new, struct inode *inode); 302int security_kernel_create_files_as(struct cred *new, struct inode *inode);
301int security_kernel_fw_from_file(struct file *file, char *buf, size_t size);
302int security_kernel_module_request(char *kmod_name); 303int security_kernel_module_request(char *kmod_name);
303int security_kernel_module_from_file(struct file *file); 304int security_kernel_module_from_file(struct file *file);
305int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
306int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
307 enum kernel_read_file_id id);
304int security_task_fix_setuid(struct cred *new, const struct cred *old, 308int security_task_fix_setuid(struct cred *new, const struct cred *old,
305 int flags); 309 int flags);
306int security_task_setpgid(struct task_struct *p, pid_t pgid); 310int security_task_setpgid(struct task_struct *p, pid_t pgid);
@@ -850,18 +854,20 @@ static inline int security_kernel_create_files_as(struct cred *cred,
850 return 0; 854 return 0;
851} 855}
852 856
853static inline int security_kernel_fw_from_file(struct file *file, 857static inline int security_kernel_module_request(char *kmod_name)
854 char *buf, size_t size)
855{ 858{
856 return 0; 859 return 0;
857} 860}
858 861
859static inline int security_kernel_module_request(char *kmod_name) 862static inline int security_kernel_read_file(struct file *file,
863 enum kernel_read_file_id id)
860{ 864{
861 return 0; 865 return 0;
862} 866}
863 867
864static inline int security_kernel_module_from_file(struct file *file) 868static inline int security_kernel_post_read_file(struct file *file,
869 char *buf, loff_t size,
870 enum kernel_read_file_id id)
865{ 871{
866 return 0; 872 return 0;
867} 873}
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dde00defbaa5..f3d45dd42695 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -7,13 +7,10 @@
7#include <linux/mutex.h> 7#include <linux/mutex.h>
8#include <linux/cpumask.h> 8#include <linux/cpumask.h>
9#include <linux/nodemask.h> 9#include <linux/nodemask.h>
10#include <linux/fs.h>
11#include <linux/cred.h>
10 12
11struct seq_operations; 13struct seq_operations;
12struct file;
13struct path;
14struct inode;
15struct dentry;
16struct user_namespace;
17 14
18struct seq_file { 15struct seq_file {
19 char *buf; 16 char *buf;
@@ -27,9 +24,7 @@ struct seq_file {
27 struct mutex lock; 24 struct mutex lock;
28 const struct seq_operations *op; 25 const struct seq_operations *op;
29 int poll_event; 26 int poll_event;
30#ifdef CONFIG_USER_NS 27 const struct file *file;
31 struct user_namespace *user_ns;
32#endif
33 void *private; 28 void *private;
34}; 29};
35 30
@@ -147,7 +142,7 @@ int seq_release_private(struct inode *, struct file *);
147static inline struct user_namespace *seq_user_ns(struct seq_file *seq) 142static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
148{ 143{
149#ifdef CONFIG_USER_NS 144#ifdef CONFIG_USER_NS
150 return seq->user_ns; 145 return seq->file->f_cred->user_ns;
151#else 146#else
152 extern struct user_namespace init_user_ns; 147 extern struct user_namespace init_user_ns;
153 return &init_user_ns; 148 return &init_user_ns;
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index faa0e0370ce7..434879759725 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -76,6 +76,12 @@ struct uart_8250_ops {
76 void (*release_irq)(struct uart_8250_port *); 76 void (*release_irq)(struct uart_8250_port *);
77}; 77};
78 78
79struct uart_8250_em485 {
80 struct timer_list start_tx_timer; /* "rs485 start tx" timer */
81 struct timer_list stop_tx_timer; /* "rs485 stop tx" timer */
82 struct timer_list *active_timer; /* pointer to active timer */
83};
84
79/* 85/*
80 * This should be used by drivers which want to register 86 * This should be used by drivers which want to register
81 * their own 8250 ports without registering their own 87 * their own 8250 ports without registering their own
@@ -122,6 +128,8 @@ struct uart_8250_port {
122 /* 8250 specific callbacks */ 128 /* 8250 specific callbacks */
123 int (*dl_read)(struct uart_8250_port *); 129 int (*dl_read)(struct uart_8250_port *);
124 void (*dl_write)(struct uart_8250_port *, int); 130 void (*dl_write)(struct uart_8250_port *, int);
131
132 struct uart_8250_em485 *em485;
125}; 133};
126 134
127static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) 135static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index e03d6ba5e5b4..cbfcf38e220d 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -342,21 +342,26 @@ struct earlycon_device {
342 342
343struct earlycon_id { 343struct earlycon_id {
344 char name[16]; 344 char name[16];
345 char compatible[128];
345 int (*setup)(struct earlycon_device *, const char *options); 346 int (*setup)(struct earlycon_device *, const char *options);
346} __aligned(32); 347} __aligned(32);
347 348
348extern int setup_earlycon(char *buf); 349extern const struct earlycon_id __earlycon_table[];
349extern int of_setup_earlycon(unsigned long addr, 350extern const struct earlycon_id __earlycon_table_end[];
350 int (*setup)(struct earlycon_device *, const char *)); 351
352#define OF_EARLYCON_DECLARE(_name, compat, fn) \
353 static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
354 __used __section(__earlycon_table) \
355 = { .name = __stringify(_name), \
356 .compatible = compat, \
357 .setup = fn }
351 358
352#define EARLYCON_DECLARE(_name, func) \ 359#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
353 static const struct earlycon_id __earlycon_##_name \
354 __used __section(__earlycon_table) \
355 = { .name = __stringify(_name), \
356 .setup = func }
357 360
358#define OF_EARLYCON_DECLARE(name, compat, fn) \ 361extern int setup_earlycon(char *buf);
359 _OF_DECLARE(earlycon, name, compat, fn, void *) 362extern int of_setup_earlycon(const struct earlycon_id *match,
363 unsigned long node,
364 const char *options);
360 365
361struct uart_port *uart_get_console(struct uart_port *ports, int nr, 366struct uart_port *uart_get_console(struct uart_port *ports, int nr,
362 struct console *c); 367 struct console *c);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d3fcd4591ce4..15d0df943466 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1161,10 +1161,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1161 to->l4_hash = from->l4_hash; 1161 to->l4_hash = from->l4_hash;
1162}; 1162};
1163 1163
1164static inline void skb_sender_cpu_clear(struct sk_buff *skb)
1165{
1166}
1167
1168#ifdef NET_SKBUFF_DATA_USES_OFFSET 1164#ifdef NET_SKBUFF_DATA_USES_OFFSET
1169static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 1165static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1170{ 1166{
@@ -2186,6 +2182,11 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2186 return skb->csum_start - skb_headroom(skb); 2182 return skb->csum_start - skb_headroom(skb);
2187} 2183}
2188 2184
2185static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2186{
2187 return skb->head + skb->csum_start;
2188}
2189
2189static inline int skb_transport_offset(const struct sk_buff *skb) 2190static inline int skb_transport_offset(const struct sk_buff *skb)
2190{ 2191{
2191 return skb_transport_header(skb) - skb->data; 2192 return skb_transport_header(skb) - skb->data;
@@ -2424,6 +2425,10 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2424{ 2425{
2425 return __napi_alloc_skb(napi, length, GFP_ATOMIC); 2426 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2426} 2427}
2428void napi_consume_skb(struct sk_buff *skb, int budget);
2429
2430void __kfree_skb_flush(void);
2431void __kfree_skb_defer(struct sk_buff *skb);
2427 2432
2428/** 2433/**
2429 * __dev_alloc_pages - allocate page for network Rx 2434 * __dev_alloc_pages - allocate page for network Rx
@@ -2646,6 +2651,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len
2646 skb_headroom(skb) + len <= skb->hdr_len; 2651 skb_headroom(skb) + len <= skb->hdr_len;
2647} 2652}
2648 2653
2654static inline int skb_try_make_writable(struct sk_buff *skb,
2655 unsigned int write_len)
2656{
2657 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
2658 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2659}
2660
2649static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, 2661static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2650 int cloned) 2662 int cloned)
2651{ 2663{
@@ -3574,6 +3586,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3574struct skb_gso_cb { 3586struct skb_gso_cb {
3575 int mac_offset; 3587 int mac_offset;
3576 int encap_level; 3588 int encap_level;
3589 __wsum csum;
3577 __u16 csum_start; 3590 __u16 csum_start;
3578}; 3591};
3579#define SKB_SGO_CB_OFFSET 32 3592#define SKB_SGO_CB_OFFSET 32
@@ -3600,6 +3613,16 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3600 return 0; 3613 return 0;
3601} 3614}
3602 3615
3616static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
3617{
3618 /* Do not update partial checksums if remote checksum is enabled. */
3619 if (skb->remcsum_offload)
3620 return;
3621
3622 SKB_GSO_CB(skb)->csum = res;
3623 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
3624}
3625
3603/* Compute the checksum for a gso segment. First compute the checksum value 3626/* Compute the checksum for a gso segment. First compute the checksum value
3604 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and 3627 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
3605 * then add in skb->csum (checksum from csum_start to end of packet). 3628 * then add in skb->csum (checksum from csum_start to end of packet).
@@ -3610,15 +3633,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3610 */ 3633 */
3611static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) 3634static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
3612{ 3635{
3613 int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - 3636 unsigned char *csum_start = skb_transport_header(skb);
3614 skb_transport_offset(skb); 3637 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
3615 __wsum partial; 3638 __wsum partial = SKB_GSO_CB(skb)->csum;
3616 3639
3617 partial = csum_partial(skb_transport_header(skb), plen, skb->csum); 3640 SKB_GSO_CB(skb)->csum = res;
3618 skb->csum = res; 3641 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
3619 SKB_GSO_CB(skb)->csum_start -= plen;
3620 3642
3621 return csum_fold(partial); 3643 return csum_fold(csum_partial(csum_start, plen, partial));
3622} 3644}
3623 3645
3624static inline bool skb_is_gso(const struct sk_buff *skb) 3646static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3708,5 +3730,30 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
3708 return hdr_len + skb_gso_transport_seglen(skb); 3730 return hdr_len + skb_gso_transport_seglen(skb);
3709} 3731}
3710 3732
3733/* Local Checksum Offload.
3734 * Compute outer checksum based on the assumption that the
3735 * inner checksum will be offloaded later.
3736 * See Documentation/networking/checksum-offloads.txt for
3737 * explanation of how this works.
3738 * Fill in outer checksum adjustment (e.g. with sum of outer
3739 * pseudo-header) before calling.
3740 * Also ensure that inner checksum is in linear data area.
3741 */
3742static inline __wsum lco_csum(struct sk_buff *skb)
3743{
3744 unsigned char *csum_start = skb_checksum_start(skb);
3745 unsigned char *l4_hdr = skb_transport_header(skb);
3746 __wsum partial;
3747
3748 /* Start with complement of inner checksum adjustment */
3749 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
3750 skb->csum_offset));
3751
3752 /* Add in checksum of our headers (incl. outer checksum
3753 * adjustment filled in by caller) and return result.
3754 */
3755 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
3756}
3757
3711#endif /* __KERNEL__ */ 3758#endif /* __KERNEL__ */
3712#endif /* _LINUX_SKBUFF_H */ 3759#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3627d5c1bc47..508bd827e6dc 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,7 +20,7 @@
20 * Flags to pass to kmem_cache_create(). 20 * Flags to pass to kmem_cache_create().
21 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. 21 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
22 */ 22 */
23#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ 23#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
24#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ 24#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
25#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ 25#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
26#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ 26#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
@@ -92,6 +92,12 @@
92# define SLAB_ACCOUNT 0x00000000UL 92# define SLAB_ACCOUNT 0x00000000UL
93#endif 93#endif
94 94
95#ifdef CONFIG_KASAN
96#define SLAB_KASAN 0x08000000UL
97#else
98#define SLAB_KASAN 0x00000000UL
99#endif
100
95/* The following flags affect the page allocator grouping pages by mobility */ 101/* The following flags affect the page allocator grouping pages by mobility */
96#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ 102#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
97#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ 103#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
@@ -314,7 +320,7 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment
314void kmem_cache_free(struct kmem_cache *, void *); 320void kmem_cache_free(struct kmem_cache *, void *);
315 321
316/* 322/*
317 * Bulk allocation and freeing operations. These are accellerated in an 323 * Bulk allocation and freeing operations. These are accelerated in an
318 * allocator specific way to avoid taking locks repeatedly or building 324 * allocator specific way to avoid taking locks repeatedly or building
319 * metadata structures unnecessarily. 325 * metadata structures unnecessarily.
320 * 326 *
@@ -323,6 +329,15 @@ void kmem_cache_free(struct kmem_cache *, void *);
323void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); 329void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
324int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); 330int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
325 331
332/*
333 * Caller must not use kfree_bulk() on memory not originally allocated
334 * by kmalloc(), because the SLOB allocator cannot handle this.
335 */
336static __always_inline void kfree_bulk(size_t size, void **p)
337{
338 kmem_cache_free_bulk(NULL, size, p);
339}
340
326#ifdef CONFIG_NUMA 341#ifdef CONFIG_NUMA
327void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; 342void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
328void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; 343void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
@@ -361,7 +376,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
361{ 376{
362 void *ret = kmem_cache_alloc(s, flags); 377 void *ret = kmem_cache_alloc(s, flags);
363 378
364 kasan_kmalloc(s, ret, size); 379 kasan_kmalloc(s, ret, size, flags);
365 return ret; 380 return ret;
366} 381}
367 382
@@ -372,7 +387,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
372{ 387{
373 void *ret = kmem_cache_alloc_node(s, gfpflags, node); 388 void *ret = kmem_cache_alloc_node(s, gfpflags, node);
374 389
375 kasan_kmalloc(s, ret, size); 390 kasan_kmalloc(s, ret, size, gfpflags);
376 return ret; 391 return ret;
377} 392}
378#endif /* CONFIG_TRACING */ 393#endif /* CONFIG_TRACING */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cf139d3fa513..9edbbf352340 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -60,6 +60,9 @@ struct kmem_cache {
60 atomic_t allocmiss; 60 atomic_t allocmiss;
61 atomic_t freehit; 61 atomic_t freehit;
62 atomic_t freemiss; 62 atomic_t freemiss;
63#ifdef CONFIG_DEBUG_SLAB_LEAK
64 atomic_t store_user_clean;
65#endif
63 66
64 /* 67 /*
65 * If debugging is enabled, then the allocator can add additional 68 * If debugging is enabled, then the allocator can add additional
@@ -73,8 +76,22 @@ struct kmem_cache {
73#ifdef CONFIG_MEMCG 76#ifdef CONFIG_MEMCG
74 struct memcg_cache_params memcg_params; 77 struct memcg_cache_params memcg_params;
75#endif 78#endif
79#ifdef CONFIG_KASAN
80 struct kasan_cache kasan_info;
81#endif
76 82
77 struct kmem_cache_node *node[MAX_NUMNODES]; 83 struct kmem_cache_node *node[MAX_NUMNODES];
78}; 84};
79 85
86static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
87 void *x) {
88 void *object = x - (x - page->s_mem) % cache->size;
89 void *last_object = page->s_mem + (cache->num - 1) * cache->size;
90
91 if (unlikely(object > last_object))
92 return last_object;
93 else
94 return object;
95}
96
80#endif /* _LINUX_SLAB_DEF_H */ 97#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index b7e57927f521..665cd0cd18b8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -81,6 +81,7 @@ struct kmem_cache {
81 int reserved; /* Reserved bytes at the end of slabs */ 81 int reserved; /* Reserved bytes at the end of slabs */
82 const char *name; /* Name (only for display!) */ 82 const char *name; /* Name (only for display!) */
83 struct list_head list; /* List of slab caches */ 83 struct list_head list; /* List of slab caches */
84 int red_left_pad; /* Left redzone padding size */
84#ifdef CONFIG_SYSFS 85#ifdef CONFIG_SYSFS
85 struct kobject kobj; /* For sysfs */ 86 struct kobject kobj; /* For sysfs */
86#endif 87#endif
@@ -129,4 +130,15 @@ static inline void *virt_to_obj(struct kmem_cache *s,
129void object_err(struct kmem_cache *s, struct page *page, 130void object_err(struct kmem_cache *s, struct page *page,
130 u8 *object, char *reason); 131 u8 *object, char *reason);
131 132
133static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
134 void *x) {
135 void *object = x - (x - page_address(page)) % cache->size;
136 void *last_object = page_address(page) +
137 (page->objects - 1) * cache->size;
138 if (unlikely(object > last_object))
139 return last_object;
140 else
141 return object;
142}
143
132#endif /* _LINUX_SLUB_DEF_H */ 144#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
new file mode 100644
index 000000000000..e2e9de1acc5b
--- /dev/null
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Header for EXYNOS PMU Driver support
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __LINUX_SOC_EXYNOS_PMU_H
13#define __LINUX_SOC_EXYNOS_PMU_H
14
15enum sys_powerdown {
16 SYS_AFTR,
17 SYS_LPA,
18 SYS_SLEEP,
19 NUM_SYS_POWERDOWN,
20};
21
22extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
23
24#endif /* __LINUX_SOC_EXYNOS_PMU_H */
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
new file mode 100644
index 000000000000..d30186e2b609
--- /dev/null
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -0,0 +1,693 @@
1/*
2 * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * EXYNOS - Power management unit definition
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10*/
11
12#ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H
13#define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__
14
15#define S5P_CENTRAL_SEQ_CONFIGURATION 0x0200
16
17#define S5P_CENTRAL_LOWPWR_CFG (1 << 16)
18
19#define S5P_CENTRAL_SEQ_OPTION 0x0208
20
21#define S5P_USE_STANDBY_WFI0 (1 << 16)
22#define S5P_USE_STANDBY_WFI1 (1 << 17)
23#define S5P_USE_STANDBY_WFI2 (1 << 19)
24#define S5P_USE_STANDBY_WFI3 (1 << 20)
25#define S5P_USE_STANDBY_WFE0 (1 << 24)
26#define S5P_USE_STANDBY_WFE1 (1 << 25)
27#define S5P_USE_STANDBY_WFE2 (1 << 27)
28#define S5P_USE_STANDBY_WFE3 (1 << 28)
29
30#define S5P_USE_STANDBY_WFI_ALL \
31 (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFI1 | \
32 S5P_USE_STANDBY_WFI2 | S5P_USE_STANDBY_WFI3 | \
33 S5P_USE_STANDBY_WFE0 | S5P_USE_STANDBY_WFE1 | \
34 S5P_USE_STANDBY_WFE2 | S5P_USE_STANDBY_WFE3)
35
36#define S5P_USE_DELAYED_RESET_ASSERTION BIT(12)
37
38#define EXYNOS_CORE_PO_RESET(n) ((1 << 4) << n)
39#define EXYNOS_WAKEUP_FROM_LOWPWR (1 << 28)
40#define EXYNOS_SWRESET 0x0400
41#define EXYNOS5440_SWRESET 0x00C4
42
43#define S5P_WAKEUP_STAT 0x0600
44#define S5P_EINT_WAKEUP_MASK 0x0604
45#define S5P_WAKEUP_MASK 0x0608
46#define S5P_WAKEUP_MASK2 0x0614
47
48#define S5P_INFORM0 0x0800
49#define S5P_INFORM1 0x0804
50#define S5P_INFORM5 0x0814
51#define S5P_INFORM6 0x0818
52#define S5P_INFORM7 0x081C
53#define S5P_PMU_SPARE2 0x0908
54#define S5P_PMU_SPARE3 0x090C
55
56#define EXYNOS_IROM_DATA2 0x0988
57#define S5P_ARM_CORE0_LOWPWR 0x1000
58#define S5P_DIS_IRQ_CORE0 0x1004
59#define S5P_DIS_IRQ_CENTRAL0 0x1008
60#define S5P_ARM_CORE1_LOWPWR 0x1010
61#define S5P_DIS_IRQ_CORE1 0x1014
62#define S5P_DIS_IRQ_CENTRAL1 0x1018
63#define S5P_ARM_COMMON_LOWPWR 0x1080
64#define S5P_L2_0_LOWPWR 0x10C0
65#define S5P_L2_1_LOWPWR 0x10C4
66#define S5P_CMU_ACLKSTOP_LOWPWR 0x1100
67#define S5P_CMU_SCLKSTOP_LOWPWR 0x1104
68#define S5P_CMU_RESET_LOWPWR 0x110C
69#define S5P_APLL_SYSCLK_LOWPWR 0x1120
70#define S5P_MPLL_SYSCLK_LOWPWR 0x1124
71#define S5P_VPLL_SYSCLK_LOWPWR 0x1128
72#define S5P_EPLL_SYSCLK_LOWPWR 0x112C
73#define S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR 0x1138
74#define S5P_CMU_RESET_GPSALIVE_LOWPWR 0x113C
75#define S5P_CMU_CLKSTOP_CAM_LOWPWR 0x1140
76#define S5P_CMU_CLKSTOP_TV_LOWPWR 0x1144
77#define S5P_CMU_CLKSTOP_MFC_LOWPWR 0x1148
78#define S5P_CMU_CLKSTOP_G3D_LOWPWR 0x114C
79#define S5P_CMU_CLKSTOP_LCD0_LOWPWR 0x1150
80#define S5P_CMU_CLKSTOP_MAUDIO_LOWPWR 0x1158
81#define S5P_CMU_CLKSTOP_GPS_LOWPWR 0x115C
82#define S5P_CMU_RESET_CAM_LOWPWR 0x1160
83#define S5P_CMU_RESET_TV_LOWPWR 0x1164
84#define S5P_CMU_RESET_MFC_LOWPWR 0x1168
85#define S5P_CMU_RESET_G3D_LOWPWR 0x116C
86#define S5P_CMU_RESET_LCD0_LOWPWR 0x1170
87#define S5P_CMU_RESET_MAUDIO_LOWPWR 0x1178
88#define S5P_CMU_RESET_GPS_LOWPWR 0x117C
89#define S5P_TOP_BUS_LOWPWR 0x1180
90#define S5P_TOP_RETENTION_LOWPWR 0x1184
91#define S5P_TOP_PWR_LOWPWR 0x1188
92#define S5P_LOGIC_RESET_LOWPWR 0x11A0
93#define S5P_ONENAND_MEM_LOWPWR 0x11C0
94#define S5P_G2D_ACP_MEM_LOWPWR 0x11C8
95#define S5P_USBOTG_MEM_LOWPWR 0x11CC
96#define S5P_HSMMC_MEM_LOWPWR 0x11D0
97#define S5P_CSSYS_MEM_LOWPWR 0x11D4
98#define S5P_SECSS_MEM_LOWPWR 0x11D8
99#define S5P_PAD_RETENTION_DRAM_LOWPWR 0x1200
100#define S5P_PAD_RETENTION_MAUDIO_LOWPWR 0x1204
101#define S5P_PAD_RETENTION_GPIO_LOWPWR 0x1220
102#define S5P_PAD_RETENTION_UART_LOWPWR 0x1224
103#define S5P_PAD_RETENTION_MMCA_LOWPWR 0x1228
104#define S5P_PAD_RETENTION_MMCB_LOWPWR 0x122C
105#define S5P_PAD_RETENTION_EBIA_LOWPWR 0x1230
106#define S5P_PAD_RETENTION_EBIB_LOWPWR 0x1234
107#define S5P_PAD_RETENTION_ISOLATION_LOWPWR 0x1240
108#define S5P_PAD_RETENTION_ALV_SEL_LOWPWR 0x1260
109#define S5P_XUSBXTI_LOWPWR 0x1280
110#define S5P_XXTI_LOWPWR 0x1284
111#define S5P_EXT_REGULATOR_LOWPWR 0x12C0
112#define S5P_GPIO_MODE_LOWPWR 0x1300
113#define S5P_GPIO_MODE_MAUDIO_LOWPWR 0x1340
114#define S5P_CAM_LOWPWR 0x1380
115#define S5P_TV_LOWPWR 0x1384
116#define S5P_MFC_LOWPWR 0x1388
117#define S5P_G3D_LOWPWR 0x138C
118#define S5P_LCD0_LOWPWR 0x1390
119#define S5P_MAUDIO_LOWPWR 0x1398
120#define S5P_GPS_LOWPWR 0x139C
121#define S5P_GPS_ALIVE_LOWPWR 0x13A0
122
123#define EXYNOS_ARM_CORE0_CONFIGURATION 0x2000
124#define EXYNOS_ARM_CORE_CONFIGURATION(_nr) \
125 (EXYNOS_ARM_CORE0_CONFIGURATION + (0x80 * (_nr)))
126#define EXYNOS_ARM_CORE_STATUS(_nr) \
127 (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x4)
128#define EXYNOS_ARM_CORE_OPTION(_nr) \
129 (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x8)
130
131#define EXYNOS_ARM_COMMON_CONFIGURATION 0x2500
132#define EXYNOS_COMMON_CONFIGURATION(_nr) \
133 (EXYNOS_ARM_COMMON_CONFIGURATION + (0x80 * (_nr)))
134#define EXYNOS_COMMON_STATUS(_nr) \
135 (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x4)
136#define EXYNOS_COMMON_OPTION(_nr) \
137 (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8)
138
139#define EXYNOS_CORE_LOCAL_PWR_EN 0x3
140
141#define EXYNOS_ARM_COMMON_STATUS 0x2504
142#define EXYNOS_COMMON_OPTION(_nr) \
143 (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8)
144
145#define EXYNOS_ARM_L2_CONFIGURATION 0x2600
146#define EXYNOS_L2_CONFIGURATION(_nr) \
147 (EXYNOS_ARM_L2_CONFIGURATION + ((_nr) * 0x80))
148#define EXYNOS_L2_STATUS(_nr) \
149 (EXYNOS_L2_CONFIGURATION(_nr) + 0x4)
150#define EXYNOS_L2_OPTION(_nr) \
151 (EXYNOS_L2_CONFIGURATION(_nr) + 0x8)
152#define EXYNOS_L2_COMMON_PWR_EN 0x3
153
154#define EXYNOS_ARM_CORE_X_STATUS_OFFSET 0x4
155
156#define EXYNOS5_APLL_SYSCLK_CONFIGURATION 0x2A00
157#define EXYNOS5_APLL_SYSCLK_STATUS 0x2A04
158
159#define EXYNOS5_ARM_L2_OPTION 0x2608
160#define EXYNOS5_USE_RETENTION BIT(4)
161
162#define EXYNOS5_L2RSTDISABLE_VALUE BIT(3)
163
164#define S5P_PAD_RET_MAUDIO_OPTION 0x3028
165#define S5P_PAD_RET_MMC2_OPTION 0x30c8
166#define S5P_PAD_RET_GPIO_OPTION 0x3108
167#define S5P_PAD_RET_UART_OPTION 0x3128
168#define S5P_PAD_RET_MMCA_OPTION 0x3148
169#define S5P_PAD_RET_MMCB_OPTION 0x3168
170#define S5P_PAD_RET_EBIA_OPTION 0x3188
171#define S5P_PAD_RET_EBIB_OPTION 0x31A8
172#define S5P_PAD_RET_SPI_OPTION 0x31c8
173
174#define S5P_PS_HOLD_CONTROL 0x330C
175#define S5P_PS_HOLD_EN (1 << 31)
176#define S5P_PS_HOLD_OUTPUT_HIGH (3 << 8)
177
178#define S5P_CAM_OPTION 0x3C08
179#define S5P_MFC_OPTION 0x3C48
180#define S5P_G3D_OPTION 0x3C68
181#define S5P_LCD0_OPTION 0x3C88
182#define S5P_LCD1_OPTION 0x3CA8
183#define S5P_ISP_OPTION S5P_LCD1_OPTION
184
185#define S5P_CORE_LOCAL_PWR_EN 0x3
186#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
187#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
188
189/* Only for EXYNOS4210 */
190#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
191#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
192#define S5P_MODIMIF_MEM_LOWPWR 0x11C4
193#define S5P_PCIE_MEM_LOWPWR 0x11E0
194#define S5P_SATA_MEM_LOWPWR 0x11E4
195#define S5P_LCD1_LOWPWR 0x1394
196
197/* Only for EXYNOS4x12 */
198#define S5P_ISP_ARM_LOWPWR 0x1050
199#define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR 0x1054
200#define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR 0x1058
201#define S5P_CMU_ACLKSTOP_COREBLK_LOWPWR 0x1110
202#define S5P_CMU_SCLKSTOP_COREBLK_LOWPWR 0x1114
203#define S5P_CMU_RESET_COREBLK_LOWPWR 0x111C
204#define S5P_MPLLUSER_SYSCLK_LOWPWR 0x1130
205#define S5P_CMU_CLKSTOP_ISP_LOWPWR 0x1154
206#define S5P_CMU_RESET_ISP_LOWPWR 0x1174
207#define S5P_TOP_BUS_COREBLK_LOWPWR 0x1190
208#define S5P_TOP_RETENTION_COREBLK_LOWPWR 0x1194
209#define S5P_TOP_PWR_COREBLK_LOWPWR 0x1198
210#define S5P_OSCCLK_GATE_LOWPWR 0x11A4
211#define S5P_LOGIC_RESET_COREBLK_LOWPWR 0x11B0
212#define S5P_OSCCLK_GATE_COREBLK_LOWPWR 0x11B4
213#define S5P_HSI_MEM_LOWPWR 0x11C4
214#define S5P_ROTATOR_MEM_LOWPWR 0x11DC
215#define S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR 0x123C
216#define S5P_PAD_ISOLATION_COREBLK_LOWPWR 0x1250
217#define S5P_GPIO_MODE_COREBLK_LOWPWR 0x1320
218#define S5P_TOP_ASB_RESET_LOWPWR 0x1344
219#define S5P_TOP_ASB_ISOLATION_LOWPWR 0x1348
220#define S5P_ISP_LOWPWR 0x1394
221#define S5P_DRAM_FREQ_DOWN_LOWPWR 0x13B0
222#define S5P_DDRPHY_DLLOFF_LOWPWR 0x13B4
223#define S5P_CMU_SYSCLK_ISP_LOWPWR 0x13B8
224#define S5P_CMU_SYSCLK_GPS_LOWPWR 0x13BC
225#define S5P_LPDDR_PHY_DLL_LOCK_LOWPWR 0x13C0
226
227#define S5P_ARM_L2_0_OPTION 0x2608
228#define S5P_ARM_L2_1_OPTION 0x2628
229#define S5P_ONENAND_MEM_OPTION 0x2E08
230#define S5P_HSI_MEM_OPTION 0x2E28
231#define S5P_G2D_ACP_MEM_OPTION 0x2E48
232#define S5P_USBOTG_MEM_OPTION 0x2E68
233#define S5P_HSMMC_MEM_OPTION 0x2E88
234#define S5P_CSSYS_MEM_OPTION 0x2EA8
235#define S5P_SECSS_MEM_OPTION 0x2EC8
236#define S5P_ROTATOR_MEM_OPTION 0x2F48
237
238/* Only for EXYNOS4412 */
239#define S5P_ARM_CORE2_LOWPWR 0x1020
240#define S5P_DIS_IRQ_CORE2 0x1024
241#define S5P_DIS_IRQ_CENTRAL2 0x1028
242#define S5P_ARM_CORE3_LOWPWR 0x1030
243#define S5P_DIS_IRQ_CORE3 0x1034
244#define S5P_DIS_IRQ_CENTRAL3 0x1038
245
246/* Only for EXYNOS3XXX */
247#define EXYNOS3_ARM_CORE0_SYS_PWR_REG 0x1000
248#define EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004
249#define EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008
250#define EXYNOS3_ARM_CORE1_SYS_PWR_REG 0x1010
251#define EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014
252#define EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018
253#define EXYNOS3_ISP_ARM_SYS_PWR_REG 0x1050
254#define EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054
255#define EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058
256#define EXYNOS3_ARM_COMMON_SYS_PWR_REG 0x1080
257#define EXYNOS3_ARM_L2_SYS_PWR_REG 0x10C0
258#define EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG 0x1100
259#define EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG 0x1104
260#define EXYNOS3_CMU_RESET_SYS_PWR_REG 0x110C
261#define EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG 0x1110
262#define EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG 0x1114
263#define EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG 0x111C
264#define EXYNOS3_APLL_SYSCLK_SYS_PWR_REG 0x1120
265#define EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG 0x1124
266#define EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG 0x1128
267#define EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG 0x112C
268#define EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1130
269#define EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1134
270#define EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG 0x1138
271#define EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG 0x1140
272#define EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1148
273#define EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x114C
274#define EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG 0x1150
275#define EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1154
276#define EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG 0x1158
277#define EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG 0x1160
278#define EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG 0x1168
279#define EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG 0x116C
280#define EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG 0x1170
281#define EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG 0x1174
282#define EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG 0x1178
283#define EXYNOS3_TOP_BUS_SYS_PWR_REG 0x1180
284#define EXYNOS3_TOP_RETENTION_SYS_PWR_REG 0x1184
285#define EXYNOS3_TOP_PWR_SYS_PWR_REG 0x1188
286#define EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG 0x1190
287#define EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG 0x1194
288#define EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG 0x1198
289#define EXYNOS3_LOGIC_RESET_SYS_PWR_REG 0x11A0
290#define EXYNOS3_OSCCLK_GATE_SYS_PWR_REG 0x11A4
291#define EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG 0x11B0
292#define EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG 0x11B4
293#define EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200
294#define EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG 0x1204
295#define EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG 0x1208
296#define EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1218
297#define EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220
298#define EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG 0x1224
299#define EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1228
300#define EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG 0x122C
301#define EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230
302#define EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234
303#define EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1238
304#define EXYNOS3_PAD_ISOLATION_SYS_PWR_REG 0x1240
305#define EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG 0x1260
306#define EXYNOS3_XUSBXTI_SYS_PWR_REG 0x1280
307#define EXYNOS3_XXTI_SYS_PWR_REG 0x1284
308#define EXYNOS3_EXT_REGULATOR_SYS_PWR_REG 0x12C0
309#define EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG 0x12C4
310#define EXYNOS3_GPIO_MODE_SYS_PWR_REG 0x1300
311#define EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG 0x1340
312#define EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG 0x1344
313#define EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348
314#define EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG 0x1350
315#define EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG 0x1354
316#define EXYNOS3_CAM_SYS_PWR_REG 0x1380
317#define EXYNOS3_MFC_SYS_PWR_REG 0x1388
318#define EXYNOS3_G3D_SYS_PWR_REG 0x138C
319#define EXYNOS3_LCD0_SYS_PWR_REG 0x1390
320#define EXYNOS3_ISP_SYS_PWR_REG 0x1394
321#define EXYNOS3_MAUDIO_SYS_PWR_REG 0x1398
322#define EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG 0x13B0
323#define EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG 0x13B4
324#define EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG 0x13B8
325#define EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG 0x13C0
326#define EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG 0x13C4
327#define EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG 0x13C8
328
329#define EXYNOS3_ARM_CORE0_OPTION 0x2008
330#define EXYNOS3_ARM_CORE_OPTION(_nr) \
331 (EXYNOS3_ARM_CORE0_OPTION + ((_nr) * 0x80))
332
333#define EXYNOS3_ARM_COMMON_OPTION 0x2408
334#define EXYNOS3_ARM_L2_OPTION 0x2608
335#define EXYNOS3_TOP_PWR_OPTION 0x2C48
336#define EXYNOS3_CORE_TOP_PWR_OPTION 0x2CA8
337#define EXYNOS3_XUSBXTI_DURATION 0x341C
338#define EXYNOS3_XXTI_DURATION 0x343C
339#define EXYNOS3_EXT_REGULATOR_DURATION 0x361C
340#define EXYNOS3_EXT_REGULATOR_COREBLK_DURATION 0x363C
341#define XUSBXTI_DURATION 0x00000BB8
342#define XXTI_DURATION XUSBXTI_DURATION
343#define EXT_REGULATOR_DURATION 0x00001D4C
344#define EXT_REGULATOR_COREBLK_DURATION EXT_REGULATOR_DURATION
345
346/* for XXX_OPTION */
347#define EXYNOS3_OPTION_USE_SC_COUNTER (1 << 0)
348#define EXYNOS3_OPTION_USE_SC_FEEDBACK (1 << 1)
349#define EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
350
351/* For EXYNOS5 */
352
353#define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408
354#define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C
355
356#define EXYNOS5_USE_RETENTION BIT(4)
357#define EXYNOS5_SYS_WDTRESET (1 << 20)
358
359#define EXYNOS5_ARM_CORE0_SYS_PWR_REG 0x1000
360#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004
361#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008
362#define EXYNOS5_ARM_CORE1_SYS_PWR_REG 0x1010
363#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014
364#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018
365#define EXYNOS5_FSYS_ARM_SYS_PWR_REG 0x1040
366#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG 0x1048
367#define EXYNOS5_ISP_ARM_SYS_PWR_REG 0x1050
368#define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054
369#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058
370#define EXYNOS5_ARM_COMMON_SYS_PWR_REG 0x1080
371#define EXYNOS5_ARM_L2_SYS_PWR_REG 0x10C0
372#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG 0x1100
373#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG 0x1104
374#define EXYNOS5_CMU_RESET_SYS_PWR_REG 0x110C
375#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG 0x1120
376#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG 0x1124
377#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG 0x112C
378#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG 0x1130
379#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG 0x1134
380#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG 0x1138
381#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG 0x1140
382#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG 0x1144
383#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG 0x1148
384#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG 0x114C
385#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG 0x1150
386#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG 0x1154
387#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1164
388#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1170
389#define EXYNOS5_TOP_BUS_SYS_PWR_REG 0x1180
390#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG 0x1184
391#define EXYNOS5_TOP_PWR_SYS_PWR_REG 0x1188
392#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG 0x1190
393#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG 0x1194
394#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG 0x1198
395#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG 0x11A0
396#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG 0x11A4
397#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG 0x11B0
398#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG 0x11B4
399#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG 0x11C0
400#define EXYNOS5_G2D_MEM_SYS_PWR_REG 0x11C8
401#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG 0x11CC
402#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG 0x11D0
403#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG 0x11D4
404#define EXYNOS5_SECSS_MEM_SYS_PWR_REG 0x11D8
405#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG 0x11DC
406#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG 0x11E0
407#define EXYNOS5_INTROM_MEM_SYS_PWR_REG 0x11E4
408#define EXYNOS5_JPEG_MEM_SYS_PWR_REG 0x11E8
409#define EXYNOS5_HSI_MEM_SYS_PWR_REG 0x11EC
410#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG 0x11F4
411#define EXYNOS5_SATA_MEM_SYS_PWR_REG 0x11FC
412#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200
413#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG 0x1204
414#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG 0x1208
415#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220
416#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG 0x1224
417#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228
418#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG 0x122C
419#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230
420#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234
421#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG 0x1238
422#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG 0x123C
423#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG 0x1240
424#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG 0x1250
425#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG 0x1260
426#define EXYNOS5_XUSBXTI_SYS_PWR_REG 0x1280
427#define EXYNOS5_XXTI_SYS_PWR_REG 0x1284
428#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG 0x12C0
429#define EXYNOS5_GPIO_MODE_SYS_PWR_REG 0x1300
430#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG 0x1320
431#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG 0x1340
432#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG 0x1344
433#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348
434#define EXYNOS5_GSCL_SYS_PWR_REG 0x1400
435#define EXYNOS5_ISP_SYS_PWR_REG 0x1404
436#define EXYNOS5_MFC_SYS_PWR_REG 0x1408
437#define EXYNOS5_G3D_SYS_PWR_REG 0x140C
438#define EXYNOS5_DISP1_SYS_PWR_REG 0x1414
439#define EXYNOS5_MAU_SYS_PWR_REG 0x1418
440#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG 0x1480
441#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1484
442#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1488
443#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x148C
444#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1494
445#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1498
446#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG 0x14C0
447#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG 0x14C4
448#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG 0x14C8
449#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG 0x14CC
450#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D4
451#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D8
452#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG 0x1580
453#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG 0x1584
454#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG 0x1588
455#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG 0x158C
456#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG 0x1594
457#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG 0x1598
458
459#define EXYNOS5_ARM_CORE0_OPTION 0x2008
460#define EXYNOS5_ARM_CORE1_OPTION 0x2088
461#define EXYNOS5_FSYS_ARM_OPTION 0x2208
462#define EXYNOS5_ISP_ARM_OPTION 0x2288
463#define EXYNOS5_ARM_COMMON_OPTION 0x2408
464#define EXYNOS5_ARM_L2_OPTION 0x2608
465#define EXYNOS5_TOP_PWR_OPTION 0x2C48
466#define EXYNOS5_TOP_PWR_SYSMEM_OPTION 0x2CC8
467#define EXYNOS5_JPEG_MEM_OPTION 0x2F48
468#define EXYNOS5_GSCL_OPTION 0x4008
469#define EXYNOS5_ISP_OPTION 0x4028
470#define EXYNOS5_MFC_OPTION 0x4048
471#define EXYNOS5_G3D_OPTION 0x4068
472#define EXYNOS5_DISP1_OPTION 0x40A8
473#define EXYNOS5_MAU_OPTION 0x40C8
474
475#define EXYNOS5_USE_SC_FEEDBACK (1 << 1)
476#define EXYNOS5_USE_SC_COUNTER (1 << 0)
477
478#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
479
480#define EXYNOS5_OPTION_USE_STANDBYWFE (1 << 24)
481#define EXYNOS5_OPTION_USE_STANDBYWFI (1 << 16)
482
483#define EXYNOS5_OPTION_USE_RETENTION (1 << 4)
484
485#define EXYNOS5420_SWRESET_KFC_SEL 0x3
486
487/* Only for EXYNOS5420 */
488#define EXYNOS5420_ISP_ARM_OPTION 0x2488
489#define EXYNOS5420_L2RSTDISABLE_VALUE BIT(3)
490
491#define EXYNOS5420_LPI_MASK 0x0004
492#define EXYNOS5420_LPI_MASK1 0x0008
493#define EXYNOS5420_UFS BIT(8)
494#define EXYNOS5420_ATB_KFC BIT(13)
495#define EXYNOS5420_ATB_ISP_ARM BIT(19)
496#define EXYNOS5420_EMULATION BIT(31)
497#define ATB_ISP_ARM BIT(12)
498#define ATB_KFC BIT(13)
499#define ATB_NOC BIT(14)
500
501#define EXYNOS5420_ARM_INTR_SPREAD_ENABLE 0x0100
502#define EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI 0x0104
503#define EXYNOS5420_UP_SCHEDULER 0x0120
504#define SPREAD_ENABLE 0xF
505#define SPREAD_USE_STANDWFI 0xF
506
507#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
508#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
509
510#define EXYNOS5420_KFC_CORE_RESET(_nr) \
511 ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
512
513#define EXYNOS5420_BB_CON1 0x0784
514#define EXYNOS5420_BB_SEL_EN BIT(31)
515#define EXYNOS5420_BB_PMOS_EN BIT(7)
516#define EXYNOS5420_BB_1300X 0XF
517
518#define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020
519#define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024
520#define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028
521#define EXYNOS5420_ARM_CORE3_SYS_PWR_REG 0x1030
522#define EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG 0x1034
523#define EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG 0x1038
524#define EXYNOS5420_KFC_CORE0_SYS_PWR_REG 0x1040
525#define EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG 0x1044
526#define EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG 0x1048
527#define EXYNOS5420_KFC_CORE1_SYS_PWR_REG 0x1050
528#define EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG 0x1054
529#define EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG 0x1058
530#define EXYNOS5420_KFC_CORE2_SYS_PWR_REG 0x1060
531#define EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG 0x1064
532#define EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG 0x1068
533#define EXYNOS5420_KFC_CORE3_SYS_PWR_REG 0x1070
534#define EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG 0x1074
535#define EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG 0x1078
536#define EXYNOS5420_ISP_ARM_SYS_PWR_REG 0x1090
537#define EXYNOS5420_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1094
538#define EXYNOS5420_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1098
539#define EXYNOS5420_ARM_COMMON_SYS_PWR_REG 0x10A0
540#define EXYNOS5420_KFC_COMMON_SYS_PWR_REG 0x10B0
541#define EXYNOS5420_KFC_L2_SYS_PWR_REG 0x10D0
542#define EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG 0x1158
543#define EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG 0x115C
544#define EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG 0x1160
545#define EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG 0x1174
546#define EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG 0x1178
547#define EXYNOS5420_INTRAM_MEM_SYS_PWR_REG 0x11B8
548#define EXYNOS5420_INTROM_MEM_SYS_PWR_REG 0x11BC
549#define EXYNOS5420_ONENANDXL_MEM_SYS_PWR 0x11C0
550#define EXYNOS5420_USBDEV_MEM_SYS_PWR 0x11CC
551#define EXYNOS5420_USBDEV1_MEM_SYS_PWR 0x11D0
552#define EXYNOS5420_SDMMC_MEM_SYS_PWR 0x11D4
553#define EXYNOS5420_CSSYS_MEM_SYS_PWR 0x11D8
554#define EXYNOS5420_SECSS_MEM_SYS_PWR 0x11DC
555#define EXYNOS5420_ROTATOR_MEM_SYS_PWR 0x11E0
556#define EXYNOS5420_INTRAM_MEM_SYS_PWR 0x11E4
557#define EXYNOS5420_INTROM_MEM_SYS_PWR 0x11E8
558#define EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1208
559#define EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1210
560#define EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG 0x1214
561#define EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1218
562#define EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG 0x121C
563#define EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1220
564#define EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG 0x1224
565#define EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1228
566#define EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG 0x122C
567#define EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG 0x1230
568#define EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG 0x1234
569#define EXYNOS5420_DISP1_SYS_PWR_REG 0x1410
570#define EXYNOS5420_MAU_SYS_PWR_REG 0x1414
571#define EXYNOS5420_G2D_SYS_PWR_REG 0x1418
572#define EXYNOS5420_MSC_SYS_PWR_REG 0x141C
573#define EXYNOS5420_FSYS_SYS_PWR_REG 0x1420
574#define EXYNOS5420_FSYS2_SYS_PWR_REG 0x1424
575#define EXYNOS5420_PSGEN_SYS_PWR_REG 0x1428
576#define EXYNOS5420_PERIC_SYS_PWR_REG 0x142C
577#define EXYNOS5420_WCORE_SYS_PWR_REG 0x1430
578#define EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1490
579#define EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1494
580#define EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG 0x1498
581#define EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG 0x149C
582#define EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG 0x14A0
583#define EXYNOS5420_CMU_CLKSTOP_FSYS2_SYS_PWR_REG 0x14A4
584#define EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG 0x14A8
585#define EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG 0x14AC
586#define EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG 0x14B0
587#define EXYNOS5420_CMU_SYSCLK_TOPPWR_SYS_PWR_REG 0x14BC
588#define EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D0
589#define EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D4
590#define EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG 0x14D8
591#define EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG 0x14DC
592#define EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG 0x14E0
593#define EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG 0x14E4
594#define EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG 0x14E8
595#define EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG 0x14EC
596#define EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG 0x14F0
597#define EXYNOS5420_CMU_SYSCLK_SYSMEM_TOPPWR_SYS_PWR_REG 0x14F4
598#define EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG 0x1570
599#define EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG 0x1574
600#define EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG 0x1578
601#define EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG 0x157C
602#define EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG 0x1590
603#define EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG 0x1594
604#define EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG 0x1598
605#define EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG 0x159C
606#define EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG 0x15A0
607#define EXYNOS5420_SFR_AXI_CGDIS1 0x15E4
608#define EXYNOS_ARM_CORE2_CONFIGURATION 0x2100
609#define EXYNOS5420_ARM_CORE2_OPTION 0x2108
610#define EXYNOS_ARM_CORE3_CONFIGURATION 0x2180
611#define EXYNOS5420_ARM_CORE3_OPTION 0x2188
612#define EXYNOS5420_ARM_COMMON_STATUS 0x2504
613#define EXYNOS5420_ARM_COMMON_OPTION 0x2508
614#define EXYNOS5420_KFC_COMMON_STATUS 0x2584
615#define EXYNOS5420_KFC_COMMON_OPTION 0x2588
616#define EXYNOS5420_LOGIC_RESET_DURATION3 0x2D1C
617
618#define EXYNOS5420_PAD_RET_GPIO_OPTION 0x30C8
619#define EXYNOS5420_PAD_RET_UART_OPTION 0x30E8
620#define EXYNOS5420_PAD_RET_MMCA_OPTION 0x3108
621#define EXYNOS5420_PAD_RET_MMCB_OPTION 0x3128
622#define EXYNOS5420_PAD_RET_MMCC_OPTION 0x3148
623#define EXYNOS5420_PAD_RET_HSI_OPTION 0x3168
624#define EXYNOS5420_PAD_RET_SPI_OPTION 0x31C8
625#define EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION 0x31E8
626#define EXYNOS_PAD_RET_DRAM_OPTION 0x3008
627#define EXYNOS_PAD_RET_MAUDIO_OPTION 0x3028
628#define EXYNOS_PAD_RET_JTAG_OPTION 0x3048
629#define EXYNOS_PAD_RET_GPIO_OPTION 0x3108
630#define EXYNOS_PAD_RET_UART_OPTION 0x3128
631#define EXYNOS_PAD_RET_MMCA_OPTION 0x3148
632#define EXYNOS_PAD_RET_MMCB_OPTION 0x3168
633#define EXYNOS_PAD_RET_EBIA_OPTION 0x3188
634#define EXYNOS_PAD_RET_EBIB_OPTION 0x31A8
635
636#define EXYNOS_PS_HOLD_CONTROL 0x330C
637
638/* For SYS_PWR_REG */
639#define EXYNOS_SYS_PWR_CFG BIT(0)
640
641#define EXYNOS5420_MFC_CONFIGURATION 0x4060
642#define EXYNOS5420_MFC_STATUS 0x4064
643#define EXYNOS5420_MFC_OPTION 0x4068
644#define EXYNOS5420_G3D_CONFIGURATION 0x4080
645#define EXYNOS5420_G3D_STATUS 0x4084
646#define EXYNOS5420_G3D_OPTION 0x4088
647#define EXYNOS5420_DISP0_CONFIGURATION 0x40A0
648#define EXYNOS5420_DISP0_STATUS 0x40A4
649#define EXYNOS5420_DISP0_OPTION 0x40A8
650#define EXYNOS5420_DISP1_CONFIGURATION 0x40C0
651#define EXYNOS5420_DISP1_STATUS 0x40C4
652#define EXYNOS5420_DISP1_OPTION 0x40C8
653#define EXYNOS5420_MAU_CONFIGURATION 0x40E0
654#define EXYNOS5420_MAU_STATUS 0x40E4
655#define EXYNOS5420_MAU_OPTION 0x40E8
656#define EXYNOS5420_FSYS2_OPTION 0x4168
657#define EXYNOS5420_PSGEN_OPTION 0x4188
658
659/* For EXYNOS_CENTRAL_SEQ_OPTION */
660#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16)
661#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1 BIT(17)
662#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24)
663#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25)
664
665#define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4)
666#define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5)
667#define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6)
668#define EXYNOS5420_ARM_USE_STANDBY_WFI3 BIT(7)
669#define EXYNOS5420_KFC_USE_STANDBY_WFI0 BIT(8)
670#define EXYNOS5420_KFC_USE_STANDBY_WFI1 BIT(9)
671#define EXYNOS5420_KFC_USE_STANDBY_WFI2 BIT(10)
672#define EXYNOS5420_KFC_USE_STANDBY_WFI3 BIT(11)
673#define EXYNOS5420_ARM_USE_STANDBY_WFE0 BIT(16)
674#define EXYNOS5420_ARM_USE_STANDBY_WFE1 BIT(17)
675#define EXYNOS5420_ARM_USE_STANDBY_WFE2 BIT(18)
676#define EXYNOS5420_ARM_USE_STANDBY_WFE3 BIT(19)
677#define EXYNOS5420_KFC_USE_STANDBY_WFE0 BIT(20)
678#define EXYNOS5420_KFC_USE_STANDBY_WFE1 BIT(21)
679#define EXYNOS5420_KFC_USE_STANDBY_WFE2 BIT(22)
680#define EXYNOS5420_KFC_USE_STANDBY_WFE3 BIT(23)
681
682#define DUR_WAIT_RESET 0xF
683
684#define EXYNOS5420_USE_STANDBY_WFI_ALL (EXYNOS5420_ARM_USE_STANDBY_WFI0 \
685 | EXYNOS5420_ARM_USE_STANDBY_WFI1 \
686 | EXYNOS5420_ARM_USE_STANDBY_WFI2 \
687 | EXYNOS5420_ARM_USE_STANDBY_WFI3 \
688 | EXYNOS5420_KFC_USE_STANDBY_WFI0 \
689 | EXYNOS5420_KFC_USE_STANDBY_WFI1 \
690 | EXYNOS5420_KFC_USE_STANDBY_WFI2 \
691 | EXYNOS5420_KFC_USE_STANDBY_WFI3)
692
693#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */
diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h
new file mode 100644
index 000000000000..eac8e0c6fe11
--- /dev/null
+++ b/include/linux/soc/ti/ti-msgmgr.h
@@ -0,0 +1,35 @@
1/*
2 * Texas Instruments' Message Manager
3 *
4 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
5 * Nishanth Menon
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef TI_MSGMGR_H
18#define TI_MSGMGR_H
19
20/**
21 * struct ti_msgmgr_message - Message Manager structure
22 * @len: Length of data in the Buffer
23 * @buf: Buffer pointer
24 *
25 * This is the structure for data used in mbox_send_message
26 * the length of data buffer used depends on the SoC integration
27 * parameters - each message may be 64, 128 bytes long depending
28 * on SoC. Client is supposed to be aware of this.
29 */
30struct ti_msgmgr_message {
31 size_t len; /* number of valid payload bytes in @buf; per the kernel-doc above, the SoC-specific limit may be 64 or 128 bytes */
32 u8 *buf; /* pointer to the payload handed to mbox_send_message() */
33};
34
35#endif /* TI_MSGMGR_H */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5bf59c8493b7..73bf6c6a833b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -200,7 +200,9 @@ struct ucred {
200#define AF_ALG 38 /* Algorithm sockets */ 200#define AF_ALG 38 /* Algorithm sockets */
201#define AF_NFC 39 /* NFC sockets */ 201#define AF_NFC 39 /* NFC sockets */
202#define AF_VSOCK 40 /* vSockets */ 202#define AF_VSOCK 40 /* vSockets */
203#define AF_MAX 41 /* For now.. */ 203#define AF_KCM 41 /* Kernel Connection Multiplexor*/
204
205#define AF_MAX 42 /* For now.. */
204 206
205/* Protocol families, same as address families. */ 207/* Protocol families, same as address families. */
206#define PF_UNSPEC AF_UNSPEC 208#define PF_UNSPEC AF_UNSPEC
@@ -246,6 +248,7 @@ struct ucred {
246#define PF_ALG AF_ALG 248#define PF_ALG AF_ALG
247#define PF_NFC AF_NFC 249#define PF_NFC AF_NFC
248#define PF_VSOCK AF_VSOCK 250#define PF_VSOCK AF_VSOCK
251#define PF_KCM AF_KCM
249#define PF_MAX AF_MAX 252#define PF_MAX AF_MAX
250 253
251/* Maximum queue length specifiable by listen. */ 254/* Maximum queue length specifiable by listen. */
@@ -274,6 +277,7 @@ struct ucred {
274#define MSG_MORE 0x8000 /* Sender will send more */ 277#define MSG_MORE 0x8000 /* Sender will send more */
275#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ 278#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
276#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ 279#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
280#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
277#define MSG_EOF MSG_FIN 281#define MSG_EOF MSG_FIN
278 282
279#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ 283#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
@@ -322,6 +326,7 @@ struct ucred {
322#define SOL_CAIF 278 326#define SOL_CAIF 278
323#define SOL_ALG 279 327#define SOL_ALG 279
324#define SOL_NFC 280 328#define SOL_NFC 280
329#define SOL_KCM 281
325 330
326/* IPX options */ 331/* IPX options */
327#define IPX_TYPE 1 332#define IPX_TYPE 1
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h
index 403e007aef68..e34e169f9dcb 100644
--- a/include/linux/spi/eeprom.h
+++ b/include/linux/spi/eeprom.h
@@ -30,8 +30,6 @@ struct spi_eeprom {
30 */ 30 */
31#define EE_INSTR_BIT3_IS_ADDR 0x0010 31#define EE_INSTR_BIT3_IS_ADDR 0x0010
32 32
33 /* for exporting this chip's data to other kernel code */
34 void (*setup)(struct memory_accessor *mem, void *context);
35 void *context; 33 void *context;
36}; 34};
37 35
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 53be3a4c60cb..857a9a1d82b5 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -25,6 +25,7 @@
25struct dma_chan; 25struct dma_chan;
26struct spi_master; 26struct spi_master;
27struct spi_transfer; 27struct spi_transfer;
28struct spi_flash_read_message;
28 29
29/* 30/*
30 * INTERFACES between SPI master-side drivers and SPI infrastructure. 31 * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -53,6 +54,10 @@ extern struct bus_type spi_bus_type;
53 * 54 *
54 * @transfer_bytes_histo: 55 * @transfer_bytes_histo:
55 * transfer bytes histogramm 56 * transfer bytes histogramm
57 *
58 * @transfers_split_maxsize:
59 * number of transfers that have been split because of
60 * maxsize limit
56 */ 61 */
57struct spi_statistics { 62struct spi_statistics {
58 spinlock_t lock; /* lock for the whole structure */ 63 spinlock_t lock; /* lock for the whole structure */
@@ -72,6 +77,8 @@ struct spi_statistics {
72 77
73#define SPI_STATISTICS_HISTO_SIZE 17 78#define SPI_STATISTICS_HISTO_SIZE 17
74 unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; 79 unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
80
81 unsigned long transfers_split_maxsize;
75}; 82};
76 83
77void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 84void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
@@ -303,6 +310,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
303 * @min_speed_hz: Lowest supported transfer speed 310 * @min_speed_hz: Lowest supported transfer speed
304 * @max_speed_hz: Highest supported transfer speed 311 * @max_speed_hz: Highest supported transfer speed
305 * @flags: other constraints relevant to this driver 312 * @flags: other constraints relevant to this driver
313 * @max_transfer_size: function that returns the max transfer size for
314 * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
306 * @bus_lock_spinlock: spinlock for SPI bus locking 315 * @bus_lock_spinlock: spinlock for SPI bus locking
307 * @bus_lock_mutex: mutex for SPI bus locking 316 * @bus_lock_mutex: mutex for SPI bus locking
308 * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use 317 * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -361,6 +370,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
361 * @handle_err: the subsystem calls the driver to handle an error that occurs 370 * @handle_err: the subsystem calls the driver to handle an error that occurs
362 * in the generic implementation of transfer_one_message(). 371 * in the generic implementation of transfer_one_message().
363 * @unprepare_message: undo any work done by prepare_message(). 372 * @unprepare_message: undo any work done by prepare_message().
373 * @spi_flash_read: to support spi-controller hardware that provides an
374 * accelerated interface to read from flash devices.
364 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 375 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
365 * number. Any individual value may be -ENOENT for CS lines that 376 * number. Any individual value may be -ENOENT for CS lines that
366 * are not GPIOs (driven by the SPI controller itself). 377 * are not GPIOs (driven by the SPI controller itself).
@@ -369,6 +380,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
369 * @dma_rx: DMA receive channel 380 * @dma_rx: DMA receive channel
370 * @dummy_rx: dummy receive buffer for full-duplex devices 381 * @dummy_rx: dummy receive buffer for full-duplex devices
371 * @dummy_tx: dummy transmit buffer for full-duplex devices 382 * @dummy_tx: dummy transmit buffer for full-duplex devices
383 * @fw_translate_cs: If the boot firmware uses a different numbering scheme
384 * than what Linux expects, this optional hook can be used to translate
385 * between the two.
372 * 386 *
373 * Each SPI master controller can communicate with one or more @spi_device 387 * Each SPI master controller can communicate with one or more @spi_device
374 * children. These make a small bus, sharing MOSI, MISO and SCK signals 388 * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -513,6 +527,8 @@ struct spi_master {
513 struct spi_message *message); 527 struct spi_message *message);
514 int (*unprepare_message)(struct spi_master *master, 528 int (*unprepare_message)(struct spi_master *master,
515 struct spi_message *message); 529 struct spi_message *message);
530 int (*spi_flash_read)(struct spi_device *spi,
531 struct spi_flash_read_message *msg);
516 532
517 /* 533 /*
518 * These hooks are for drivers that use a generic implementation 534 * These hooks are for drivers that use a generic implementation
@@ -537,6 +553,8 @@ struct spi_master {
537 /* dummy data for full duplex devices */ 553 /* dummy data for full duplex devices */
538 void *dummy_rx; 554 void *dummy_rx;
539 void *dummy_tx; 555 void *dummy_tx;
556
557 int (*fw_translate_cs)(struct spi_master *master, unsigned cs);
540}; 558};
541 559
542static inline void *spi_master_get_devdata(struct spi_master *master) 560static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -582,6 +600,38 @@ extern void spi_unregister_master(struct spi_master *master);
582 600
583extern struct spi_master *spi_busnum_to_master(u16 busnum); 601extern struct spi_master *spi_busnum_to_master(u16 busnum);
584 602
603/*
604 * SPI resource management while processing a SPI message
605 */
606
607typedef void (*spi_res_release_t)(struct spi_master *master,
608 struct spi_message *msg,
609 void *res);
610
611/**
612 * struct spi_res - spi resource management structure
613 * @entry: list entry
614 * @release: release code called prior to freeing this resource
615 * @data: extra data allocated for the specific use-case
616 *
617 * this is based on ideas from devres, but focused on life-cycle
618 * management during spi_message processing
619 */
620struct spi_res {
621 struct list_head entry;
622 spi_res_release_t release;
623 unsigned long long data[]; /* guarantee ull alignment */
624};
625
626extern void *spi_res_alloc(struct spi_device *spi,
627 spi_res_release_t release,
628 size_t size, gfp_t gfp);
629extern void spi_res_add(struct spi_message *message, void *res);
630extern void spi_res_free(void *res);
631
632extern void spi_res_release(struct spi_master *master,
633 struct spi_message *message);
634
585/*---------------------------------------------------------------------------*/ 635/*---------------------------------------------------------------------------*/
586 636
587/* 637/*
@@ -720,6 +770,7 @@ struct spi_transfer {
720 * @status: zero for success, else negative errno 770 * @status: zero for success, else negative errno
721 * @queue: for use by whichever driver currently owns the message 771 * @queue: for use by whichever driver currently owns the message
722 * @state: for use by whichever driver currently owns the message 772 * @state: for use by whichever driver currently owns the message
773 * @resources: for resource management when the spi message is processed
723 * 774 *
724 * A @spi_message is used to execute an atomic sequence of data transfers, 775 * A @spi_message is used to execute an atomic sequence of data transfers,
725 * each represented by a struct spi_transfer. The sequence is "atomic" 776 * each represented by a struct spi_transfer. The sequence is "atomic"
@@ -766,11 +817,15 @@ struct spi_message {
766 */ 817 */
767 struct list_head queue; 818 struct list_head queue;
768 void *state; 819 void *state;
820
821 /* list of spi_res reources when the spi message is processed */
822 struct list_head resources;
769}; 823};
770 824
771static inline void spi_message_init_no_memset(struct spi_message *m) 825static inline void spi_message_init_no_memset(struct spi_message *m)
772{ 826{
773 INIT_LIST_HEAD(&m->transfers); 827 INIT_LIST_HEAD(&m->transfers);
828 INIT_LIST_HEAD(&m->resources);
774} 829}
775 830
776static inline void spi_message_init(struct spi_message *m) 831static inline void spi_message_init(struct spi_message *m)
@@ -854,6 +909,60 @@ spi_max_transfer_size(struct spi_device *spi)
854 909
855/*---------------------------------------------------------------------------*/ 910/*---------------------------------------------------------------------------*/
856 911
912/* SPI transfer replacement methods which make use of spi_res */
913
914struct spi_replaced_transfers;
915typedef void (*spi_replaced_release_t)(struct spi_master *master,
916 struct spi_message *msg,
917 struct spi_replaced_transfers *res);
918/**
919 * struct spi_replaced_transfers - structure describing the spi_transfer
920 * replacements that have occurred
921 * so that they can get reverted
922 * @release: some extra release code to get executed prior to
923 * relasing this structure
924 * @extradata: pointer to some extra data if requested or NULL
925 * @replaced_transfers: transfers that have been replaced and which need
926 * to get restored
927 * @replaced_after: the transfer after which the @replaced_transfers
928 * are to get re-inserted
929 * @inserted: number of transfers inserted
930 * @inserted_transfers: array of spi_transfers of array-size @inserted,
931 * that have been replacing replaced_transfers
932 *
933 * note: that @extradata will point to @inserted_transfers[@inserted]
934 * if some extra allocation is requested, so alignment will be the same
935 * as for spi_transfers
936 */
937struct spi_replaced_transfers {
938 spi_replaced_release_t release;
939 void *extradata;
940 struct list_head replaced_transfers;
941 struct list_head *replaced_after;
942 size_t inserted;
943 struct spi_transfer inserted_transfers[];
944};
945
946extern struct spi_replaced_transfers *spi_replace_transfers(
947 struct spi_message *msg,
948 struct spi_transfer *xfer_first,
949 size_t remove,
950 size_t insert,
951 spi_replaced_release_t release,
952 size_t extradatasize,
953 gfp_t gfp);
954
955/*---------------------------------------------------------------------------*/
956
957/* SPI transfer transformation methods */
958
959extern int spi_split_transfers_maxsize(struct spi_master *master,
960 struct spi_message *msg,
961 size_t maxsize,
962 gfp_t gfp);
963
964/*---------------------------------------------------------------------------*/
965
857/* All these synchronous SPI transfer routines are utilities layered 966/* All these synchronous SPI transfer routines are utilities layered
858 * over the core async transfer primitive. Here, "synchronous" means 967 * over the core async transfer primitive. Here, "synchronous" means
859 * they will sleep uninterruptibly until the async transfer completes. 968 * they will sleep uninterruptibly until the async transfer completes.
@@ -1019,6 +1128,42 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
1019 return be16_to_cpu(result); 1128 return be16_to_cpu(result);
1020} 1129}
1021 1130
1131/**
1132 * struct spi_flash_read_message - flash specific information for
1133 * spi-masters that provide accelerated flash read interfaces
1134 * @buf: buffer to read data
1135 * @from: offset within the flash from where data is to be read
1136 * @len: length of data to be read
1137 * @retlen: actual length of data read
1138 * @read_opcode: read_opcode to be used to communicate with flash
1139 * @addr_width: number of address bytes
1140 * @dummy_bytes: number of dummy bytes
1141 * @opcode_nbits: number of lines to send opcode
1142 * @addr_nbits: number of lines to send address
1143 * @data_nbits: number of lines for data
1144 */
1145struct spi_flash_read_message {
1146 void *buf;
1147 loff_t from;
1148 size_t len;
1149 size_t retlen;
1150 u8 read_opcode;
1151 u8 addr_width;
1152 u8 dummy_bytes;
1153 u8 opcode_nbits;
1154 u8 addr_nbits;
1155 u8 data_nbits;
1156};
1157
1158/* SPI core interface for flash read support */
1159static inline bool spi_flash_read_supported(struct spi_device *spi)
1160{
1161 return spi->master->spi_flash_read ? true : false;
1162}
1163
1164int spi_flash_read(struct spi_device *spi,
1165 struct spi_flash_read_message *msg);
1166
1022/*---------------------------------------------------------------------------*/ 1167/*---------------------------------------------------------------------------*/
1023 1168
1024/* 1169/*
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index f5f80c5643ac..dc8eb63c6568 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,8 +99,23 @@ void process_srcu(struct work_struct *work);
99 } 99 }
100 100
101/* 101/*
102 * define and init a srcu struct at build time. 102 * Define and initialize a srcu struct at build time.
103 * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it. 103 * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
104 *
105 * Note that although DEFINE_STATIC_SRCU() hides the name from other
106 * files, the per-CPU variable rules nevertheless require that the
107 * chosen name be globally unique. These rules also prohibit use of
108 * DEFINE_STATIC_SRCU() within a function. If these rules are too
109 * restrictive, declare the srcu_struct manually. For example, in
110 * each file:
111 *
112 * static struct srcu_struct my_srcu;
113 *
114 * Then, before the first use of each my_srcu, manually initialize it:
115 *
116 * init_srcu_struct(&my_srcu);
117 *
118 * See include/linux/percpu-defs.h for the rules on per-CPU variables.
104 */ 119 */
105#define __DEFINE_SRCU(name, is_static) \ 120#define __DEFINE_SRCU(name, is_static) \
106 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ 121 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
new file mode 100644
index 000000000000..7978b3e2c1e1
--- /dev/null
+++ b/include/linux/stackdepot.h
@@ -0,0 +1,32 @@
1/*
2 * A generic stack depot implementation
3 *
4 * Author: Alexander Potapenko <glider@google.com>
5 * Copyright (C) 2016 Google, Inc.
6 *
7 * Based on code by Dmitry Chernenkov.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 */
20
21#ifndef _LINUX_STACKDEPOT_H
22#define _LINUX_STACKDEPOT_H
23
24typedef u32 depot_stack_handle_t;
25
26struct stack_trace;
27
28depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
29
30void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
31
32#endif
diff --git a/include/linux/stm.h b/include/linux/stm.h
index 9d0083d364e6..1a79ed8e43da 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -67,6 +67,16 @@ struct stm_device;
67 * description. That is, the lowest master that can be allocated to software 67 * description. That is, the lowest master that can be allocated to software
68 * writers is @sw_start and data from this writer will appear is @sw_start 68 * writers is @sw_start and data from this writer will appear is @sw_start
69 * master in the STP stream. 69 * master in the STP stream.
70 *
71 * The @packet callback should adhere to the following rules:
72 * 1) it must return the number of bytes it consumed from the payload;
73 * 2) therefore, if it sent a packet that does not have payload (like FLAG),
74 * it must return zero;
75 * 3) if it does not support the requested packet type/flag combination,
76 * it must return -ENOTSUPP.
77 *
78 * The @unlink callback is called when there are no more active writers so
79 * that the master/channel can be quiesced.
70 */ 80 */
71struct stm_data { 81struct stm_data {
72 const char *name; 82 const char *name;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 881a79d52467..e6bc30a42a74 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -90,11 +90,24 @@ struct stmmac_dma_cfg {
90 int pbl; 90 int pbl;
91 int fixed_burst; 91 int fixed_burst;
92 int mixed_burst; 92 int mixed_burst;
93 int burst_len; 93 bool aal;
94};
95
96#define AXI_BLEN 7
97struct stmmac_axi {
98 bool axi_lpi_en;
99 bool axi_xit_frm;
100 u32 axi_wr_osr_lmt;
101 u32 axi_rd_osr_lmt;
102 bool axi_kbbe;
103 bool axi_axi_all;
104 u32 axi_blen[AXI_BLEN];
105 bool axi_fb;
106 bool axi_mb;
107 bool axi_rb;
94}; 108};
95 109
96struct plat_stmmacenet_data { 110struct plat_stmmacenet_data {
97 char *phy_bus_name;
98 int bus_id; 111 int bus_id;
99 int phy_addr; 112 int phy_addr;
100 int interface; 113 int interface;
@@ -123,5 +136,6 @@ struct plat_stmmacenet_data {
123 int (*init)(struct platform_device *pdev, void *priv); 136 int (*init)(struct platform_device *pdev, void *priv);
124 void (*exit)(struct platform_device *pdev, void *priv); 137 void (*exit)(struct platform_device *pdev, void *priv);
125 void *bsp_priv; 138 void *bsp_priv;
139 struct stmmac_axi *axi;
126}; 140};
127#endif 141#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index 9eebc66d957a..d3993a79a325 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -128,7 +128,13 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
128extern void argv_free(char **argv); 128extern void argv_free(char **argv);
129 129
130extern bool sysfs_streq(const char *s1, const char *s2); 130extern bool sysfs_streq(const char *s1, const char *s2);
131extern int strtobool(const char *s, bool *res); 131extern int kstrtobool(const char *s, bool *res);
132static inline int strtobool(const char *s, bool *res)
133{
134 return kstrtobool(s, res);
135}
136
137int match_string(const char * const *array, size_t n, const char *string);
132 138
133#ifdef CONFIG_BINARY_PRINTF 139#ifdef CONFIG_BINARY_PRINTF
134int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); 140int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 1ecf13e148b8..6a241a277249 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -21,10 +21,17 @@
21#include <linux/utsname.h> 21#include <linux/utsname.h>
22 22
23/* 23/*
24 * Maximum size of AUTH_NONE authentication information, in XDR words.
25 */
26#define NUL_CALLSLACK (4)
27#define NUL_REPLYSLACK (2)
28
29/*
24 * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes, 30 * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
25 * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes. 31 * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
26 */ 32 */
27#define UNX_MAXNODENAME __NEW_UTS_LEN 33#define UNX_MAXNODENAME __NEW_UTS_LEN
34#define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME))
28 35
29struct rpcsec_gss_info; 36struct rpcsec_gss_info;
30 37
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 131032f15cc1..9a7ddbaf116e 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -25,6 +25,7 @@
25#include <asm/signal.h> 25#include <asm/signal.h>
26#include <linux/path.h> 26#include <linux/path.h>
27#include <net/ipv6.h> 27#include <net/ipv6.h>
28#include <linux/sunrpc/xprtmultipath.h>
28 29
29struct rpc_inode; 30struct rpc_inode;
30 31
@@ -67,6 +68,7 @@ struct rpc_clnt {
67#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 68#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
68 struct dentry *cl_debugfs; /* debugfs directory */ 69 struct dentry *cl_debugfs; /* debugfs directory */
69#endif 70#endif
71 struct rpc_xprt_iter cl_xpi;
70}; 72};
71 73
72/* 74/*
@@ -139,7 +141,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
139 struct rpc_xprt *xprt); 141 struct rpc_xprt *xprt);
140struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, 142struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
141 const struct rpc_program *, u32); 143 const struct rpc_program *, u32);
142void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
143struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); 144struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
144struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *, 145struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *,
145 rpc_authflavor_t); 146 rpc_authflavor_t);
@@ -181,6 +182,21 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
181const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); 182const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
182int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); 183int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
183 184
185int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
186 int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
187 void *data);
188
189int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
190 struct rpc_xprt_switch *xps,
191 struct rpc_xprt *xprt,
192 void *dummy);
193int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *,
194 int (*setup)(struct rpc_clnt *,
195 struct rpc_xprt_switch *,
196 struct rpc_xprt *,
197 void *),
198 void *data);
199
184const char *rpc_proc_name(const struct rpc_task *task); 200const char *rpc_proc_name(const struct rpc_task *task);
185#endif /* __KERNEL__ */ 201#endif /* __KERNEL__ */
186#endif /* _LINUX_SUNRPC_CLNT_H */ 202#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index df02a4188487..7df625d41e35 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -36,7 +36,7 @@
36 * 36 *
37 */ 37 */
38 38
39#include <linux/crypto.h> 39#include <crypto/skcipher.h>
40#include <linux/sunrpc/auth_gss.h> 40#include <linux/sunrpc/auth_gss.h>
41#include <linux/sunrpc/gss_err.h> 41#include <linux/sunrpc/gss_err.h>
42#include <linux/sunrpc/gss_asn1.h> 42#include <linux/sunrpc/gss_asn1.h>
@@ -71,10 +71,10 @@ struct gss_krb5_enctype {
71 const u32 keyed_cksum; /* is it a keyed cksum? */ 71 const u32 keyed_cksum; /* is it a keyed cksum? */
72 const u32 keybytes; /* raw key len, in bytes */ 72 const u32 keybytes; /* raw key len, in bytes */
73 const u32 keylength; /* final key len, in bytes */ 73 const u32 keylength; /* final key len, in bytes */
74 u32 (*encrypt) (struct crypto_blkcipher *tfm, 74 u32 (*encrypt) (struct crypto_skcipher *tfm,
75 void *iv, void *in, void *out, 75 void *iv, void *in, void *out,
76 int length); /* encryption function */ 76 int length); /* encryption function */
77 u32 (*decrypt) (struct crypto_blkcipher *tfm, 77 u32 (*decrypt) (struct crypto_skcipher *tfm,
78 void *iv, void *in, void *out, 78 void *iv, void *in, void *out,
79 int length); /* decryption function */ 79 int length); /* decryption function */
80 u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, 80 u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@ -98,12 +98,12 @@ struct krb5_ctx {
98 u32 enctype; 98 u32 enctype;
99 u32 flags; 99 u32 flags;
100 const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ 100 const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
101 struct crypto_blkcipher *enc; 101 struct crypto_skcipher *enc;
102 struct crypto_blkcipher *seq; 102 struct crypto_skcipher *seq;
103 struct crypto_blkcipher *acceptor_enc; 103 struct crypto_skcipher *acceptor_enc;
104 struct crypto_blkcipher *initiator_enc; 104 struct crypto_skcipher *initiator_enc;
105 struct crypto_blkcipher *acceptor_enc_aux; 105 struct crypto_skcipher *acceptor_enc_aux;
106 struct crypto_blkcipher *initiator_enc_aux; 106 struct crypto_skcipher *initiator_enc_aux;
107 u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ 107 u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
108 u8 cksum[GSS_KRB5_MAX_KEYLEN]; 108 u8 cksum[GSS_KRB5_MAX_KEYLEN];
109 s32 endtime; 109 s32 endtime;
@@ -262,24 +262,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
262 262
263 263
264u32 264u32
265krb5_encrypt(struct crypto_blkcipher *key, 265krb5_encrypt(struct crypto_skcipher *key,
266 void *iv, void *in, void *out, int length); 266 void *iv, void *in, void *out, int length);
267 267
268u32 268u32
269krb5_decrypt(struct crypto_blkcipher *key, 269krb5_decrypt(struct crypto_skcipher *key,
270 void *iv, void *in, void *out, int length); 270 void *iv, void *in, void *out, int length);
271 271
272int 272int
273gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf, 273gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf,
274 int offset, struct page **pages); 274 int offset, struct page **pages);
275 275
276int 276int
277gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf, 277gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf,
278 int offset); 278 int offset);
279 279
280s32 280s32
281krb5_make_seq_num(struct krb5_ctx *kctx, 281krb5_make_seq_num(struct krb5_ctx *kctx,
282 struct crypto_blkcipher *key, 282 struct crypto_skcipher *key,
283 int direction, 283 int direction,
284 u32 seqnum, unsigned char *cksum, unsigned char *buf); 284 u32 seqnum, unsigned char *cksum, unsigned char *buf);
285 285
@@ -320,12 +320,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
320 320
321int 321int
322krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, 322krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
323 struct crypto_blkcipher *cipher, 323 struct crypto_skcipher *cipher,
324 unsigned char *cksum); 324 unsigned char *cksum);
325 325
326int 326int
327krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, 327krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
328 struct crypto_blkcipher *cipher, 328 struct crypto_skcipher *cipher,
329 s32 seqnum); 329 s32 seqnum);
330void 330void
331gss_krb5_make_confounder(char *p, u32 conflen); 331gss_krb5_make_confounder(char *p, u32 conflen);
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index f33c5a4d6fe4..3b1ff38f0c37 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -93,6 +93,12 @@ struct rpcrdma_msg {
93 __be32 rm_pempty[3]; /* 3 empty chunk lists */ 93 __be32 rm_pempty[3]; /* 3 empty chunk lists */
94 } rm_padded; 94 } rm_padded;
95 95
96 struct {
97 __be32 rm_err;
98 __be32 rm_vers_low;
99 __be32 rm_vers_high;
100 } rm_error;
101
96 __be32 rm_chunks[0]; /* read, write and reply chunks */ 102 __be32 rm_chunks[0]; /* read, write and reply chunks */
97 103
98 } rm_body; 104 } rm_body;
@@ -102,17 +108,13 @@ struct rpcrdma_msg {
102 * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks 108 * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks
103 */ 109 */
104#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7) 110#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7)
111#define RPCRDMA_HDRLEN_ERR (sizeof(__be32) * 5)
105 112
106enum rpcrdma_errcode { 113enum rpcrdma_errcode {
107 ERR_VERS = 1, 114 ERR_VERS = 1,
108 ERR_CHUNK = 2 115 ERR_CHUNK = 2
109}; 116};
110 117
111struct rpcrdma_err_vers {
112 uint32_t rdma_vers_low; /* Version range supported by peer */
113 uint32_t rdma_vers_high;
114};
115
116enum rpcrdma_proc { 118enum rpcrdma_proc {
117 RDMA_MSG = 0, /* An RPC call or reply msg */ 119 RDMA_MSG = 0, /* An RPC call or reply msg */
118 RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */ 120 RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d703f0ef37d8..05a1809c44d9 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -42,40 +42,43 @@ struct rpc_wait {
42 */ 42 */
43struct rpc_task { 43struct rpc_task {
44 atomic_t tk_count; /* Reference count */ 44 atomic_t tk_count; /* Reference count */
45 int tk_status; /* result of last operation */
45 struct list_head tk_task; /* global list of tasks */ 46 struct list_head tk_task; /* global list of tasks */
46 struct rpc_clnt * tk_client; /* RPC client */
47 struct rpc_rqst * tk_rqstp; /* RPC request */
48
49 /*
50 * RPC call state
51 */
52 struct rpc_message tk_msg; /* RPC call info */
53 47
54 /* 48 /*
55 * callback to be executed after waking up 49 * callback to be executed after waking up
56 * action next procedure for async tasks 50 * action next procedure for async tasks
57 * tk_ops caller callbacks
58 */ 51 */
59 void (*tk_callback)(struct rpc_task *); 52 void (*tk_callback)(struct rpc_task *);
60 void (*tk_action)(struct rpc_task *); 53 void (*tk_action)(struct rpc_task *);
61 const struct rpc_call_ops *tk_ops;
62 void * tk_calldata;
63 54
64 unsigned long tk_timeout; /* timeout for rpc_sleep() */ 55 unsigned long tk_timeout; /* timeout for rpc_sleep() */
65 unsigned long tk_runstate; /* Task run status */ 56 unsigned long tk_runstate; /* Task run status */
66 struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could 57
67 * be any workqueue
68 */
69 struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */ 58 struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */
70 union { 59 union {
71 struct work_struct tk_work; /* Async task work queue */ 60 struct work_struct tk_work; /* Async task work queue */
72 struct rpc_wait tk_wait; /* RPC wait */ 61 struct rpc_wait tk_wait; /* RPC wait */
73 } u; 62 } u;
74 63
64 /*
65 * RPC call state
66 */
67 struct rpc_message tk_msg; /* RPC call info */
68 void * tk_calldata; /* Caller private data */
69 const struct rpc_call_ops *tk_ops; /* Caller callbacks */
70
71 struct rpc_clnt * tk_client; /* RPC client */
72 struct rpc_xprt * tk_xprt; /* Transport */
73
74 struct rpc_rqst * tk_rqstp; /* RPC request */
75
76 struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
77 * be any workqueue
78 */
75 ktime_t tk_start; /* RPC task init timestamp */ 79 ktime_t tk_start; /* RPC task init timestamp */
76 80
77 pid_t tk_owner; /* Process id for batching tasks */ 81 pid_t tk_owner; /* Process id for batching tasks */
78 int tk_status; /* result of last operation */
79 unsigned short tk_flags; /* misc flags */ 82 unsigned short tk_flags; /* misc flags */
80 unsigned short tk_timeouts; /* maj timeouts */ 83 unsigned short tk_timeouts; /* maj timeouts */
81 84
@@ -100,6 +103,7 @@ struct rpc_call_ops {
100struct rpc_task_setup { 103struct rpc_task_setup {
101 struct rpc_task *task; 104 struct rpc_task *task;
102 struct rpc_clnt *rpc_client; 105 struct rpc_clnt *rpc_client;
106 struct rpc_xprt *rpc_xprt;
103 const struct rpc_message *rpc_message; 107 const struct rpc_message *rpc_message;
104 const struct rpc_call_ops *callback_ops; 108 const struct rpc_call_ops *callback_ops;
105 void *callback_data; 109 void *callback_data;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb82..7ca44fb5b675 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv)
129 * 129 *
130 * These happen to all be powers of 2, which is not strictly 130 * These happen to all be powers of 2, which is not strictly
131 * necessary but helps enforce the real limitation, which is 131 * necessary but helps enforce the real limitation, which is
132 * that they should be multiples of PAGE_CACHE_SIZE. 132 * that they should be multiples of PAGE_SIZE.
133 * 133 *
134 * For UDP transports, a block plus NFS,RPC, and UDP headers 134 * For UDP transports, a block plus NFS,RPC, and UDP headers
135 * has to fit into the IP datagram limit of 64K. The largest 135 * has to fit into the IP datagram limit of 64K. The largest
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 5322fea6fe4c..3081339968c3 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -75,8 +75,10 @@ struct svc_rdma_op_ctxt {
75 struct svc_rdma_fastreg_mr *frmr; 75 struct svc_rdma_fastreg_mr *frmr;
76 int hdr_count; 76 int hdr_count;
77 struct xdr_buf arg; 77 struct xdr_buf arg;
78 struct ib_cqe cqe;
79 struct ib_cqe reg_cqe;
80 struct ib_cqe inv_cqe;
78 struct list_head dto_q; 81 struct list_head dto_q;
79 enum ib_wr_opcode wr_op;
80 enum ib_wc_status wc_status; 82 enum ib_wc_status wc_status;
81 u32 byte_len; 83 u32 byte_len;
82 u32 position; 84 u32 position;
@@ -174,8 +176,6 @@ struct svcxprt_rdma {
174 struct work_struct sc_work; 176 struct work_struct sc_work;
175}; 177};
176/* sc_flags */ 178/* sc_flags */
177#define RDMAXPRT_RQ_PENDING 1
178#define RDMAXPRT_SQ_PENDING 2
179#define RDMAXPRT_CONN_PENDING 3 179#define RDMAXPRT_CONN_PENDING 3
180 180
181#define RPCRDMA_LISTEN_BACKLOG 10 181#define RPCRDMA_LISTEN_BACKLOG 10
@@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
199 struct xdr_buf *rcvbuf); 199 struct xdr_buf *rcvbuf);
200 200
201/* svc_rdma_marshal.c */ 201/* svc_rdma_marshal.c */
202extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); 202extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *);
203extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, 203extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
204 struct rpcrdma_msg *, 204 struct rpcrdma_msg *,
205 enum rpcrdma_errcode, __be32 *); 205 enum rpcrdma_errcode, __be32 *);
@@ -224,16 +224,22 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
224 224
225/* svc_rdma_sendto.c */ 225/* svc_rdma_sendto.c */
226extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, 226extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
227 struct svc_rdma_req_map *); 227 struct svc_rdma_req_map *, bool);
228extern int svc_rdma_sendto(struct svc_rqst *); 228extern int svc_rdma_sendto(struct svc_rqst *);
229extern struct rpcrdma_read_chunk * 229extern struct rpcrdma_read_chunk *
230 svc_rdma_get_read_chunk(struct rpcrdma_msg *); 230 svc_rdma_get_read_chunk(struct rpcrdma_msg *);
231extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
232 int);
231 233
232/* svc_rdma_transport.c */ 234/* svc_rdma_transport.c */
235extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *);
236extern void svc_rdma_wc_write(struct ib_cq *, struct ib_wc *);
237extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
238extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
239extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
233extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); 240extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
234extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
235 enum rpcrdma_errcode);
236extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t); 241extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
242extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
237extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); 243extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
238extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); 244extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
239extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); 245extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 69ef5b3ab038..fb0d212e0d3a 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -13,6 +13,7 @@
13#include <linux/socket.h> 13#include <linux/socket.h>
14#include <linux/in.h> 14#include <linux/in.h>
15#include <linux/ktime.h> 15#include <linux/ktime.h>
16#include <linux/kref.h>
16#include <linux/sunrpc/sched.h> 17#include <linux/sunrpc/sched.h>
17#include <linux/sunrpc/xdr.h> 18#include <linux/sunrpc/xdr.h>
18#include <linux/sunrpc/msg_prot.h> 19#include <linux/sunrpc/msg_prot.h>
@@ -166,7 +167,7 @@ enum xprt_transports {
166}; 167};
167 168
168struct rpc_xprt { 169struct rpc_xprt {
169 atomic_t count; /* Reference count */ 170 struct kref kref; /* Reference count */
170 struct rpc_xprt_ops * ops; /* transport methods */ 171 struct rpc_xprt_ops * ops; /* transport methods */
171 172
172 const struct rpc_timeout *timeout; /* timeout parms */ 173 const struct rpc_timeout *timeout; /* timeout parms */
@@ -197,6 +198,11 @@ struct rpc_xprt {
197 unsigned int bind_index; /* bind function index */ 198 unsigned int bind_index; /* bind function index */
198 199
199 /* 200 /*
201 * Multipath
202 */
203 struct list_head xprt_switch;
204
205 /*
200 * Connection of transports 206 * Connection of transports
201 */ 207 */
202 unsigned long bind_timeout, 208 unsigned long bind_timeout,
@@ -256,6 +262,7 @@ struct rpc_xprt {
256 struct dentry *debugfs; /* debugfs directory */ 262 struct dentry *debugfs; /* debugfs directory */
257 atomic_t inject_disconnect; 263 atomic_t inject_disconnect;
258#endif 264#endif
265 struct rcu_head rcu;
259}; 266};
260 267
261#if defined(CONFIG_SUNRPC_BACKCHANNEL) 268#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -318,24 +325,13 @@ int xprt_adjust_timeout(struct rpc_rqst *req);
318void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); 325void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
319void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); 326void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
320void xprt_release(struct rpc_task *task); 327void xprt_release(struct rpc_task *task);
328struct rpc_xprt * xprt_get(struct rpc_xprt *xprt);
321void xprt_put(struct rpc_xprt *xprt); 329void xprt_put(struct rpc_xprt *xprt);
322struct rpc_xprt * xprt_alloc(struct net *net, size_t size, 330struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
323 unsigned int num_prealloc, 331 unsigned int num_prealloc,
324 unsigned int max_req); 332 unsigned int max_req);
325void xprt_free(struct rpc_xprt *); 333void xprt_free(struct rpc_xprt *);
326 334
327/**
328 * xprt_get - return a reference to an RPC transport.
329 * @xprt: pointer to the transport
330 *
331 */
332static inline struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
333{
334 if (atomic_inc_not_zero(&xprt->count))
335 return xprt;
336 return NULL;
337}
338
339static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p) 335static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
340{ 336{
341 return p + xprt->tsh_size; 337 return p + xprt->tsh_size;
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
new file mode 100644
index 000000000000..5a9acffa41be
--- /dev/null
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -0,0 +1,69 @@
1/*
2 * RPC client multipathing definitions
3 *
4 * Copyright (c) 2015, 2016, Primary Data, Inc. All rights reserved.
5 *
6 * Trond Myklebust <trond.myklebust@primarydata.com>
7 */
8#ifndef _NET_SUNRPC_XPRTMULTIPATH_H
9#define _NET_SUNRPC_XPRTMULTIPATH_H
10
11struct rpc_xprt_iter_ops;
12struct rpc_xprt_switch {
13 spinlock_t xps_lock;
14 struct kref xps_kref;
15
16 unsigned int xps_nxprts;
17 struct list_head xps_xprt_list;
18
19 struct net * xps_net;
20
21 const struct rpc_xprt_iter_ops *xps_iter_ops;
22
23 struct rcu_head xps_rcu;
24};
25
26struct rpc_xprt_iter {
27 struct rpc_xprt_switch __rcu *xpi_xpswitch;
28 struct rpc_xprt * xpi_cursor;
29
30 const struct rpc_xprt_iter_ops *xpi_ops;
31};
32
33
34struct rpc_xprt_iter_ops {
35 void (*xpi_rewind)(struct rpc_xprt_iter *);
36 struct rpc_xprt *(*xpi_xprt)(struct rpc_xprt_iter *);
37 struct rpc_xprt *(*xpi_next)(struct rpc_xprt_iter *);
38};
39
40extern struct rpc_xprt_switch *xprt_switch_alloc(struct rpc_xprt *xprt,
41 gfp_t gfp_flags);
42
43extern struct rpc_xprt_switch *xprt_switch_get(struct rpc_xprt_switch *xps);
44extern void xprt_switch_put(struct rpc_xprt_switch *xps);
45
46extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps);
47
48extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
49 struct rpc_xprt *xprt);
50extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
51 struct rpc_xprt *xprt);
52
53extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
54 struct rpc_xprt_switch *xps);
55
56extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
57 struct rpc_xprt_switch *xps);
58
59extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi);
60
61extern struct rpc_xprt_switch *xprt_iter_xchg_switch(
62 struct rpc_xprt_iter *xpi,
63 struct rpc_xprt_switch *newswitch);
64
65extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi);
66extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi);
67extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
68
69#endif
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index b7b279b54504..767190b01363 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -54,8 +54,6 @@
54 54
55#define RPCRDMA_DEF_INLINE (1024) /* default inline max */ 55#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
56 56
57#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
58
59/* Memory registration strategies, by number. 57/* Memory registration strategies, by number.
60 * This is part of a kernel / user space API. Do not remove. */ 58 * This is part of a kernel / user space API. Do not remove. */
61enum rpcrdma_memreg { 59enum rpcrdma_memreg {
diff --git a/include/linux/swait.h b/include/linux/swait.h
new file mode 100644
index 000000000000..c1f9c62a8a50
--- /dev/null
+++ b/include/linux/swait.h
@@ -0,0 +1,172 @@
1#ifndef _LINUX_SWAIT_H
2#define _LINUX_SWAIT_H
3
4#include <linux/list.h>
5#include <linux/stddef.h>
6#include <linux/spinlock.h>
7#include <asm/current.h>
8
9/*
10 * Simple wait queues
11 *
12 * While these are very similar to the other/complex wait queues (wait.h) the
13 * most important difference is that the simple waitqueue allows for
14 * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
15 * times.
16 *
17 * In order to make this so, we had to drop a fair number of features of the
18 * other waitqueue code; notably:
19 *
20 * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
21 * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
22 * sleeper state.
23 *
24 * - the exclusive mode; because this requires preserving the list order
25 * and this is hard.
26 *
27 * - custom wake functions; because you cannot give any guarantees about
28 * random code.
29 *
30 * As a side effect of this; the data structures are slimmer.
31 *
32 * One would recommend using this wait queue where possible.
33 */
34
35struct task_struct;
36
37struct swait_queue_head {
38 raw_spinlock_t lock;
39 struct list_head task_list;
40};
41
42struct swait_queue {
43 struct task_struct *task;
44 struct list_head task_list;
45};
46
47#define __SWAITQUEUE_INITIALIZER(name) { \
48 .task = current, \
49 .task_list = LIST_HEAD_INIT((name).task_list), \
50}
51
52#define DECLARE_SWAITQUEUE(name) \
53 struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)
54
55#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \
56 .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
57 .task_list = LIST_HEAD_INIT((name).task_list), \
58}
59
60#define DECLARE_SWAIT_QUEUE_HEAD(name) \
61 struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)
62
63extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
64 struct lock_class_key *key);
65
66#define init_swait_queue_head(q) \
67 do { \
68 static struct lock_class_key __key; \
69 __init_swait_queue_head((q), #q, &__key); \
70 } while (0)
71
72#ifdef CONFIG_LOCKDEP
73# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
74 ({ init_swait_queue_head(&name); name; })
75# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
76 struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
77#else
78# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
79 DECLARE_SWAIT_QUEUE_HEAD(name)
80#endif
81
82static inline int swait_active(struct swait_queue_head *q)
83{
84 return !list_empty(&q->task_list);
85}
86
87extern void swake_up(struct swait_queue_head *q);
88extern void swake_up_all(struct swait_queue_head *q);
89extern void swake_up_locked(struct swait_queue_head *q);
90
91extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
92extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
93extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
94
95extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
96extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
97
98/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
99#define ___swait_event(wq, condition, state, ret, cmd) \
100({ \
101 struct swait_queue __wait; \
102 long __ret = ret; \
103 \
104 INIT_LIST_HEAD(&__wait.task_list); \
105 for (;;) { \
106 long __int = prepare_to_swait_event(&wq, &__wait, state);\
107 \
108 if (condition) \
109 break; \
110 \
111 if (___wait_is_interruptible(state) && __int) { \
112 __ret = __int; \
113 break; \
114 } \
115 \
116 cmd; \
117 } \
118 finish_swait(&wq, &__wait); \
119 __ret; \
120})
121
122#define __swait_event(wq, condition) \
123 (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
124 schedule())
125
126#define swait_event(wq, condition) \
127do { \
128 if (condition) \
129 break; \
130 __swait_event(wq, condition); \
131} while (0)
132
133#define __swait_event_timeout(wq, condition, timeout) \
134 ___swait_event(wq, ___wait_cond_timeout(condition), \
135 TASK_UNINTERRUPTIBLE, timeout, \
136 __ret = schedule_timeout(__ret))
137
138#define swait_event_timeout(wq, condition, timeout) \
139({ \
140 long __ret = timeout; \
141 if (!___wait_cond_timeout(condition)) \
142 __ret = __swait_event_timeout(wq, condition, timeout); \
143 __ret; \
144})
145
146#define __swait_event_interruptible(wq, condition) \
147 ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
148 schedule())
149
150#define swait_event_interruptible(wq, condition) \
151({ \
152 int __ret = 0; \
153 if (!(condition)) \
154 __ret = __swait_event_interruptible(wq, condition); \
155 __ret; \
156})
157
158#define __swait_event_interruptible_timeout(wq, condition, timeout) \
159 ___swait_event(wq, ___wait_cond_timeout(condition), \
160 TASK_INTERRUPTIBLE, timeout, \
161 __ret = schedule_timeout(__ret))
162
163#define swait_event_interruptible_timeout(wq, condition, timeout) \
164({ \
165 long __ret = timeout; \
166 if (!___wait_cond_timeout(condition)) \
167 __ret = __swait_event_interruptible_timeout(wq, \
168 condition, timeout); \
169 __ret; \
170})
171
172#endif /* _LINUX_SWAIT_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d18b65c53dbb..ad220359f1b0 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
418extern int page_swapcount(struct page *); 418extern int page_swapcount(struct page *);
419extern int swp_swapcount(swp_entry_t entry); 419extern int swp_swapcount(swp_entry_t entry);
420extern struct swap_info_struct *page_swap_info(struct page *); 420extern struct swap_info_struct *page_swap_info(struct page *);
421extern int reuse_swap_page(struct page *); 421extern bool reuse_swap_page(struct page *, int *);
422extern int try_to_free_swap(struct page *); 422extern int try_to_free_swap(struct page *);
423struct backing_dev_info; 423struct backing_dev_info;
424 424
@@ -433,9 +433,9 @@ struct backing_dev_info;
433#define si_swapinfo(val) \ 433#define si_swapinfo(val) \
434 do { (val)->freeswap = (val)->totalswap = 0; } while (0) 434 do { (val)->freeswap = (val)->totalswap = 0; } while (0)
435/* only sparc can not include linux/pagemap.h in this file 435/* only sparc can not include linux/pagemap.h in this file
436 * so leave page_cache_release and release_pages undeclared... */ 436 * so leave put_page and release_pages undeclared... */
437#define free_page_and_swap_cache(page) \ 437#define free_page_and_swap_cache(page) \
438 page_cache_release(page) 438 put_page(page)
439#define free_pages_and_swap_cache(pages, nr) \ 439#define free_pages_and_swap_cache(pages, nr) \
440 release_pages((pages), (nr), false); 440 release_pages((pages), (nr), false);
441 441
@@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
513 return 0; 513 return 0;
514} 514}
515 515
516#define reuse_swap_page(page) \ 516#define reuse_swap_page(page, total_mapcount) \
517 (!PageTransCompound(page) && page_mapcount(page) == 1) 517 (page_trans_huge_mapcount(page, total_mapcount) == 1)
518 518
519static inline int try_to_free_swap(struct page *page) 519static inline int try_to_free_swap(struct page *page)
520{ 520{
@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
533#ifdef CONFIG_MEMCG 533#ifdef CONFIG_MEMCG
534static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) 534static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
535{ 535{
536 /* Cgroup2 doesn't have per-cgroup swappiness */
537 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
538 return vm_swappiness;
539
536 /* root ? */ 540 /* root ? */
537 if (mem_cgroup_disabled() || !memcg->css.parent) 541 if (mem_cgroup_disabled() || !memcg->css.parent)
538 return vm_swappiness; 542 return vm_swappiness;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 185815c96433..d795472c54d8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -575,8 +575,14 @@ asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
575 size_t count, loff_t pos); 575 size_t count, loff_t pos);
576asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, 576asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
577 unsigned long vlen, unsigned long pos_l, unsigned long pos_h); 577 unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
578asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec,
579 unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
580 int flags);
578asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, 581asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
579 unsigned long vlen, unsigned long pos_l, unsigned long pos_h); 582 unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
583asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec,
584 unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
585 int flags);
580asmlinkage long sys_getcwd(char __user *buf, unsigned long size); 586asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
581asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode); 587asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
582asmlinkage long sys_chdir(const char __user *filename); 588asmlinkage long sys_chdir(const char __user *filename);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b386361ba3e8..7be9b1242354 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
29 return (struct tcphdr *)skb_transport_header(skb); 29 return (struct tcphdr *)skb_transport_header(skb);
30} 30}
31 31
32static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
33{
34 return th->doff * 4;
35}
36
32static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) 37static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
33{ 38{
34 return tcp_hdr(skb)->doff * 4; 39 return __tcp_hdrlen(tcp_hdr(skb));
35} 40}
36 41
37static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb) 42static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
@@ -153,6 +158,9 @@ struct tcp_sock {
153 u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn 158 u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
154 * total number of segments in. 159 * total number of segments in.
155 */ 160 */
161 u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
162 * total number of data segments in.
163 */
156 u32 rcv_nxt; /* What we want to receive next */ 164 u32 rcv_nxt; /* What we want to receive next */
157 u32 copied_seq; /* Head of yet unread data */ 165 u32 copied_seq; /* Head of yet unread data */
158 u32 rcv_wup; /* rcv_nxt on last window update sent */ 166 u32 rcv_wup; /* rcv_nxt on last window update sent */
@@ -160,6 +168,9 @@ struct tcp_sock {
160 u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut 168 u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
161 * The total number of segments sent. 169 * The total number of segments sent.
162 */ 170 */
171 u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
172 * total number of data segments sent.
173 */
163 u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked 174 u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
164 * sum(delta(snd_una)), or how many bytes 175 * sum(delta(snd_una)), or how many bytes
165 * were acked. 176 * were acked.
@@ -256,6 +267,7 @@ struct tcp_sock {
256 u32 prr_delivered; /* Number of newly delivered packets to 267 u32 prr_delivered; /* Number of newly delivered packets to
257 * receiver in Recovery. */ 268 * receiver in Recovery. */
258 u32 prr_out; /* Total number of pkts sent during Recovery. */ 269 u32 prr_out; /* Total number of pkts sent during Recovery. */
270 u32 delivered; /* Total data packets delivered incl. rexmits */
259 271
260 u32 rcv_wnd; /* Current receiver window */ 272 u32 rcv_wnd; /* Current receiver window */
261 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ 273 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e13a1ace50e9..1b8a5a7876ce 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -156,6 +156,7 @@ struct thermal_attr {
156 * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis 156 * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
157 * @devdata: private pointer for device private data 157 * @devdata: private pointer for device private data
158 * @trips: number of trip points the thermal zone supports 158 * @trips: number of trip points the thermal zone supports
159 * @trips_disabled; bitmap for disabled trips
159 * @passive_delay: number of milliseconds to wait between polls when 160 * @passive_delay: number of milliseconds to wait between polls when
160 * performing passive cooling. 161 * performing passive cooling.
161 * @polling_delay: number of milliseconds to wait between polls when 162 * @polling_delay: number of milliseconds to wait between polls when
@@ -191,6 +192,7 @@ struct thermal_zone_device {
191 struct thermal_attr *trip_hyst_attrs; 192 struct thermal_attr *trip_hyst_attrs;
192 void *devdata; 193 void *devdata;
193 int trips; 194 int trips;
195 unsigned long trips_disabled; /* bitmap for disabled trips */
194 int passive_delay; 196 int passive_delay;
195 int polling_delay; 197 int polling_delay;
196 int temperature; 198 int temperature;
@@ -350,8 +352,8 @@ struct thermal_zone_of_device_ops {
350 352
351struct thermal_trip { 353struct thermal_trip {
352 struct device_node *np; 354 struct device_node *np;
353 unsigned long int temperature; 355 int temperature;
354 unsigned long int hysteresis; 356 int hysteresis;
355 enum thermal_trip_type type; 357 enum thermal_trip_type type;
356}; 358};
357 359
@@ -362,6 +364,11 @@ thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
362 const struct thermal_zone_of_device_ops *ops); 364 const struct thermal_zone_of_device_ops *ops);
363void thermal_zone_of_sensor_unregister(struct device *dev, 365void thermal_zone_of_sensor_unregister(struct device *dev,
364 struct thermal_zone_device *tz); 366 struct thermal_zone_device *tz);
367struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
368 struct device *dev, int id, void *data,
369 const struct thermal_zone_of_device_ops *ops);
370void devm_thermal_zone_of_sensor_unregister(struct device *dev,
371 struct thermal_zone_device *tz);
365#else 372#else
366static inline struct thermal_zone_device * 373static inline struct thermal_zone_device *
367thermal_zone_of_sensor_register(struct device *dev, int id, void *data, 374thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
@@ -376,6 +383,19 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
376{ 383{
377} 384}
378 385
386static inline struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
387 struct device *dev, int id, void *data,
388 const struct thermal_zone_of_device_ops *ops)
389{
390 return ERR_PTR(-ENODEV);
391}
392
393static inline
394void devm_thermal_zone_of_sensor_unregister(struct device *dev,
395 struct thermal_zone_device *tz)
396{
397}
398
379#endif 399#endif
380 400
381#if IS_ENABLED(CONFIG_THERMAL) 401#if IS_ENABLED(CONFIG_THERMAL)
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 97fd4e543846..62be0786d6d0 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -97,8 +97,21 @@ static inline void tick_broadcast_exit(void)
97 tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT); 97 tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
98} 98}
99 99
100enum tick_dep_bits {
101 TICK_DEP_BIT_POSIX_TIMER = 0,
102 TICK_DEP_BIT_PERF_EVENTS = 1,
103 TICK_DEP_BIT_SCHED = 2,
104 TICK_DEP_BIT_CLOCK_UNSTABLE = 3
105};
106
107#define TICK_DEP_MASK_NONE 0
108#define TICK_DEP_MASK_POSIX_TIMER (1 << TICK_DEP_BIT_POSIX_TIMER)
109#define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS)
110#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
111#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
112
100#ifdef CONFIG_NO_HZ_COMMON 113#ifdef CONFIG_NO_HZ_COMMON
101extern int tick_nohz_enabled; 114extern bool tick_nohz_enabled;
102extern int tick_nohz_tick_stopped(void); 115extern int tick_nohz_tick_stopped(void);
103extern void tick_nohz_idle_enter(void); 116extern void tick_nohz_idle_enter(void);
104extern void tick_nohz_idle_exit(void); 117extern void tick_nohz_idle_exit(void);
@@ -154,9 +167,73 @@ static inline int housekeeping_any_cpu(void)
154 return cpumask_any_and(housekeeping_mask, cpu_online_mask); 167 return cpumask_any_and(housekeeping_mask, cpu_online_mask);
155} 168}
156 169
157extern void tick_nohz_full_kick(void); 170extern void tick_nohz_dep_set(enum tick_dep_bits bit);
171extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
172extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
173extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
174extern void tick_nohz_dep_set_task(struct task_struct *tsk,
175 enum tick_dep_bits bit);
176extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
177 enum tick_dep_bits bit);
178extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
179 enum tick_dep_bits bit);
180extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
181 enum tick_dep_bits bit);
182
183/*
184 * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
185 * on top of static keys.
186 */
187static inline void tick_dep_set(enum tick_dep_bits bit)
188{
189 if (tick_nohz_full_enabled())
190 tick_nohz_dep_set(bit);
191}
192
193static inline void tick_dep_clear(enum tick_dep_bits bit)
194{
195 if (tick_nohz_full_enabled())
196 tick_nohz_dep_clear(bit);
197}
198
199static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
200{
201 if (tick_nohz_full_cpu(cpu))
202 tick_nohz_dep_set_cpu(cpu, bit);
203}
204
205static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
206{
207 if (tick_nohz_full_cpu(cpu))
208 tick_nohz_dep_clear_cpu(cpu, bit);
209}
210
211static inline void tick_dep_set_task(struct task_struct *tsk,
212 enum tick_dep_bits bit)
213{
214 if (tick_nohz_full_enabled())
215 tick_nohz_dep_set_task(tsk, bit);
216}
217static inline void tick_dep_clear_task(struct task_struct *tsk,
218 enum tick_dep_bits bit)
219{
220 if (tick_nohz_full_enabled())
221 tick_nohz_dep_clear_task(tsk, bit);
222}
223static inline void tick_dep_set_signal(struct signal_struct *signal,
224 enum tick_dep_bits bit)
225{
226 if (tick_nohz_full_enabled())
227 tick_nohz_dep_set_signal(signal, bit);
228}
229static inline void tick_dep_clear_signal(struct signal_struct *signal,
230 enum tick_dep_bits bit)
231{
232 if (tick_nohz_full_enabled())
233 tick_nohz_dep_clear_signal(signal, bit);
234}
235
158extern void tick_nohz_full_kick_cpu(int cpu); 236extern void tick_nohz_full_kick_cpu(int cpu);
159extern void tick_nohz_full_kick_all(void);
160extern void __tick_nohz_task_switch(void); 237extern void __tick_nohz_task_switch(void);
161#else 238#else
162static inline int housekeeping_any_cpu(void) 239static inline int housekeeping_any_cpu(void)
@@ -166,9 +243,21 @@ static inline int housekeeping_any_cpu(void)
166static inline bool tick_nohz_full_enabled(void) { return false; } 243static inline bool tick_nohz_full_enabled(void) { return false; }
167static inline bool tick_nohz_full_cpu(int cpu) { return false; } 244static inline bool tick_nohz_full_cpu(int cpu) { return false; }
168static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } 245static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
246
247static inline void tick_dep_set(enum tick_dep_bits bit) { }
248static inline void tick_dep_clear(enum tick_dep_bits bit) { }
249static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
250static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
251static inline void tick_dep_set_task(struct task_struct *tsk,
252 enum tick_dep_bits bit) { }
253static inline void tick_dep_clear_task(struct task_struct *tsk,
254 enum tick_dep_bits bit) { }
255static inline void tick_dep_set_signal(struct signal_struct *signal,
256 enum tick_dep_bits bit) { }
257static inline void tick_dep_clear_signal(struct signal_struct *signal,
258 enum tick_dep_bits bit) { }
259
169static inline void tick_nohz_full_kick_cpu(int cpu) { } 260static inline void tick_nohz_full_kick_cpu(int cpu) { }
170static inline void tick_nohz_full_kick(void) { }
171static inline void tick_nohz_full_kick_all(void) { }
172static inline void __tick_nohz_task_switch(void) { } 261static inline void __tick_nohz_task_switch(void) { }
173#endif 262#endif
174 263
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 25247220b4b7..e88005459035 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -50,6 +50,7 @@ struct tk_read_base {
50 * @offs_tai: Offset clock monotonic -> clock tai 50 * @offs_tai: Offset clock monotonic -> clock tai
51 * @tai_offset: The current UTC to TAI offset in seconds 51 * @tai_offset: The current UTC to TAI offset in seconds
52 * @clock_was_set_seq: The sequence number of clock was set events 52 * @clock_was_set_seq: The sequence number of clock was set events
53 * @cs_was_changed_seq: The sequence number of clocksource change events
53 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second 54 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
54 * @raw_time: Monotonic raw base time in timespec64 format 55 * @raw_time: Monotonic raw base time in timespec64 format
55 * @cycle_interval: Number of clock cycles in one NTP interval 56 * @cycle_interval: Number of clock cycles in one NTP interval
@@ -91,6 +92,7 @@ struct timekeeper {
91 ktime_t offs_tai; 92 ktime_t offs_tai;
92 s32 tai_offset; 93 s32 tai_offset;
93 unsigned int clock_was_set_seq; 94 unsigned int clock_was_set_seq;
95 u8 cs_was_changed_seq;
94 ktime_t next_leap_ktime; 96 ktime_t next_leap_ktime;
95 struct timespec64 raw_time; 97 struct timespec64 raw_time;
96 98
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index ec89d846324c..96f37bee3bc1 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -267,6 +267,64 @@ extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
267 struct timespec64 *ts_real); 267 struct timespec64 *ts_real);
268 268
269/* 269/*
270 * struct system_time_snapshot - simultaneous raw/real time capture with
271 * counter value
272 * @cycles: Clocksource counter value to produce the system times
273 * @real: Realtime system time
274 * @raw: Monotonic raw system time
275 * @clock_was_set_seq: The sequence number of clock was set events
276 * @cs_was_changed_seq: The sequence number of clocksource change events
277 */
278struct system_time_snapshot {
279 cycle_t cycles;
280 ktime_t real;
281 ktime_t raw;
282 unsigned int clock_was_set_seq;
283 u8 cs_was_changed_seq;
284};
285
286/*
287 * struct system_device_crosststamp - system/device cross-timestamp
288 * (syncronized capture)
289 * @device: Device time
290 * @sys_realtime: Realtime simultaneous with device time
291 * @sys_monoraw: Monotonic raw simultaneous with device time
292 */
293struct system_device_crosststamp {
294 ktime_t device;
295 ktime_t sys_realtime;
296 ktime_t sys_monoraw;
297};
298
299/*
300 * struct system_counterval_t - system counter value with the pointer to the
301 * corresponding clocksource
302 * @cycles: System counter value
303 * @cs: Clocksource corresponding to system counter value. Used by
304 * timekeeping code to verify comparibility of two cycle values
305 */
306struct system_counterval_t {
307 cycle_t cycles;
308 struct clocksource *cs;
309};
310
311/*
312 * Get cross timestamp between system clock and device clock
313 */
314extern int get_device_system_crosststamp(
315 int (*get_time_fn)(ktime_t *device_time,
316 struct system_counterval_t *system_counterval,
317 void *ctx),
318 void *ctx,
319 struct system_time_snapshot *history,
320 struct system_device_crosststamp *xtstamp);
321
322/*
323 * Simultaneously snapshot realtime and monotonic raw clocks
324 */
325extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
326
327/*
270 * Persistent clock related interfaces 328 * Persistent clock related interfaces
271 */ 329 */
272extern int persistent_clock_is_local; 330extern int persistent_clock_is_local;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 925730bc9fc1..0810f81b6db2 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -15,16 +15,6 @@ struct tracer;
15struct dentry; 15struct dentry;
16struct bpf_prog; 16struct bpf_prog;
17 17
18struct trace_print_flags {
19 unsigned long mask;
20 const char *name;
21};
22
23struct trace_print_flags_u64 {
24 unsigned long long mask;
25 const char *name;
26};
27
28const char *trace_print_flags_seq(struct trace_seq *p, const char *delim, 18const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
29 unsigned long flags, 19 unsigned long flags,
30 const struct trace_print_flags *flag_array); 20 const struct trace_print_flags *flag_array);
@@ -430,7 +420,8 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
430extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, 420extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
431 void *rec); 421 void *rec);
432extern void event_triggers_post_call(struct trace_event_file *file, 422extern void event_triggers_post_call(struct trace_event_file *file,
433 enum event_trigger_type tt); 423 enum event_trigger_type tt,
424 void *rec);
434 425
435bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); 426bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
436 427
@@ -517,7 +508,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
517 trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); 508 trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
518 509
519 if (tt) 510 if (tt)
520 event_triggers_post_call(file, tt); 511 event_triggers_post_call(file, tt, entry);
521} 512}
522 513
523/** 514/**
@@ -550,7 +541,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
550 irq_flags, pc, regs); 541 irq_flags, pc, regs);
551 542
552 if (tt) 543 if (tt)
553 event_triggers_post_call(file, tt); 544 event_triggers_post_call(file, tt, entry);
554} 545}
555 546
556#ifdef CONFIG_BPF_EVENTS 547#ifdef CONFIG_BPF_EVENTS
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index e1ee97c713bf..4ac89acb6136 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -3,13 +3,23 @@
3 3
4/* 4/*
5 * File can be included directly by headers who only want to access 5 * File can be included directly by headers who only want to access
6 * tracepoint->key to guard out of line trace calls. Otherwise 6 * tracepoint->key to guard out of line trace calls, or the definition of
7 * linux/tracepoint.h should be used. 7 * trace_print_flags{_u64}. Otherwise linux/tracepoint.h should be used.
8 */ 8 */
9 9
10#include <linux/atomic.h> 10#include <linux/atomic.h>
11#include <linux/static_key.h> 11#include <linux/static_key.h>
12 12
13struct trace_print_flags {
14 unsigned long mask;
15 const char *name;
16};
17
18struct trace_print_flags_u64 {
19 unsigned long long mask;
20 const char *name;
21};
22
13struct tracepoint_func { 23struct tracepoint_func {
14 void *func; 24 void *func;
15 void *data; 25 void *data;
diff --git a/include/linux/tty.h b/include/linux/tty.h
index d9fb4b043f56..3b09f235db66 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -302,6 +302,7 @@ struct tty_struct {
302 struct work_struct hangup_work; 302 struct work_struct hangup_work;
303 void *disc_data; 303 void *disc_data;
304 void *driver_data; 304 void *driver_data;
305 spinlock_t files_lock; /* protects tty_files list */
305 struct list_head tty_files; 306 struct list_head tty_files;
306 307
307#define N_TTY_BUF_SIZE 4096 308#define N_TTY_BUF_SIZE 4096
@@ -336,7 +337,6 @@ struct tty_file_private {
336#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */ 337#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */
337#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */ 338#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */
338#define TTY_EXCLUSIVE 3 /* Exclusive open mode */ 339#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
339#define TTY_DEBUG 4 /* Debugging */
340#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ 340#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
341#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */ 341#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
342#define TTY_LDISC_OPEN 11 /* Line discipline is open */ 342#define TTY_LDISC_OPEN 11 /* Line discipline is open */
@@ -433,8 +433,6 @@ extern struct device *tty_register_device_attr(struct tty_driver *driver,
433 void *drvdata, 433 void *drvdata,
434 const struct attribute_group **attr_grp); 434 const struct attribute_group **attr_grp);
435extern void tty_unregister_device(struct tty_driver *driver, unsigned index); 435extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
436extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
437 int buflen);
438extern void tty_write_message(struct tty_struct *tty, char *msg); 436extern void tty_write_message(struct tty_struct *tty, char *msg);
439extern int tty_send_xchar(struct tty_struct *tty, char ch); 437extern int tty_send_xchar(struct tty_struct *tty, char ch);
440extern int tty_put_char(struct tty_struct *tty, unsigned char c); 438extern int tty_put_char(struct tty_struct *tty, unsigned char c);
@@ -446,12 +444,7 @@ extern void tty_unthrottle(struct tty_struct *tty);
446extern int tty_throttle_safe(struct tty_struct *tty); 444extern int tty_throttle_safe(struct tty_struct *tty);
447extern int tty_unthrottle_safe(struct tty_struct *tty); 445extern int tty_unthrottle_safe(struct tty_struct *tty);
448extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); 446extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
449extern void tty_driver_remove_tty(struct tty_driver *driver,
450 struct tty_struct *tty);
451extern void tty_free_termios(struct tty_struct *tty);
452extern int is_current_pgrp_orphaned(void); 447extern int is_current_pgrp_orphaned(void);
453extern int is_ignored(int sig);
454extern int tty_signal(int sig, struct tty_struct *tty);
455extern void tty_hangup(struct tty_struct *tty); 448extern void tty_hangup(struct tty_struct *tty);
456extern void tty_vhangup(struct tty_struct *tty); 449extern void tty_vhangup(struct tty_struct *tty);
457extern int tty_hung_up_p(struct file *filp); 450extern int tty_hung_up_p(struct file *filp);
@@ -493,7 +486,8 @@ extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
493extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); 486extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
494extern void tty_ldisc_deref(struct tty_ldisc *); 487extern void tty_ldisc_deref(struct tty_ldisc *);
495extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); 488extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
496extern void tty_ldisc_hangup(struct tty_struct *tty); 489extern void tty_ldisc_hangup(struct tty_struct *tty, bool reset);
490extern int tty_ldisc_reinit(struct tty_struct *tty, int disc);
497extern const struct file_operations tty_ldiscs_proc_fops; 491extern const struct file_operations tty_ldiscs_proc_fops;
498 492
499extern void tty_wakeup(struct tty_struct *tty); 493extern void tty_wakeup(struct tty_struct *tty);
@@ -508,16 +502,13 @@ extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx);
508extern int tty_alloc_file(struct file *file); 502extern int tty_alloc_file(struct file *file);
509extern void tty_add_file(struct tty_struct *tty, struct file *file); 503extern void tty_add_file(struct tty_struct *tty, struct file *file);
510extern void tty_free_file(struct file *file); 504extern void tty_free_file(struct file *file);
511extern void free_tty_struct(struct tty_struct *tty);
512extern void deinitialize_tty_struct(struct tty_struct *tty);
513extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); 505extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
514extern int tty_release(struct inode *inode, struct file *filp); 506extern int tty_release(struct inode *inode, struct file *filp);
515extern int tty_init_termios(struct tty_struct *tty); 507extern void tty_init_termios(struct tty_struct *tty);
516extern int tty_standard_install(struct tty_driver *driver, 508extern int tty_standard_install(struct tty_driver *driver,
517 struct tty_struct *tty); 509 struct tty_struct *tty);
518 510
519extern struct mutex tty_mutex; 511extern struct mutex tty_mutex;
520extern spinlock_t tty_files_lock;
521 512
522#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) 513#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
523 514
@@ -575,43 +566,29 @@ static inline int tty_port_users(struct tty_port *port)
575 566
576extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); 567extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
577extern int tty_unregister_ldisc(int disc); 568extern int tty_unregister_ldisc(int disc);
578extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); 569extern int tty_set_ldisc(struct tty_struct *tty, int disc);
579extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); 570extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
580extern void tty_ldisc_release(struct tty_struct *tty); 571extern void tty_ldisc_release(struct tty_struct *tty);
581extern void tty_ldisc_init(struct tty_struct *tty); 572extern void tty_ldisc_init(struct tty_struct *tty);
582extern void tty_ldisc_deinit(struct tty_struct *tty); 573extern void tty_ldisc_deinit(struct tty_struct *tty);
583extern void tty_ldisc_begin(void); 574extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
584 575 char *f, int count);
585static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
586 char *f, int count)
587{
588 if (ld->ops->receive_buf2)
589 count = ld->ops->receive_buf2(ld->tty, p, f, count);
590 else {
591 count = min_t(int, count, ld->tty->receive_room);
592 if (count)
593 ld->ops->receive_buf(ld->tty, p, f, count);
594 }
595 return count;
596}
597
598 576
599/* n_tty.c */ 577/* n_tty.c */
600extern struct tty_ldisc_ops tty_ldisc_N_TTY;
601extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); 578extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
579extern void __init n_tty_init(void);
602 580
603/* tty_audit.c */ 581/* tty_audit.c */
604#ifdef CONFIG_AUDIT 582#ifdef CONFIG_AUDIT
605extern void tty_audit_add_data(struct tty_struct *tty, const void *data, 583extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
606 size_t size, unsigned icanon); 584 size_t size);
607extern void tty_audit_exit(void); 585extern void tty_audit_exit(void);
608extern void tty_audit_fork(struct signal_struct *sig); 586extern void tty_audit_fork(struct signal_struct *sig);
609extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); 587extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
610extern void tty_audit_push(struct tty_struct *tty); 588extern int tty_audit_push(void);
611extern int tty_audit_push_current(void);
612#else 589#else
613static inline void tty_audit_add_data(struct tty_struct *tty, const void *data, 590static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
614 size_t size, unsigned icanon) 591 size_t size)
615{ 592{
616} 593}
617static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) 594static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
@@ -623,10 +600,7 @@ static inline void tty_audit_exit(void)
623static inline void tty_audit_fork(struct signal_struct *sig) 600static inline void tty_audit_fork(struct signal_struct *sig)
624{ 601{
625} 602}
626static inline void tty_audit_push(struct tty_struct *tty) 603static inline int tty_audit_push(void)
627{
628}
629static inline int tty_audit_push_current(void)
630{ 604{
631 return 0; 605 return 0;
632} 606}
@@ -648,11 +622,11 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
648 622
649/* tty_mutex.c */ 623/* tty_mutex.c */
650/* functions for preparation of BKL removal */ 624/* functions for preparation of BKL removal */
651extern void __lockfunc tty_lock(struct tty_struct *tty); 625extern void tty_lock(struct tty_struct *tty);
652extern int tty_lock_interruptible(struct tty_struct *tty); 626extern int tty_lock_interruptible(struct tty_struct *tty);
653extern void __lockfunc tty_unlock(struct tty_struct *tty); 627extern void tty_unlock(struct tty_struct *tty);
654extern void __lockfunc tty_lock_slave(struct tty_struct *tty); 628extern void tty_lock_slave(struct tty_struct *tty);
655extern void __lockfunc tty_unlock_slave(struct tty_struct *tty); 629extern void tty_unlock_slave(struct tty_struct *tty);
656extern void tty_set_lock_subclass(struct tty_struct *tty); 630extern void tty_set_lock_subclass(struct tty_struct *tty);
657 631
658#ifdef CONFIG_PROC_FS 632#ifdef CONFIG_PROC_FS
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 161052477f77..b742b5e47cc2 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -7,7 +7,7 @@
7 * defined; unless noted otherwise, they are optional, and can be 7 * defined; unless noted otherwise, they are optional, and can be
8 * filled in with a null pointer. 8 * filled in with a null pointer.
9 * 9 *
10 * struct tty_struct * (*lookup)(struct tty_driver *self, int idx) 10 * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
11 * 11 *
12 * Return the tty device corresponding to idx, NULL if there is not 12 * Return the tty device corresponding to idx, NULL if there is not
13 * one currently in use and an ERR_PTR value on error. Called under 13 * one currently in use and an ERR_PTR value on error. Called under
@@ -250,7 +250,7 @@ struct serial_icounter_struct;
250 250
251struct tty_operations { 251struct tty_operations {
252 struct tty_struct * (*lookup)(struct tty_driver *driver, 252 struct tty_struct * (*lookup)(struct tty_driver *driver,
253 struct inode *inode, int idx); 253 struct file *filp, int idx);
254 int (*install)(struct tty_driver *driver, struct tty_struct *tty); 254 int (*install)(struct tty_driver *driver, struct tty_struct *tty);
255 void (*remove)(struct tty_driver *driver, struct tty_struct *tty); 255 void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
256 int (*open)(struct tty_struct * tty, struct file * filp); 256 int (*open)(struct tty_struct * tty, struct file * filp);
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 00c9d688d7b7..3971cf0eb467 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -25,12 +25,6 @@
25 * buffers of any input characters it may have queued to be 25 * buffers of any input characters it may have queued to be
26 * delivered to the user mode process. 26 * delivered to the user mode process.
27 * 27 *
28 * ssize_t (*chars_in_buffer)(struct tty_struct *tty);
29 *
30 * This function returns the number of input characters the line
31 * discipline may have queued up to be delivered to the user mode
32 * process.
33 *
34 * ssize_t (*read)(struct tty_struct * tty, struct file * file, 28 * ssize_t (*read)(struct tty_struct * tty, struct file * file,
35 * unsigned char * buf, size_t nr); 29 * unsigned char * buf, size_t nr);
36 * 30 *
@@ -104,11 +98,6 @@
104 * seek to perform this action quickly but should wait until 98 * seek to perform this action quickly but should wait until
105 * any pending driver I/O is completed. 99 * any pending driver I/O is completed.
106 * 100 *
107 * void (*fasync)(struct tty_struct *, int on)
108 *
109 * Notify line discipline when signal-driven I/O is enabled or
110 * disabled.
111 *
112 * void (*dcd_change)(struct tty_struct *tty, unsigned int status) 101 * void (*dcd_change)(struct tty_struct *tty, unsigned int status)
113 * 102 *
114 * Tells the discipline that the DCD pin has changed its status. 103 * Tells the discipline that the DCD pin has changed its status.
@@ -188,7 +177,6 @@ struct tty_ldisc_ops {
188 int (*open)(struct tty_struct *); 177 int (*open)(struct tty_struct *);
189 void (*close)(struct tty_struct *); 178 void (*close)(struct tty_struct *);
190 void (*flush_buffer)(struct tty_struct *tty); 179 void (*flush_buffer)(struct tty_struct *tty);
191 ssize_t (*chars_in_buffer)(struct tty_struct *tty);
192 ssize_t (*read)(struct tty_struct *tty, struct file *file, 180 ssize_t (*read)(struct tty_struct *tty, struct file *file,
193 unsigned char __user *buf, size_t nr); 181 unsigned char __user *buf, size_t nr);
194 ssize_t (*write)(struct tty_struct *tty, struct file *file, 182 ssize_t (*write)(struct tty_struct *tty, struct file *file,
@@ -209,7 +197,6 @@ struct tty_ldisc_ops {
209 char *fp, int count); 197 char *fp, int count);
210 void (*write_wakeup)(struct tty_struct *); 198 void (*write_wakeup)(struct tty_struct *);
211 void (*dcd_change)(struct tty_struct *, unsigned int); 199 void (*dcd_change)(struct tty_struct *, unsigned int);
212 void (*fasync)(struct tty_struct *tty, int on);
213 int (*receive_buf2)(struct tty_struct *, const unsigned char *cp, 200 int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
214 char *fp, int count); 201 char *fp, int count);
215 202
diff --git a/include/linux/uio.h b/include/linux/uio.h
index fd9bcfedad42..1b5d1cd796e2 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -87,6 +87,7 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
87size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); 87size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
88size_t iov_iter_zero(size_t bytes, struct iov_iter *); 88size_t iov_iter_zero(size_t bytes, struct iov_iter *);
89unsigned long iov_iter_alignment(const struct iov_iter *i); 89unsigned long iov_iter_alignment(const struct iov_iter *i);
90unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
90void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, 91void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
91 unsigned long nr_segs, size_t count); 92 unsigned long nr_segs, size_t count);
92void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec, 93void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
index 99c1b4d20b0f..33383ca23837 100644
--- a/include/linux/unaligned/access_ok.h
+++ b/include/linux/unaligned/access_ok.h
@@ -4,62 +4,62 @@
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <asm/byteorder.h> 5#include <asm/byteorder.h>
6 6
7static inline u16 get_unaligned_le16(const void *p) 7static __always_inline u16 get_unaligned_le16(const void *p)
8{ 8{
9 return le16_to_cpup((__le16 *)p); 9 return le16_to_cpup((__le16 *)p);
10} 10}
11 11
12static inline u32 get_unaligned_le32(const void *p) 12static __always_inline u32 get_unaligned_le32(const void *p)
13{ 13{
14 return le32_to_cpup((__le32 *)p); 14 return le32_to_cpup((__le32 *)p);
15} 15}
16 16
17static inline u64 get_unaligned_le64(const void *p) 17static __always_inline u64 get_unaligned_le64(const void *p)
18{ 18{
19 return le64_to_cpup((__le64 *)p); 19 return le64_to_cpup((__le64 *)p);
20} 20}
21 21
22static inline u16 get_unaligned_be16(const void *p) 22static __always_inline u16 get_unaligned_be16(const void *p)
23{ 23{
24 return be16_to_cpup((__be16 *)p); 24 return be16_to_cpup((__be16 *)p);
25} 25}
26 26
27static inline u32 get_unaligned_be32(const void *p) 27static __always_inline u32 get_unaligned_be32(const void *p)
28{ 28{
29 return be32_to_cpup((__be32 *)p); 29 return be32_to_cpup((__be32 *)p);
30} 30}
31 31
32static inline u64 get_unaligned_be64(const void *p) 32static __always_inline u64 get_unaligned_be64(const void *p)
33{ 33{
34 return be64_to_cpup((__be64 *)p); 34 return be64_to_cpup((__be64 *)p);
35} 35}
36 36
37static inline void put_unaligned_le16(u16 val, void *p) 37static __always_inline void put_unaligned_le16(u16 val, void *p)
38{ 38{
39 *((__le16 *)p) = cpu_to_le16(val); 39 *((__le16 *)p) = cpu_to_le16(val);
40} 40}
41 41
42static inline void put_unaligned_le32(u32 val, void *p) 42static __always_inline void put_unaligned_le32(u32 val, void *p)
43{ 43{
44 *((__le32 *)p) = cpu_to_le32(val); 44 *((__le32 *)p) = cpu_to_le32(val);
45} 45}
46 46
47static inline void put_unaligned_le64(u64 val, void *p) 47static __always_inline void put_unaligned_le64(u64 val, void *p)
48{ 48{
49 *((__le64 *)p) = cpu_to_le64(val); 49 *((__le64 *)p) = cpu_to_le64(val);
50} 50}
51 51
52static inline void put_unaligned_be16(u16 val, void *p) 52static __always_inline void put_unaligned_be16(u16 val, void *p)
53{ 53{
54 *((__be16 *)p) = cpu_to_be16(val); 54 *((__be16 *)p) = cpu_to_be16(val);
55} 55}
56 56
57static inline void put_unaligned_be32(u32 val, void *p) 57static __always_inline void put_unaligned_be32(u32 val, void *p)
58{ 58{
59 *((__be32 *)p) = cpu_to_be32(val); 59 *((__be32 *)p) = cpu_to_be32(val);
60} 60}
61 61
62static inline void put_unaligned_be64(u64 val, void *p) 62static __always_inline void put_unaligned_be64(u64 val, void *p)
63{ 63{
64 *((__be64 *)p) = cpu_to_be64(val); 64 *((__be64 *)p) = cpu_to_be64(val);
65} 65}
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 89533ba38691..6a9a0c28415d 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -50,6 +50,7 @@ struct ep_device;
50 * struct usb_host_endpoint - host-side endpoint descriptor and queue 50 * struct usb_host_endpoint - host-side endpoint descriptor and queue
51 * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder 51 * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
52 * @ss_ep_comp: SuperSpeed companion descriptor for this endpoint 52 * @ss_ep_comp: SuperSpeed companion descriptor for this endpoint
53 * @ssp_isoc_ep_comp: SuperSpeedPlus isoc companion descriptor for this endpoint
53 * @urb_list: urbs queued to this endpoint; maintained by usbcore 54 * @urb_list: urbs queued to this endpoint; maintained by usbcore
54 * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH) 55 * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
55 * with one or more transfer descriptors (TDs) per urb 56 * with one or more transfer descriptors (TDs) per urb
@@ -65,6 +66,7 @@ struct ep_device;
65struct usb_host_endpoint { 66struct usb_host_endpoint {
66 struct usb_endpoint_descriptor desc; 67 struct usb_endpoint_descriptor desc;
67 struct usb_ss_ep_comp_descriptor ss_ep_comp; 68 struct usb_ss_ep_comp_descriptor ss_ep_comp;
69 struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp;
68 struct list_head urb_list; 70 struct list_head urb_list;
69 void *hcpriv; 71 void *hcpriv;
70 struct ep_device *ep_dev; /* For sysfs info */ 72 struct ep_device *ep_dev; /* For sysfs info */
@@ -330,6 +332,7 @@ struct usb_host_bos {
330 struct usb_ss_cap_descriptor *ss_cap; 332 struct usb_ss_cap_descriptor *ss_cap;
331 struct usb_ssp_cap_descriptor *ssp_cap; 333 struct usb_ssp_cap_descriptor *ssp_cap;
332 struct usb_ss_container_id_descriptor *ss_id; 334 struct usb_ss_container_id_descriptor *ss_id;
335 struct usb_ptm_cap_descriptor *ptm_cap;
333}; 336};
334 337
335int __usb_get_extra_descriptor(char *buffer, unsigned size, 338int __usb_get_extra_descriptor(char *buffer, unsigned size,
@@ -375,7 +378,6 @@ struct usb_bus {
375 struct usb_devmap devmap; /* device address allocation map */ 378 struct usb_devmap devmap; /* device address allocation map */
376 struct usb_device *root_hub; /* Root hub */ 379 struct usb_device *root_hub; /* Root hub */
377 struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ 380 struct usb_bus *hs_companion; /* Companion EHCI bus, if any */
378 struct list_head bus_list; /* list of busses */
379 381
380 struct mutex usb_address0_mutex; /* unaddressed device mutex */ 382 struct mutex usb_address0_mutex; /* unaddressed device mutex */
381 383
@@ -642,9 +644,10 @@ extern struct usb_device *usb_hub_find_child(struct usb_device *hdev,
642 if (!child) continue; else 644 if (!child) continue; else
643 645
644/* USB device locking */ 646/* USB device locking */
645#define usb_lock_device(udev) device_lock(&(udev)->dev) 647#define usb_lock_device(udev) device_lock(&(udev)->dev)
646#define usb_unlock_device(udev) device_unlock(&(udev)->dev) 648#define usb_unlock_device(udev) device_unlock(&(udev)->dev)
647#define usb_trylock_device(udev) device_trylock(&(udev)->dev) 649#define usb_lock_device_interruptible(udev) device_lock_interruptible(&(udev)->dev)
650#define usb_trylock_device(udev) device_trylock(&(udev)->dev)
648extern int usb_lock_device_for_reset(struct usb_device *udev, 651extern int usb_lock_device_for_reset(struct usb_device *udev,
649 const struct usb_interface *iface); 652 const struct usb_interface *iface);
650 653
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 1074b8921a5d..2b81b24eb5aa 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -126,6 +126,10 @@ struct usb_os_desc_table {
126 * string identifiers assigned during @bind(). If this 126 * string identifiers assigned during @bind(). If this
127 * pointer is null after initiation, the function will not 127 * pointer is null after initiation, the function will not
128 * be available at super speed. 128 * be available at super speed.
129 * @ssp_descriptors: Table of super speed plus descriptors, using
130 * interface and string identifiers assigned during @bind(). If
131 * this pointer is null after initiation, the function will not
132 * be available at super speed plus.
129 * @config: assigned when @usb_add_function() is called; this is the 133 * @config: assigned when @usb_add_function() is called; this is the
130 * configuration with which this function is associated. 134 * configuration with which this function is associated.
131 * @os_desc_table: Table of (interface id, os descriptors) pairs. The function 135 * @os_desc_table: Table of (interface id, os descriptors) pairs. The function
@@ -186,6 +190,7 @@ struct usb_function {
186 struct usb_descriptor_header **fs_descriptors; 190 struct usb_descriptor_header **fs_descriptors;
187 struct usb_descriptor_header **hs_descriptors; 191 struct usb_descriptor_header **hs_descriptors;
188 struct usb_descriptor_header **ss_descriptors; 192 struct usb_descriptor_header **ss_descriptors;
193 struct usb_descriptor_header **ssp_descriptors;
189 194
190 struct usb_configuration *config; 195 struct usb_configuration *config;
191 196
@@ -317,6 +322,7 @@ struct usb_configuration {
317 unsigned superspeed:1; 322 unsigned superspeed:1;
318 unsigned highspeed:1; 323 unsigned highspeed:1;
319 unsigned fullspeed:1; 324 unsigned fullspeed:1;
325 unsigned superspeed_plus:1;
320 struct usb_function *interface[MAX_CONFIG_INTERFACES]; 326 struct usb_function *interface[MAX_CONFIG_INTERFACES];
321}; 327};
322 328
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index d82d0068872b..5d4e151c49bf 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -595,6 +595,10 @@ struct usb_gadget_ops {
595 * only supports HNP on a different root port. 595 * only supports HNP on a different root port.
596 * @b_hnp_enable: OTG device feature flag, indicating that the A-Host 596 * @b_hnp_enable: OTG device feature flag, indicating that the A-Host
597 * enabled HNP support. 597 * enabled HNP support.
598 * @hnp_polling_support: OTG device feature flag, indicating if the OTG device
599 * in peripheral mode can support HNP polling.
600 * @host_request_flag: OTG device feature flag, indicating if A-Peripheral
601 * or B-Peripheral wants to take host role.
598 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to 602 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
599 * MaxPacketSize. 603 * MaxPacketSize.
600 * @is_selfpowered: if the gadget is self-powered. 604 * @is_selfpowered: if the gadget is self-powered.
@@ -642,6 +646,8 @@ struct usb_gadget {
642 unsigned b_hnp_enable:1; 646 unsigned b_hnp_enable:1;
643 unsigned a_hnp_support:1; 647 unsigned a_hnp_support:1;
644 unsigned a_alt_hnp_support:1; 648 unsigned a_alt_hnp_support:1;
649 unsigned hnp_polling_support:1;
650 unsigned host_request_flag:1;
645 unsigned quirk_ep_out_aligned_size:1; 651 unsigned quirk_ep_out_aligned_size:1;
646 unsigned quirk_altset_not_supp:1; 652 unsigned quirk_altset_not_supp:1;
647 unsigned quirk_stall_not_supp:1; 653 unsigned quirk_stall_not_supp:1;
@@ -729,6 +735,16 @@ static inline int gadget_is_superspeed(struct usb_gadget *g)
729} 735}
730 736
731/** 737/**
738 * gadget_is_superspeed_plus() - return true if the hardware handles
739 * superspeed plus
740 * @g: controller that might support superspeed plus
741 */
742static inline int gadget_is_superspeed_plus(struct usb_gadget *g)
743{
744 return g->max_speed >= USB_SPEED_SUPER_PLUS;
745}
746
747/**
732 * gadget_is_otg - return true iff the hardware is OTG-ready 748 * gadget_is_otg - return true iff the hardware is OTG-ready
733 * @g: controller that might have a Mini-AB connector 749 * @g: controller that might have a Mini-AB connector
734 * 750 *
@@ -1126,6 +1142,7 @@ extern int usb_add_gadget_udc_release(struct device *parent,
1126 struct usb_gadget *gadget, void (*release)(struct device *dev)); 1142 struct usb_gadget *gadget, void (*release)(struct device *dev));
1127extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); 1143extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
1128extern void usb_del_gadget_udc(struct usb_gadget *gadget); 1144extern void usb_del_gadget_udc(struct usb_gadget *gadget);
1145extern char *usb_get_gadget_udc_name(void);
1129 1146
1130/*-------------------------------------------------------------------------*/ 1147/*-------------------------------------------------------------------------*/
1131 1148
@@ -1194,7 +1211,8 @@ struct usb_function;
1194int usb_assign_descriptors(struct usb_function *f, 1211int usb_assign_descriptors(struct usb_function *f,
1195 struct usb_descriptor_header **fs, 1212 struct usb_descriptor_header **fs,
1196 struct usb_descriptor_header **hs, 1213 struct usb_descriptor_header **hs,
1197 struct usb_descriptor_header **ss); 1214 struct usb_descriptor_header **ss,
1215 struct usb_descriptor_header **ssp);
1198void usb_free_all_descriptors(struct usb_function *f); 1216void usb_free_all_descriptors(struct usb_function *f);
1199 1217
1200struct usb_descriptor_header *usb_otg_descriptor_alloc( 1218struct usb_descriptor_header *usb_otg_descriptor_alloc(
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 4dcf8446dbcd..b98f831dcda3 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -23,6 +23,7 @@
23 23
24#include <linux/rwsem.h> 24#include <linux/rwsem.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/idr.h>
26 27
27#define MAX_TOPO_LEVEL 6 28#define MAX_TOPO_LEVEL 6
28 29
@@ -630,8 +631,8 @@ extern void usb_set_device_state(struct usb_device *udev,
630 631
631/* exported only within usbcore */ 632/* exported only within usbcore */
632 633
633extern struct list_head usb_bus_list; 634extern struct idr usb_bus_idr;
634extern struct mutex usb_bus_list_lock; 635extern struct mutex usb_bus_idr_lock;
635extern wait_queue_head_t usb_kill_urb_queue; 636extern wait_queue_head_t usb_kill_urb_queue;
636 637
637 638
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index e159b39f67a2..974c3796a23f 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -22,6 +22,7 @@
22#define USB_AHBBURST (MSM_USB_BASE + 0x0090) 22#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
23#define USB_AHBMODE (MSM_USB_BASE + 0x0098) 23#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
24#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0) 24#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0)
25#define ULPI_TX_PKT_EN_CLR_FIX BIT(19)
25 26
26#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ 27#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
27 28
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 96ddfb7ab018..0b3da40a525e 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -124,7 +124,7 @@ struct musb_hdrc_platform_data {
124 int (*set_power)(int state); 124 int (*set_power)(int state);
125 125
126 /* MUSB configuration-specific details */ 126 /* MUSB configuration-specific details */
127 struct musb_hdrc_config *config; 127 const struct musb_hdrc_config *config;
128 128
129 /* Architecture specific board data */ 129 /* Architecture specific board data */
130 void *board_data; 130 void *board_data;
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 974bce93aa28..de3237fce6b2 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -16,6 +16,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np);
16bool of_usb_host_tpl_support(struct device_node *np); 16bool of_usb_host_tpl_support(struct device_node *np);
17int of_usb_update_otg_caps(struct device_node *np, 17int of_usb_update_otg_caps(struct device_node *np,
18 struct usb_otg_caps *otg_caps); 18 struct usb_otg_caps *otg_caps);
19struct device_node *usb_of_get_child_node(struct device_node *parent,
20 int portnum);
19#else 21#else
20static inline enum usb_dr_mode 22static inline enum usb_dr_mode
21of_usb_get_dr_mode_by_phy(struct device_node *phy_np) 23of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
@@ -31,6 +33,11 @@ static inline int of_usb_update_otg_caps(struct device_node *np,
31{ 33{
32 return 0; 34 return 0;
33} 35}
36static inline struct device_node *usb_of_get_child_node
37 (struct device_node *parent, int portnum)
38{
39 return NULL;
40}
34#endif 41#endif
35 42
36#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT) 43#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index f728f1854829..24198e16f849 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -40,6 +40,18 @@
40#define PROTO_HOST (1) 40#define PROTO_HOST (1)
41#define PROTO_GADGET (2) 41#define PROTO_GADGET (2)
42 42
43#define OTG_STS_SELECTOR 0xF000 /* OTG status selector, according to
44 * OTG and EH 2.0 Chapter 6.2.3
45 * Table:6-4
46 */
47
48#define HOST_REQUEST_FLAG 1 /* Host request flag, according to
49 * OTG and EH 2.0 Charpter 6.2.3
50 * Table:6-5
51 */
52
53#define T_HOST_REQ_POLL (1500) /* 1500ms, HNP polling interval */
54
43enum otg_fsm_timer { 55enum otg_fsm_timer {
44 /* Standard OTG timers */ 56 /* Standard OTG timers */
45 A_WAIT_VRISE, 57 A_WAIT_VRISE,
@@ -48,6 +60,7 @@ enum otg_fsm_timer {
48 A_AIDL_BDIS, 60 A_AIDL_BDIS,
49 B_ASE0_BRST, 61 B_ASE0_BRST,
50 A_BIDL_ADIS, 62 A_BIDL_ADIS,
63 B_AIDL_BDIS,
51 64
52 /* Auxiliary timers */ 65 /* Auxiliary timers */
53 B_SE0_SRP, 66 B_SE0_SRP,
@@ -119,6 +132,8 @@ struct otg_fsm {
119 /* Current usb protocol used: 0:undefine; 1:host; 2:client */ 132 /* Current usb protocol used: 0:undefine; 1:host; 2:client */
120 int protocol; 133 int protocol;
121 struct mutex lock; 134 struct mutex lock;
135 u8 *host_req_flag;
136 struct delayed_work hnp_polling_work;
122}; 137};
123 138
124struct otg_fsm_ops { 139struct otg_fsm_ops {
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 4db191fe8c2c..00a47d058d83 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -184,6 +184,7 @@ struct renesas_usbhs_driver_param {
184}; 184};
185 185
186#define USBHS_TYPE_RCAR_GEN2 1 186#define USBHS_TYPE_RCAR_GEN2 1
187#define USBHS_TYPE_RCAR_GEN3 2
187 188
188/* 189/*
189 * option: 190 * option:
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h
index cb33fff2ba0b..305ee8db7faf 100644
--- a/include/linux/usb/storage.h
+++ b/include/linux/usb/storage.h
@@ -45,9 +45,9 @@
45 45
46#define USB_PR_DEVICE 0xff /* Use device's value */ 46#define USB_PR_DEVICE 0xff /* Use device's value */
47 47
48 /* 48/*
49 * Bulk only data structures 49 * Bulk only data structures
50 */ 50 */
51 51
52/* command block wrapper */ 52/* command block wrapper */
53struct bulk_cb_wrap { 53struct bulk_cb_wrap {
@@ -56,18 +56,18 @@ struct bulk_cb_wrap {
56 __le32 DataTransferLength; /* size of data */ 56 __le32 DataTransferLength; /* size of data */
57 __u8 Flags; /* direction in bit 0 */ 57 __u8 Flags; /* direction in bit 0 */
58 __u8 Lun; /* LUN normally 0 */ 58 __u8 Lun; /* LUN normally 0 */
59 __u8 Length; /* of of the CDB */ 59 __u8 Length; /* length of the CDB */
60 __u8 CDB[16]; /* max command */ 60 __u8 CDB[16]; /* max command */
61}; 61};
62 62
63#define US_BULK_CB_WRAP_LEN 31 63#define US_BULK_CB_WRAP_LEN 31
64#define US_BULK_CB_SIGN 0x43425355 /*spells out USBC */ 64#define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */
65#define US_BULK_FLAG_IN (1 << 7) 65#define US_BULK_FLAG_IN (1 << 7)
66#define US_BULK_FLAG_OUT 0 66#define US_BULK_FLAG_OUT 0
67 67
68/* command status wrapper */ 68/* command status wrapper */
69struct bulk_cs_wrap { 69struct bulk_cs_wrap {
70 __le32 Signature; /* should = 'USBS' */ 70 __le32 Signature; /* contains 'USBS' */
71 __u32 Tag; /* same as original command */ 71 __u32 Tag; /* same as original command */
72 __le32 Residue; /* amount not transferred */ 72 __le32 Residue; /* amount not transferred */
73 __u8 Status; /* see below */ 73 __u8 Status; /* see below */
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 7f5f78bd15ad..245f57dbbb61 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -79,6 +79,8 @@
79 /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \ 79 /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
80 US_FLAG(MAX_SECTORS_240, 0x08000000) \ 80 US_FLAG(MAX_SECTORS_240, 0x08000000) \
81 /* Sets max_sectors to 240 */ \ 81 /* Sets max_sectors to 240 */ \
82 US_FLAG(NO_REPORT_LUNS, 0x10000000) \
83 /* Cannot handle REPORT_LUNS */ \
82 84
83#define US_FLAG(name, value) US_FL_##name = value , 85#define US_FLAG(name, value) US_FL_##name = value ,
84enum { US_DO_ALL_FLAGS }; 86enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 610a86a892b8..0ecae0b1cd34 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -92,6 +92,17 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group);
92extern long vfio_external_check_extension(struct vfio_group *group, 92extern long vfio_external_check_extension(struct vfio_group *group,
93 unsigned long arg); 93 unsigned long arg);
94 94
95/*
96 * Sub-module helpers
97 */
98struct vfio_info_cap {
99 struct vfio_info_cap_header *buf;
100 size_t size;
101};
102extern struct vfio_info_cap_header *vfio_info_cap_add(
103 struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
104extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
105
95struct pci_dev; 106struct pci_dev;
96#ifdef CONFIG_EEH 107#ifdef CONFIG_EEH
97extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); 108extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 69e1d4a1f1b3..b39a5f3153bd 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -36,6 +36,26 @@
36struct pci_dev; 36struct pci_dev;
37 37
38/** 38/**
39 * enum vga_switcheroo_handler_flags_t - handler flags bitmask
40 * @VGA_SWITCHEROO_CAN_SWITCH_DDC: whether the handler is able to switch the
41 * DDC lines separately. This signals to clients that they should call
42 * drm_get_edid_switcheroo() to probe the EDID
43 * @VGA_SWITCHEROO_NEEDS_EDP_CONFIG: whether the handler is unable to switch
44 * the AUX channel separately. This signals to clients that the active
45 * GPU needs to train the link and communicate the link parameters to the
46 * inactive GPU (mediated by vga_switcheroo). The inactive GPU may then
47 * skip the AUX handshake and set up its output with these pre-calibrated
48 * values (DisplayPort specification v1.1a, section 2.5.3.3)
49 *
50 * Handler flags bitmask. Used by handlers to declare their capabilities upon
51 * registering with vga_switcheroo.
52 */
53enum vga_switcheroo_handler_flags_t {
54 VGA_SWITCHEROO_CAN_SWITCH_DDC = (1 << 0),
55 VGA_SWITCHEROO_NEEDS_EDP_CONFIG = (1 << 1),
56};
57
58/**
39 * enum vga_switcheroo_state - client power state 59 * enum vga_switcheroo_state - client power state
40 * @VGA_SWITCHEROO_OFF: off 60 * @VGA_SWITCHEROO_OFF: off
41 * @VGA_SWITCHEROO_ON: on 61 * @VGA_SWITCHEROO_ON: on
@@ -82,6 +102,9 @@ enum vga_switcheroo_client_id {
82 * Mandatory. For muxless machines this should be a no-op. Returning 0 102 * Mandatory. For muxless machines this should be a no-op. Returning 0
83 * denotes success, anything else failure (in which case the switch is 103 * denotes success, anything else failure (in which case the switch is
84 * aborted) 104 * aborted)
105 * @switch_ddc: switch DDC lines to given client.
106 * Optional. Should return the previous DDC owner on success or a
107 * negative int on failure
85 * @power_state: cut or reinstate power of given client. 108 * @power_state: cut or reinstate power of given client.
86 * Optional. The return value is ignored 109 * Optional. The return value is ignored
87 * @get_client_id: determine if given pci device is integrated or discrete GPU. 110 * @get_client_id: determine if given pci device is integrated or discrete GPU.
@@ -93,6 +116,7 @@ enum vga_switcheroo_client_id {
93struct vga_switcheroo_handler { 116struct vga_switcheroo_handler {
94 int (*init)(void); 117 int (*init)(void);
95 int (*switchto)(enum vga_switcheroo_client_id id); 118 int (*switchto)(enum vga_switcheroo_client_id id);
119 int (*switch_ddc)(enum vga_switcheroo_client_id id);
96 int (*power_state)(enum vga_switcheroo_client_id id, 120 int (*power_state)(enum vga_switcheroo_client_id id,
97 enum vga_switcheroo_state state); 121 enum vga_switcheroo_state state);
98 enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev); 122 enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
@@ -132,8 +156,12 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
132void vga_switcheroo_client_fb_set(struct pci_dev *dev, 156void vga_switcheroo_client_fb_set(struct pci_dev *dev,
133 struct fb_info *info); 157 struct fb_info *info);
134 158
135int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler); 159int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
160 enum vga_switcheroo_handler_flags_t handler_flags);
136void vga_switcheroo_unregister_handler(void); 161void vga_switcheroo_unregister_handler(void);
162enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void);
163int vga_switcheroo_lock_ddc(struct pci_dev *pdev);
164int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
137 165
138int vga_switcheroo_process_delayed_switch(void); 166int vga_switcheroo_process_delayed_switch(void);
139 167
@@ -150,11 +178,15 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
150static inline int vga_switcheroo_register_client(struct pci_dev *dev, 178static inline int vga_switcheroo_register_client(struct pci_dev *dev,
151 const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; } 179 const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
152static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} 180static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
153static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; } 181static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
182 enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
154static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 183static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
155 const struct vga_switcheroo_client_ops *ops, 184 const struct vga_switcheroo_client_ops *ops,
156 enum vga_switcheroo_client_id id) { return 0; } 185 enum vga_switcheroo_client_id id) { return 0; }
157static inline void vga_switcheroo_unregister_handler(void) {} 186static inline void vga_switcheroo_unregister_handler(void) {}
187static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
188static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
189static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
158static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } 190static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
159static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } 191static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
160 192
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 8f4d4bfa6d46..d5eb5479a425 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -75,8 +75,27 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
75 75
76bool virtqueue_is_broken(struct virtqueue *vq); 76bool virtqueue_is_broken(struct virtqueue *vq);
77 77
78void *virtqueue_get_avail(struct virtqueue *vq); 78const struct vring *virtqueue_get_vring(struct virtqueue *vq);
79void *virtqueue_get_used(struct virtqueue *vq); 79dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq);
80dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
81dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
82
83/*
84 * Legacy accessors -- in almost all cases, these are the wrong functions
85 * to use.
86 */
87static inline void *virtqueue_get_desc(struct virtqueue *vq)
88{
89 return virtqueue_get_vring(vq)->desc;
90}
91static inline void *virtqueue_get_avail(struct virtqueue *vq)
92{
93 return virtqueue_get_vring(vq)->avail;
94}
95static inline void *virtqueue_get_used(struct virtqueue *vq)
96{
97 return virtqueue_get_vring(vq)->used;
98}
80 99
81/** 100/**
82 * virtio_device - representation of a device using virtio 101 * virtio_device - representation of a device using virtio
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index a156e2b6ccfe..e8d36938f09a 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -59,6 +59,35 @@ static inline void virtio_store_mb(bool weak_barriers,
59struct virtio_device; 59struct virtio_device;
60struct virtqueue; 60struct virtqueue;
61 61
62/*
63 * Creates a virtqueue and allocates the descriptor ring. If
64 * may_reduce_num is set, then this may allocate a smaller ring than
65 * expected. The caller should query virtqueue_get_ring_size to learn
66 * the actual size of the ring.
67 */
68struct virtqueue *vring_create_virtqueue(unsigned int index,
69 unsigned int num,
70 unsigned int vring_align,
71 struct virtio_device *vdev,
72 bool weak_barriers,
73 bool may_reduce_num,
74 bool (*notify)(struct virtqueue *vq),
75 void (*callback)(struct virtqueue *vq),
76 const char *name);
77
78/* Creates a virtqueue with a custom layout. */
79struct virtqueue *__vring_new_virtqueue(unsigned int index,
80 struct vring vring,
81 struct virtio_device *vdev,
82 bool weak_barriers,
83 bool (*notify)(struct virtqueue *),
84 void (*callback)(struct virtqueue *),
85 const char *name);
86
87/*
88 * Creates a virtqueue with a standard layout but a caller-allocated
89 * ring.
90 */
62struct virtqueue *vring_new_virtqueue(unsigned int index, 91struct virtqueue *vring_new_virtqueue(unsigned int index,
63 unsigned int num, 92 unsigned int num,
64 unsigned int vring_align, 93 unsigned int vring_align,
@@ -68,7 +97,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
68 bool (*notify)(struct virtqueue *vq), 97 bool (*notify)(struct virtqueue *vq),
69 void (*callback)(struct virtqueue *vq), 98 void (*callback)(struct virtqueue *vq),
70 const char *name); 99 const char *name);
100
101/*
102 * Destroys a virtqueue. If created with vring_create_virtqueue, this
103 * also frees the ring.
104 */
71void vring_del_virtqueue(struct virtqueue *vq); 105void vring_del_virtqueue(struct virtqueue *vq);
106
72/* Filter out transport-specific feature bits. */ 107/* Filter out transport-specific feature bits. */
73void vring_transport_features(struct virtio_device *vdev); 108void vring_transport_features(struct virtio_device *vdev);
74 109
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 67c1dbd19c6d..ec084321fe09 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -53,6 +53,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
53 COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED, 53 COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
54 COMPACTISOLATED, 54 COMPACTISOLATED,
55 COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, 55 COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
56 KCOMPACTD_WAKE,
56#endif 57#endif
57#ifdef CONFIG_HUGETLB_PAGE 58#ifdef CONFIG_HUGETLB_PAGE
58 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, 59 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
@@ -71,6 +72,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
71 THP_COLLAPSE_ALLOC_FAILED, 72 THP_COLLAPSE_ALLOC_FAILED,
72 THP_SPLIT_PAGE, 73 THP_SPLIT_PAGE,
73 THP_SPLIT_PAGE_FAILED, 74 THP_SPLIT_PAGE_FAILED,
75 THP_DEFERRED_SPLIT_PAGE,
74 THP_SPLIT_PMD, 76 THP_SPLIT_PMD,
75 THP_ZERO_PAGE_ALLOC, 77 THP_ZERO_PAGE_ALLOC,
76 THP_ZERO_PAGE_ALLOC_FAILED, 78 THP_ZERO_PAGE_ALLOC_FAILED,
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 65ac54c61c18..1bd31a38c51e 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -734,6 +734,41 @@ static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
734} 734}
735 735
736/* 736/*
737 * Helper to read a value from a head or tail pointer. For X86_32, the
738 * pointer is treated as a 32bit value, since the pointer value
739 * never exceeds a 32bit value in this case. Also, doing an
740 * atomic64_read on X86_32 uniprocessor systems may be implemented
741 * as a non locked cmpxchg8b, that may end up overwriting updates done
742 * by the VMCI device to the memory location. On 32bit SMP, the lock
743 * prefix will be used, so correctness isn't an issue, but using a
744 * 64bit operation still adds unnecessary overhead.
745 */
746static inline u64 vmci_q_read_pointer(atomic64_t *var)
747{
748#if defined(CONFIG_X86_32)
749 return atomic_read((atomic_t *)var);
750#else
751 return atomic64_read(var);
752#endif
753}
754
755/*
756 * Helper to set the value of a head or tail pointer. For X86_32, the
757 * pointer is treated as a 32bit value, since the pointer value
758 * never exceeds a 32bit value in this case. On 32bit SMP, using a
759 * locked cmpxchg8b adds unnecessary overhead.
760 */
761static inline void vmci_q_set_pointer(atomic64_t *var,
762 u64 new_val)
763{
764#if defined(CONFIG_X86_32)
765 return atomic_set((atomic_t *)var, (u32)new_val);
766#else
767 return atomic64_set(var, new_val);
768#endif
769}
770
771/*
737 * Helper to add a given offset to a head or tail pointer. Wraps the 772 * Helper to add a given offset to a head or tail pointer. Wraps the
738 * value of the pointer around the max size of the queue. 773 * value of the pointer around the max size of the queue.
739 */ 774 */
@@ -741,14 +776,14 @@ static inline void vmci_qp_add_pointer(atomic64_t *var,
741 size_t add, 776 size_t add,
742 u64 size) 777 u64 size)
743{ 778{
744 u64 new_val = atomic64_read(var); 779 u64 new_val = vmci_q_read_pointer(var);
745 780
746 if (new_val >= size - add) 781 if (new_val >= size - add)
747 new_val -= size; 782 new_val -= size;
748 783
749 new_val += add; 784 new_val += add;
750 785
751 atomic64_set(var, new_val); 786 vmci_q_set_pointer(var, new_val);
752} 787}
753 788
754/* 789/*
@@ -758,7 +793,7 @@ static inline u64
758vmci_q_header_producer_tail(const struct vmci_queue_header *q_header) 793vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
759{ 794{
760 struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header; 795 struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
761 return atomic64_read(&qh->producer_tail); 796 return vmci_q_read_pointer(&qh->producer_tail);
762} 797}
763 798
764/* 799/*
@@ -768,7 +803,7 @@ static inline u64
768vmci_q_header_consumer_head(const struct vmci_queue_header *q_header) 803vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
769{ 804{
770 struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header; 805 struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
771 return atomic64_read(&qh->consumer_head); 806 return vmci_q_read_pointer(&qh->consumer_head);
772} 807}
773 808
774/* 809/*
diff --git a/include/linux/wait.h b/include/linux/wait.h
index ae71a769b89e..27d7a0ab5da3 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -338,7 +338,7 @@ do { \
338 schedule(); try_to_freeze()) 338 schedule(); try_to_freeze())
339 339
340/** 340/**
341 * wait_event - sleep (or freeze) until a condition gets true 341 * wait_event_freezable - sleep (or freeze) until a condition gets true
342 * @wq: the waitqueue to wait on 342 * @wq: the waitqueue to wait on
343 * @condition: a C expression for the event to wait for 343 * @condition: a C expression for the event to wait for
344 * 344 *
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index b585fa2507ee..51732d6c9555 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -10,8 +10,9 @@
10 10
11 11
12#include <linux/bitops.h> 12#include <linux/bitops.h>
13#include <linux/device.h>
14#include <linux/cdev.h> 13#include <linux/cdev.h>
14#include <linux/device.h>
15#include <linux/kernel.h>
15#include <linux/notifier.h> 16#include <linux/notifier.h>
16#include <uapi/linux/watchdog.h> 17#include <uapi/linux/watchdog.h>
17 18
@@ -46,7 +47,7 @@ struct watchdog_ops {
46 unsigned int (*status)(struct watchdog_device *); 47 unsigned int (*status)(struct watchdog_device *);
47 int (*set_timeout)(struct watchdog_device *, unsigned int); 48 int (*set_timeout)(struct watchdog_device *, unsigned int);
48 unsigned int (*get_timeleft)(struct watchdog_device *); 49 unsigned int (*get_timeleft)(struct watchdog_device *);
49 int (*restart)(struct watchdog_device *); 50 int (*restart)(struct watchdog_device *, unsigned long, void *);
50 long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long); 51 long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
51}; 52};
52 53
@@ -61,14 +62,21 @@ struct watchdog_ops {
61 * @bootstatus: Status of the watchdog device at boot. 62 * @bootstatus: Status of the watchdog device at boot.
62 * @timeout: The watchdog devices timeout value (in seconds). 63 * @timeout: The watchdog devices timeout value (in seconds).
63 * @min_timeout:The watchdog devices minimum timeout value (in seconds). 64 * @min_timeout:The watchdog devices minimum timeout value (in seconds).
64 * @max_timeout:The watchdog devices maximum timeout value (in seconds). 65 * @max_timeout:The watchdog devices maximum timeout value (in seconds)
66 * as configurable from user space. Only relevant if
67 * max_hw_heartbeat_ms is not provided.
68 * @min_hw_heartbeat_ms:
69 * Minimum time between heartbeats, in milli-seconds.
70 * @max_hw_heartbeat_ms:
71 * Hardware limit for maximum timeout, in milli-seconds.
72 * Replaces max_timeout if specified.
65 * @reboot_nb: The notifier block to stop watchdog on reboot. 73 * @reboot_nb: The notifier block to stop watchdog on reboot.
66 * @restart_nb: The notifier block to register a restart function. 74 * @restart_nb: The notifier block to register a restart function.
67 * @driver_data:Pointer to the drivers private data. 75 * @driver_data:Pointer to the drivers private data.
68 * @wd_data: Pointer to watchdog core internal data. 76 * @wd_data: Pointer to watchdog core internal data.
69 * @status: Field that contains the devices internal status bits. 77 * @status: Field that contains the devices internal status bits.
70 * @deferred: entry in wtd_deferred_reg_list which is used to 78 * @deferred: Entry in wtd_deferred_reg_list which is used to
71 * register early initialized watchdogs. 79 * register early initialized watchdogs.
72 * 80 *
73 * The watchdog_device structure contains all information about a 81 * The watchdog_device structure contains all information about a
74 * watchdog timer device. 82 * watchdog timer device.
@@ -89,6 +97,8 @@ struct watchdog_device {
89 unsigned int timeout; 97 unsigned int timeout;
90 unsigned int min_timeout; 98 unsigned int min_timeout;
91 unsigned int max_timeout; 99 unsigned int max_timeout;
100 unsigned int min_hw_heartbeat_ms;
101 unsigned int max_hw_heartbeat_ms;
92 struct notifier_block reboot_nb; 102 struct notifier_block reboot_nb;
93 struct notifier_block restart_nb; 103 struct notifier_block restart_nb;
94 void *driver_data; 104 void *driver_data;
@@ -98,6 +108,7 @@ struct watchdog_device {
98#define WDOG_ACTIVE 0 /* Is the watchdog running/active */ 108#define WDOG_ACTIVE 0 /* Is the watchdog running/active */
99#define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? */ 109#define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? */
100#define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */ 110#define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */
111#define WDOG_HW_RUNNING 3 /* True if HW watchdog running */
101 struct list_head deferred; 112 struct list_head deferred;
102}; 113};
103 114
@@ -110,6 +121,15 @@ static inline bool watchdog_active(struct watchdog_device *wdd)
110 return test_bit(WDOG_ACTIVE, &wdd->status); 121 return test_bit(WDOG_ACTIVE, &wdd->status);
111} 122}
112 123
124/*
125 * Use the following function to check whether or not the hardware watchdog
126 * is running
127 */
128static inline bool watchdog_hw_running(struct watchdog_device *wdd)
129{
130 return test_bit(WDOG_HW_RUNNING, &wdd->status);
131}
132
113/* Use the following function to set the nowayout feature */ 133/* Use the following function to set the nowayout feature */
114static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout) 134static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout)
115{ 135{
@@ -128,13 +148,18 @@ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigne
128{ 148{
129 /* 149 /*
130 * The timeout is invalid if 150 * The timeout is invalid if
151 * - the requested value is larger than UINT_MAX / 1000
152 * (since internal calculations are done in milli-seconds),
153 * or
131 * - the requested value is smaller than the configured minimum timeout, 154 * - the requested value is smaller than the configured minimum timeout,
132 * or 155 * or
133 * - a maximum timeout is configured, and the requested value is larger 156 * - a maximum hardware timeout is not configured, a maximum timeout
134 * than the maximum timeout. 157 * is configured, and the requested value is larger than the
158 * configured maximum timeout.
135 */ 159 */
136 return t < wdd->min_timeout || 160 return t > UINT_MAX / 1000 || t < wdd->min_timeout ||
137 (wdd->max_timeout && t > wdd->max_timeout); 161 (!wdd->max_hw_heartbeat_ms && wdd->max_timeout &&
162 t > wdd->max_timeout);
138} 163}
139 164
140/* Use the following functions to manipulate watchdog driver specific data */ 165/* Use the following functions to manipulate watchdog driver specific data */