Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 6
-rw-r--r--  include/linux/asn1_ber_bytecode.h | 16
-rw-r--r--  include/linux/atmel_serial.h | 240
-rw-r--r--  include/linux/atomic.h | 361
-rw-r--r--  include/linux/audit.h | 4
-rw-r--r--  include/linux/average.h | 61
-rw-r--r--  include/linux/backing-dev.h | 26
-rw-r--r--  include/linux/basic_mmio_gpio.h | 1
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h | 1
-rw-r--r--  include/linux/bio.h | 38
-rw-r--r--  include/linux/bitmap.h | 2
-rw-r--r--  include/linux/bitops.h | 6
-rw-r--r--  include/linux/blk-cgroup.h | 340
-rw-r--r--  include/linux/blk_types.h | 15
-rw-r--r--  include/linux/blkdev.h | 44
-rw-r--r--  include/linux/bpf.h | 12
-rw-r--r--  include/linux/ceph/libceph.h | 2
-rw-r--r--  include/linux/ceph/messenger.h | 4
-rw-r--r--  include/linux/ceph/msgr.h | 4
-rw-r--r--  include/linux/cgroup-defs.h | 15
-rw-r--r--  include/linux/cgroup.h | 24
-rw-r--r--  include/linux/cgroup_subsys.h | 30
-rw-r--r--  include/linux/clk-provider.h | 89
-rw-r--r--  include/linux/clk/clk-conf.h | 2
-rw-r--r--  include/linux/clk/shmobile.h | 12
-rw-r--r--  include/linux/clk/tegra.h | 3
-rw-r--r--  include/linux/clk/ti.h | 160
-rw-r--r--  include/linux/clockchips.h | 3
-rw-r--r--  include/linux/compiler.h | 7
-rw-r--r--  include/linux/context_tracking.h | 15
-rw-r--r--  include/linux/context_tracking_state.h | 1
-rw-r--r--  include/linux/coresight.h | 21
-rw-r--r--  include/linux/cpufeature.h | 7
-rw-r--r--  include/linux/cpufreq.h | 28
-rw-r--r--  include/linux/cpuidle.h | 1
-rw-r--r--  include/linux/cred.h | 8
-rw-r--r--  include/linux/crypto.h | 54
-rw-r--r--  include/linux/dax.h | 39
-rw-r--r--  include/linux/debugfs.h | 20
-rw-r--r--  include/linux/device-mapper.h | 4
-rw-r--r--  include/linux/device.h | 28
-rw-r--r--  include/linux/dmaengine.h | 75
-rw-r--r--  include/linux/dmapool.h | 6
-rw-r--r--  include/linux/etherdevice.h | 2
-rw-r--r--  include/linux/extcon.h | 7
-rw-r--r--  include/linux/f2fs_fs.h | 16
-rw-r--r--  include/linux/fb.h | 2
-rw-r--r--  include/linux/fdtable.h | 4
-rw-r--r--  include/linux/filter.h | 17
-rw-r--r--  include/linux/fs.h | 57
-rw-r--r--  include/linux/fsl_devices.h | 20
-rw-r--r--  include/linux/fsl_ifc.h | 50
-rw-r--r--  include/linux/fsnotify_backend.h | 59
-rw-r--r--  include/linux/genalloc.h | 6
-rw-r--r--  include/linux/genhd.h | 33
-rw-r--r--  include/linux/gfp.h | 31
-rw-r--r--  include/linux/gpio/consumer.h | 82
-rw-r--r--  include/linux/gpio/driver.h | 37
-rw-r--r--  include/linux/gpio/machine.h | 1
-rw-r--r--  include/linux/huge_mm.h | 20
-rw-r--r--  include/linux/hugetlb.h | 17
-rw-r--r--  include/linux/hyperv.h | 7
-rw-r--r--  include/linux/i2c.h | 19
-rw-r--r--  include/linux/ieee80211.h | 2
-rw-r--r--  include/linux/igmp.h | 1
-rw-r--r--  include/linux/iio/common/st_sensors.h | 2
-rw-r--r--  include/linux/iio/consumer.h | 2
-rw-r--r--  include/linux/iio/iio.h | 17
-rw-r--r--  include/linux/iio/sysfs.h | 3
-rw-r--r--  include/linux/iio/trigger.h | 3
-rw-r--r--  include/linux/iio/triggered_buffer.h | 4
-rw-r--r--  include/linux/init_task.h | 10
-rw-r--r--  include/linux/input/touchscreen.h | 11
-rw-r--r--  include/linux/intel-iommu.h | 2
-rw-r--r--  include/linux/io-mapping.h | 2
-rw-r--r--  include/linux/io.h | 33
-rw-r--r--  include/linux/ipmi_smi.h | 7
-rw-r--r--  include/linux/ipv6.h | 5
-rw-r--r--  include/linux/irq.h | 19
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 13
-rw-r--r--  include/linux/irqchip/arm-gic.h | 10
-rw-r--r--  include/linux/irqchip/mips-gic.h | 14
-rw-r--r--  include/linux/irqdesc.h | 8
-rw-r--r--  include/linux/irqdomain.h | 26
-rw-r--r--  include/linux/jbd.h | 1047
-rw-r--r--  include/linux/jbd2.h | 44
-rw-r--r--  include/linux/jbd_common.h | 46
-rw-r--r--  include/linux/jiffies.h | 35
-rw-r--r--  include/linux/jump_label.h | 261
-rw-r--r--  include/linux/kasan.h | 10
-rw-r--r--  include/linux/kernfs.h | 4
-rw-r--r--  include/linux/kexec.h | 18
-rw-r--r--  include/linux/klist.h | 1
-rw-r--r--  include/linux/kmod.h | 2
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/kthread.h | 3
-rw-r--r--  include/linux/kvm_host.h | 25
-rw-r--r--  include/linux/libnvdimm.h | 4
-rw-r--r--  include/linux/list.h | 5
-rw-r--r--  include/linux/llist.h | 2
-rw-r--r--  include/linux/lsm_audit.h | 7
-rw-r--r--  include/linux/lsm_hooks.h | 6
-rw-r--r--  include/linux/mailbox_controller.h | 7
-rw-r--r--  include/linux/mei_cl_bus.h | 15
-rw-r--r--  include/linux/memblock.h | 4
-rw-r--r--  include/linux/memcontrol.h | 392
-rw-r--r--  include/linux/memory_hotplug.h | 5
-rw-r--r--  include/linux/mfd/88pm80x.h | 162
-rw-r--r--  include/linux/mfd/arizona/core.h | 3
-rw-r--r--  include/linux/mfd/arizona/pdata.h | 14
-rw-r--r--  include/linux/mfd/arizona/registers.h | 257
-rw-r--r--  include/linux/mfd/axp20x.h | 67
-rw-r--r--  include/linux/mfd/da9062/core.h | 50
-rw-r--r--  include/linux/mfd/da9062/registers.h | 1108
-rw-r--r--  include/linux/mfd/da9063/core.h | 1
-rw-r--r--  include/linux/mfd/lpc_ich.h | 6
-rw-r--r--  include/linux/mfd/max77693-common.h | 49
-rw-r--r--  include/linux/mfd/max77693-private.h | 134
-rw-r--r--  include/linux/mfd/max77843-private.h | 174
-rw-r--r--  include/linux/mfd/mt6397/core.h | 1
-rw-r--r--  include/linux/mfd/palmas.h | 7
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 8
-rw-r--r--  include/linux/microchipphy.h | 73
-rw-r--r--  include/linux/miscdevice.h | 2
-rw-r--r--  include/linux/mlx4/cq.h | 3
-rw-r--r--  include/linux/mlx4/device.h | 8
-rw-r--r--  include/linux/mlx4/driver.h | 1
-rw-r--r--  include/linux/mlx4/qp.h | 3
-rw-r--r--  include/linux/mlx5/device.h | 21
-rw-r--r--  include/linux/mlx5/driver.h | 30
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 24
-rw-r--r--  include/linux/mm.h | 58
-rw-r--r--  include/linux/mm_types.h | 14
-rw-r--r--  include/linux/mmc/card.h | 3
-rw-r--r--  include/linux/mmc/dw_mmc.h | 9
-rw-r--r--  include/linux/mmc/host.h | 3
-rw-r--r--  include/linux/mmu_notifier.h | 46
-rw-r--r--  include/linux/mmzone.h | 31
-rw-r--r--  include/linux/mod_devicetable.h | 2
-rw-r--r--  include/linux/mpls_iptunnel.h | 6
-rw-r--r--  include/linux/msi.h | 109
-rw-r--r--  include/linux/mtd/map.h | 2
-rw-r--r--  include/linux/net.h | 8
-rw-r--r--  include/linux/netdevice.h | 176
-rw-r--r--  include/linux/netfilter.h | 44
-rw-r--r--  include/linux/netfilter/nf_conntrack_zones_common.h | 23
-rw-r--r--  include/linux/netfilter/nfnetlink_acct.h | 3
-rw-r--r--  include/linux/netfilter/x_tables.h | 8
-rw-r--r--  include/linux/netfilter_bridge.h | 12
-rw-r--r--  include/linux/netfilter_ipv6.h | 18
-rw-r--r--  include/linux/netlink.h | 13
-rw-r--r--  include/linux/nfs4.h | 18
-rw-r--r--  include/linux/nfs_fs.h | 2
-rw-r--r--  include/linux/nfs_fs_sb.h | 5
-rw-r--r--  include/linux/nfs_xdr.h | 8
-rw-r--r--  include/linux/nmi.h | 21
-rw-r--r--  include/linux/nvme.h | 22
-rw-r--r--  include/linux/nvmem-consumer.h | 157
-rw-r--r--  include/linux/nvmem-provider.h | 47
-rw-r--r--  include/linux/of.h | 3
-rw-r--r--  include/linux/of_gpio.h | 4
-rw-r--r--  include/linux/of_irq.h | 1
-rw-r--r--  include/linux/of_platform.h | 9
-rw-r--r--  include/linux/oid_registry.h | 7
-rw-r--r--  include/linux/oom.h | 38
-rw-r--r--  include/linux/page-flags.h | 11
-rw-r--r--  include/linux/page-isolation.h | 5
-rw-r--r--  include/linux/page_ext.h | 4
-rw-r--r--  include/linux/page_idle.h | 110
-rw-r--r--  include/linux/pci-ats.h | 49
-rw-r--r--  include/linux/pci.h | 69
-rw-r--r--  include/linux/pci_ids.h | 9
-rw-r--r--  include/linux/percpu-defs.h | 6
-rw-r--r--  include/linux/percpu-rwsem.h | 20
-rw-r--r--  include/linux/perf/arm_pmu.h | 154
-rw-r--r--  include/linux/perf_event.h | 10
-rw-r--r--  include/linux/phy.h | 14
-rw-r--r--  include/linux/phy_fixed.h | 8
-rw-r--r--  include/linux/platform_data/atmel.h | 12
-rw-r--r--  include/linux/platform_data/atmel_mxt_ts.h (renamed from include/linux/i2c/atmel_mxt_ts.h) | 12
-rw-r--r--  include/linux/platform_data/clk-ux500.h | 12
-rw-r--r--  include/linux/platform_data/gpio-em.h | 11
-rw-r--r--  include/linux/platform_data/i2c-mux-reg.h | 44
-rw-r--r--  include/linux/platform_data/itco_wdt.h | 19
-rw-r--r--  include/linux/platform_data/leds-kirkwood-ns2.h | 14
-rw-r--r--  include/linux/platform_data/lp855x.h | 2
-rw-r--r--  include/linux/platform_data/mmc-esdhc-imx.h | 1
-rw-r--r--  include/linux/platform_data/pixcir_i2c_ts.h (renamed from include/linux/input/pixcir_ts.h) | 1
-rw-r--r--  include/linux/platform_data/spi-davinci.h | 1
-rw-r--r--  include/linux/platform_data/spi-mt65xx.h | 20
-rw-r--r--  include/linux/platform_data/st_nci.h | 29
-rw-r--r--  include/linux/platform_data/video-ep93xx.h | 8
-rw-r--r--  include/linux/platform_data/zforce_ts.h | 3
-rw-r--r--  include/linux/pm_domain.h | 9
-rw-r--r--  include/linux/pm_opp.h | 36
-rw-r--r--  include/linux/pm_qos.h | 5
-rw-r--r--  include/linux/pm_runtime.h | 6
-rw-r--r--  include/linux/pmem.h | 115
-rw-r--r--  include/linux/poison.h | 11
-rw-r--r--  include/linux/preempt.h | 19
-rw-r--r--  include/linux/printk.h | 14
-rw-r--r--  include/linux/property.h | 4
-rw-r--r--  include/linux/proportions.h | 2
-rw-r--r--  include/linux/psci.h | 52
-rw-r--r--  include/linux/ptrace.h | 1
-rw-r--r--  include/linux/pwm.h | 99
-rw-r--r--  include/linux/pxa2xx_ssp.h | 1
-rw-r--r--  include/linux/quotaops.h | 5
-rw-r--r--  include/linux/rcupdate.h | 144
-rw-r--r--  include/linux/rcutiny.h | 10
-rw-r--r--  include/linux/rcutree.h | 2
-rw-r--r--  include/linux/regmap.h | 385
-rw-r--r--  include/linux/regulator/consumer.h | 16
-rw-r--r--  include/linux/regulator/da9211.h | 19
-rw-r--r--  include/linux/regulator/driver.h | 1
-rw-r--r--  include/linux/regulator/machine.h | 1
-rw-r--r--  include/linux/regulator/mt6311.h | 29
-rw-r--r--  include/linux/reset.h | 14
-rw-r--r--  include/linux/rmap.h | 3
-rw-r--r--  include/linux/scatterlist.h | 9
-rw-r--r--  include/linux/sched.h | 121
-rw-r--r--  include/linux/seccomp.h | 2
-rw-r--r--  include/linux/seq_file.h | 39
-rw-r--r--  include/linux/serial_8250.h | 7
-rw-r--r--  include/linux/serio.h | 2
-rw-r--r--  include/linux/shdma-base.h | 5
-rw-r--r--  include/linux/skbuff.h | 153
-rw-r--r--  include/linux/slab.h | 10
-rw-r--r--  include/linux/smpboot.h | 11
-rw-r--r--  include/linux/soc/dove/pmu.h | 6
-rw-r--r--  include/linux/soc/mediatek/infracfg.h | 26
-rw-r--r--  include/linux/soc/qcom/smd-rpm.h | 35
-rw-r--r--  include/linux/soc/qcom/smd.h | 46
-rw-r--r--  include/linux/soc/qcom/smem.h | 11
-rw-r--r--  include/linux/spi/spi.h | 64
-rw-r--r--  include/linux/spinlock.h | 40
-rw-r--r--  include/linux/stmmac.h | 22
-rw-r--r--  include/linux/stop_machine.h | 28
-rw-r--r--  include/linux/string_helpers.h | 14
-rw-r--r--  include/linux/sunrpc/addr.h | 27
-rw-r--r--  include/linux/sunrpc/auth.h | 8
-rw-r--r--  include/linux/sunrpc/cache.h | 9
-rw-r--r--  include/linux/sunrpc/svc.h | 68
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 92
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 1
-rw-r--r--  include/linux/sunrpc/xprtrdma.h | 2
-rw-r--r--  include/linux/swap.h | 23
-rw-r--r--  include/linux/swapops.h | 37
-rw-r--r--  include/linux/syscalls.h | 1
-rw-r--r--  include/linux/ti_wilink_st.h | 1
-rw-r--r--  include/linux/tick.h | 25
-rw-r--r--  include/linux/time64.h | 35
-rw-r--r--  include/linux/timekeeping.h | 9
-rw-r--r--  include/linux/trace_events.h | 7
-rw-r--r--  include/linux/tty.h | 6
-rw-r--r--  include/linux/tty_driver.h | 2
-rw-r--r--  include/linux/types.h | 3
-rw-r--r--  include/linux/uaccess.h | 2
-rw-r--r--  include/linux/uprobes.h | 17
-rw-r--r--  include/linux/usb/chipidea.h | 15
-rw-r--r--  include/linux/usb/composite.h | 2
-rw-r--r--  include/linux/usb/gadget.h | 198
-rw-r--r--  include/linux/usb/hcd.h | 6
-rw-r--r--  include/linux/usb/msm_hsusb.h | 9
-rw-r--r--  include/linux/usb/of.h | 7
-rw-r--r--  include/linux/usb/otg.h | 15
-rw-r--r--  include/linux/userfaultfd_k.h | 85
-rw-r--r--  include/linux/verify_pefile.h | 6
-rw-r--r--  include/linux/wait.h | 5
-rw-r--r--  include/linux/watchdog.h | 8
-rw-r--r--  include/linux/workqueue.h | 6
-rw-r--r--  include/linux/zbud.h | 2
-rw-r--r--  include/linux/zpool.h | 6
-rw-r--r--  include/linux/zsmalloc.h | 6
274 files changed, 7504 insertions, 3156 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d2445fa9999f..7235c4851460 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,10 +15,6 @@
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */ 19 */
24 20
@@ -221,7 +217,7 @@ struct pci_dev;
221 217
222int acpi_pci_irq_enable (struct pci_dev *dev); 218int acpi_pci_irq_enable (struct pci_dev *dev);
223void acpi_penalize_isa_irq(int irq, int active); 219void acpi_penalize_isa_irq(int irq, int active);
224 220void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
225void acpi_pci_irq_disable (struct pci_dev *dev); 221void acpi_pci_irq_disable (struct pci_dev *dev);
226 222
227extern int ec_read(u8 addr, u8 *val); 223extern int ec_read(u8 addr, u8 *val);
diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h
index 945d44ae529c..ab3a6c002f7b 100644
--- a/include/linux/asn1_ber_bytecode.h
+++ b/include/linux/asn1_ber_bytecode.h
@@ -45,23 +45,27 @@ enum asn1_opcode {
45 ASN1_OP_MATCH_JUMP = 0x04, 45 ASN1_OP_MATCH_JUMP = 0x04,
46 ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05, 46 ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05,
47 ASN1_OP_MATCH_ANY = 0x08, 47 ASN1_OP_MATCH_ANY = 0x08,
48 ASN1_OP_MATCH_ANY_OR_SKIP = 0x09,
48 ASN1_OP_MATCH_ANY_ACT = 0x0a, 49 ASN1_OP_MATCH_ANY_ACT = 0x0a,
50 ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b,
49 /* Everything before here matches unconditionally */ 51 /* Everything before here matches unconditionally */
50 52
51 ASN1_OP_COND_MATCH_OR_SKIP = 0x11, 53 ASN1_OP_COND_MATCH_OR_SKIP = 0x11,
52 ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13, 54 ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13,
53 ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15, 55 ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15,
54 ASN1_OP_COND_MATCH_ANY = 0x18, 56 ASN1_OP_COND_MATCH_ANY = 0x18,
57 ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19,
55 ASN1_OP_COND_MATCH_ANY_ACT = 0x1a, 58 ASN1_OP_COND_MATCH_ANY_ACT = 0x1a,
59 ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b,
56 60
57 /* Everything before here will want a tag from the data */ 61 /* Everything before here will want a tag from the data */
58#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT 62#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP
59 63
60 /* These are here to help fill up space */ 64 /* These are here to help fill up space */
61 ASN1_OP_COND_FAIL = 0x1b, 65 ASN1_OP_COND_FAIL = 0x1c,
62 ASN1_OP_COMPLETE = 0x1c, 66 ASN1_OP_COMPLETE = 0x1d,
63 ASN1_OP_ACT = 0x1d, 67 ASN1_OP_ACT = 0x1e,
64 ASN1_OP_RETURN = 0x1e, 68 ASN1_OP_MAYBE_ACT = 0x1f,
65 69
66 /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */ 70 /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */
67 ASN1_OP_END_SEQ = 0x20, 71 ASN1_OP_END_SEQ = 0x20,
@@ -76,6 +80,8 @@ enum asn1_opcode {
76#define ASN1_OP_END__OF 0x02 80#define ASN1_OP_END__OF 0x02
77#define ASN1_OP_END__ACT 0x04 81#define ASN1_OP_END__ACT 0x04
78 82
83 ASN1_OP_RETURN = 0x28,
84
79 ASN1_OP__NR 85 ASN1_OP__NR
80}; 86};
81 87
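For illustration only (not part of the patch): the comment above notes that the END opcodes keep their variants in the low bits, so a generated opcode can be composed from the base value plus the flag masks shown here. A minimal sketch, assuming only the constants visible in this hunk:

	/* end a SEQUENCE OF and run its action: 0x20 | 0x02 | 0x04 == 0x26 */
	unsigned char op = ASN1_OP_END_SEQ | ASN1_OP_END__OF | ASN1_OP_END__ACT;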
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index 00beddf6be20..ee696d7e8a43 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -16,115 +16,151 @@
16#ifndef ATMEL_SERIAL_H 16#ifndef ATMEL_SERIAL_H
17#define ATMEL_SERIAL_H 17#define ATMEL_SERIAL_H
18 18
19#define ATMEL_US_CR 0x00 /* Control Register */ 19#define ATMEL_US_CR 0x00 /* Control Register */
20#define ATMEL_US_RSTRX (1 << 2) /* Reset Receiver */ 20#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */
21#define ATMEL_US_RSTTX (1 << 3) /* Reset Transmitter */ 21#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */
22#define ATMEL_US_RXEN (1 << 4) /* Receiver Enable */ 22#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */
23#define ATMEL_US_RXDIS (1 << 5) /* Receiver Disable */ 23#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */
24#define ATMEL_US_TXEN (1 << 6) /* Transmitter Enable */ 24#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */
25#define ATMEL_US_TXDIS (1 << 7) /* Transmitter Disable */ 25#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */
26#define ATMEL_US_RSTSTA (1 << 8) /* Reset Status Bits */ 26#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */
27#define ATMEL_US_STTBRK (1 << 9) /* Start Break */ 27#define ATMEL_US_STTBRK BIT(9) /* Start Break */
28#define ATMEL_US_STPBRK (1 << 10) /* Stop Break */ 28#define ATMEL_US_STPBRK BIT(10) /* Stop Break */
29#define ATMEL_US_STTTO (1 << 11) /* Start Time-out */ 29#define ATMEL_US_STTTO BIT(11) /* Start Time-out */
30#define ATMEL_US_SENDA (1 << 12) /* Send Address */ 30#define ATMEL_US_SENDA BIT(12) /* Send Address */
31#define ATMEL_US_RSTIT (1 << 13) /* Reset Iterations */ 31#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */
32#define ATMEL_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */ 32#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */
33#define ATMEL_US_RETTO (1 << 15) /* Rearm Time-out */ 33#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */
34#define ATMEL_US_DTREN (1 << 16) /* Data Terminal Ready Enable [AT91RM9200 only] */ 34#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */
35#define ATMEL_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable [AT91RM9200 only] */ 35#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */
36#define ATMEL_US_RTSEN (1 << 18) /* Request To Send Enable */ 36#define ATMEL_US_RTSEN BIT(18) /* Request To Send Enable */
37#define ATMEL_US_RTSDIS (1 << 19) /* Request To Send Disable */ 37#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */
38#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */
39#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */
40#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */
41#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */
42#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */
38 43
39#define ATMEL_US_MR 0x04 /* Mode Register */ 44#define ATMEL_US_MR 0x04 /* Mode Register */
40#define ATMEL_US_USMODE (0xf << 0) /* Mode of the USART */ 45#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
41#define ATMEL_US_USMODE_NORMAL 0 46#define ATMEL_US_USMODE_NORMAL 0
42#define ATMEL_US_USMODE_RS485 1 47#define ATMEL_US_USMODE_RS485 1
43#define ATMEL_US_USMODE_HWHS 2 48#define ATMEL_US_USMODE_HWHS 2
44#define ATMEL_US_USMODE_MODEM 3 49#define ATMEL_US_USMODE_MODEM 3
45#define ATMEL_US_USMODE_ISO7816_T0 4 50#define ATMEL_US_USMODE_ISO7816_T0 4
46#define ATMEL_US_USMODE_ISO7816_T1 6 51#define ATMEL_US_USMODE_ISO7816_T1 6
47#define ATMEL_US_USMODE_IRDA 8 52#define ATMEL_US_USMODE_IRDA 8
48#define ATMEL_US_USCLKS (3 << 4) /* Clock Selection */ 53#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
49#define ATMEL_US_USCLKS_MCK (0 << 4) 54#define ATMEL_US_USCLKS_MCK (0 << 4)
50#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4) 55#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
51#define ATMEL_US_USCLKS_SCK (3 << 4) 56#define ATMEL_US_USCLKS_SCK (3 << 4)
52#define ATMEL_US_CHRL (3 << 6) /* Character Length */ 57#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
53#define ATMEL_US_CHRL_5 (0 << 6) 58#define ATMEL_US_CHRL_5 (0 << 6)
54#define ATMEL_US_CHRL_6 (1 << 6) 59#define ATMEL_US_CHRL_6 (1 << 6)
55#define ATMEL_US_CHRL_7 (2 << 6) 60#define ATMEL_US_CHRL_7 (2 << 6)
56#define ATMEL_US_CHRL_8 (3 << 6) 61#define ATMEL_US_CHRL_8 (3 << 6)
57#define ATMEL_US_SYNC (1 << 8) /* Synchronous Mode Select */ 62#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
58#define ATMEL_US_PAR (7 << 9) /* Parity Type */ 63#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
59#define ATMEL_US_PAR_EVEN (0 << 9) 64#define ATMEL_US_PAR_EVEN (0 << 9)
60#define ATMEL_US_PAR_ODD (1 << 9) 65#define ATMEL_US_PAR_ODD (1 << 9)
61#define ATMEL_US_PAR_SPACE (2 << 9) 66#define ATMEL_US_PAR_SPACE (2 << 9)
62#define ATMEL_US_PAR_MARK (3 << 9) 67#define ATMEL_US_PAR_MARK (3 << 9)
63#define ATMEL_US_PAR_NONE (4 << 9) 68#define ATMEL_US_PAR_NONE (4 << 9)
64#define ATMEL_US_PAR_MULTI_DROP (6 << 9) 69#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
65#define ATMEL_US_NBSTOP (3 << 12) /* Number of Stop Bits */ 70#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
66#define ATMEL_US_NBSTOP_1 (0 << 12) 71#define ATMEL_US_NBSTOP_1 (0 << 12)
67#define ATMEL_US_NBSTOP_1_5 (1 << 12) 72#define ATMEL_US_NBSTOP_1_5 (1 << 12)
68#define ATMEL_US_NBSTOP_2 (2 << 12) 73#define ATMEL_US_NBSTOP_2 (2 << 12)
69#define ATMEL_US_CHMODE (3 << 14) /* Channel Mode */ 74#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
70#define ATMEL_US_CHMODE_NORMAL (0 << 14) 75#define ATMEL_US_CHMODE_NORMAL (0 << 14)
71#define ATMEL_US_CHMODE_ECHO (1 << 14) 76#define ATMEL_US_CHMODE_ECHO (1 << 14)
72#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14) 77#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
73#define ATMEL_US_CHMODE_REM_LOOP (3 << 14) 78#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
74#define ATMEL_US_MSBF (1 << 16) /* Bit Order */ 79#define ATMEL_US_MSBF BIT(16) /* Bit Order */
75#define ATMEL_US_MODE9 (1 << 17) /* 9-bit Character Length */ 80#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
76#define ATMEL_US_CLKO (1 << 18) /* Clock Output Select */ 81#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
77#define ATMEL_US_OVER (1 << 19) /* Oversampling Mode */ 82#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */
78#define ATMEL_US_INACK (1 << 20) /* Inhibit Non Acknowledge */ 83#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
79#define ATMEL_US_DSNACK (1 << 21) /* Disable Successive NACK */ 84#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
80#define ATMEL_US_MAX_ITER (7 << 24) /* Max Iterations */ 85#define ATMEL_US_MAX_ITER GENMASK(26, 24) /* Max Iterations */
81#define ATMEL_US_FILTER (1 << 28) /* Infrared Receive Line Filter */ 86#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
82 87
83#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */ 88#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
84#define ATMEL_US_RXRDY (1 << 0) /* Receiver Ready */ 89#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */
85#define ATMEL_US_TXRDY (1 << 1) /* Transmitter Ready */ 90#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */
86#define ATMEL_US_RXBRK (1 << 2) /* Break Received / End of Break */ 91#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */
87#define ATMEL_US_ENDRX (1 << 3) /* End of Receiver Transfer */ 92#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */
88#define ATMEL_US_ENDTX (1 << 4) /* End of Transmitter Transfer */ 93#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */
89#define ATMEL_US_OVRE (1 << 5) /* Overrun Error */ 94#define ATMEL_US_OVRE BIT(5) /* Overrun Error */
90#define ATMEL_US_FRAME (1 << 6) /* Framing Error */ 95#define ATMEL_US_FRAME BIT(6) /* Framing Error */
91#define ATMEL_US_PARE (1 << 7) /* Parity Error */ 96#define ATMEL_US_PARE BIT(7) /* Parity Error */
92#define ATMEL_US_TIMEOUT (1 << 8) /* Receiver Time-out */ 97#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */
93#define ATMEL_US_TXEMPTY (1 << 9) /* Transmitter Empty */ 98#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */
94#define ATMEL_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */ 99#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */
95#define ATMEL_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */ 100#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */
96#define ATMEL_US_RXBUFF (1 << 12) /* Reception Buffer Full */ 101#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */
97#define ATMEL_US_NACK (1 << 13) /* Non Acknowledge */ 102#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */
98#define ATMEL_US_RIIC (1 << 16) /* Ring Indicator Input Change [AT91RM9200 only] */ 103#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */
99#define ATMEL_US_DSRIC (1 << 17) /* Data Set Ready Input Change [AT91RM9200 only] */ 104#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */
100#define ATMEL_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change [AT91RM9200 only] */ 105#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */
101#define ATMEL_US_CTSIC (1 << 19) /* Clear to Send Input Change */ 106#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */
102#define ATMEL_US_RI (1 << 20) /* RI */ 107#define ATMEL_US_RI BIT(20) /* RI */
103#define ATMEL_US_DSR (1 << 21) /* DSR */ 108#define ATMEL_US_DSR BIT(21) /* DSR */
104#define ATMEL_US_DCD (1 << 22) /* DCD */ 109#define ATMEL_US_DCD BIT(22) /* DCD */
105#define ATMEL_US_CTS (1 << 23) /* CTS */ 110#define ATMEL_US_CTS BIT(23) /* CTS */
106 111
107#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */ 112#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
108#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */ 113#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
109#define ATMEL_US_CSR 0x14 /* Channel Status Register */ 114#define ATMEL_US_CSR 0x14 /* Channel Status Register */
110#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */ 115#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
111#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */ 116#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
112#define ATMEL_US_SYNH (1 << 15) /* Transmit/Receive Sync [AT91SAM9261 only] */ 117#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */
113 118
114#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */ 119#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
115#define ATMEL_US_CD (0xffff << 0) /* Clock Divider */ 120#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
116 121
117#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */ 122#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
118#define ATMEL_US_TO (0xffff << 0) /* Time-out Value */ 123#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
119 124
120#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */ 125#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
121#define ATMEL_US_TG (0xff << 0) /* Timeguard Value */ 126#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */
122 127
123#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */ 128#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
124#define ATMEL_US_NER 0x44 /* Number of Errors Register */ 129#define ATMEL_US_NER 0x44 /* Number of Errors Register */
125#define ATMEL_US_IF 0x4c /* IrDA Filter Register */ 130#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
126 131
127#define ATMEL_US_NAME 0xf0 /* Ip Name */ 132#define ATMEL_US_CMPR 0x90 /* Comparaison Register */
128#define ATMEL_US_VERSION 0xfc /* Ip Version */ 133#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
134#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
135#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
136#define ATMEL_US_ONE_DATA 0x0
137#define ATMEL_US_TWO_DATA 0x1
138#define ATMEL_US_FOUR_DATA 0x2
139#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
140#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
141#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
142#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
143
144#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
145#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
146#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
147
148#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
149#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
150#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */
151#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */
152#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */
153#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */
154#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */
155#define ATMEL_US_RXFEF BIT(3) /* Receive FIFO Empty Flag */
156#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */
157#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */
158#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */
159#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */
160#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */
161#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */
162
163#define ATMEL_US_NAME 0xf0 /* Ip Name */
164#define ATMEL_US_VERSION 0xfc /* Ip Version */
129 165
130#endif 166#endif
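For illustration only (not part of the patch): a minimal sketch of how a driver might combine the new FIFO definitions above. The function name, the register base pointer, and the threshold values are assumptions, and writel() stands in for whatever accessor the driver actually uses.

static void example_enable_fifos(void __iomem *base)
{
	/* one-data TX ready, four-data RX ready, arbitrary thresholds */
	u32 fmr = ATMEL_US_TXRDYM(ATMEL_US_ONE_DATA) |
		  ATMEL_US_RXRDYM(ATMEL_US_FOUR_DATA) |
		  ATMEL_US_TXFTHRES(16) | ATMEL_US_RXFTHRES(32);

	writel(ATMEL_US_TXFCLR | ATMEL_US_RXFCLR, base + ATMEL_US_CR);
	writel(fmr, base + ATMEL_US_FMR);
	writel(ATMEL_US_FIFOEN, base + ATMEL_US_CR);
}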
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a8540ecf..00a5763e850e 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,329 @@
2#ifndef _LINUX_ATOMIC_H 2#ifndef _LINUX_ATOMIC_H
3#define _LINUX_ATOMIC_H 3#define _LINUX_ATOMIC_H
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5#include <asm/barrier.h>
6
7/*
8 * Relaxed variants of xchg, cmpxchg and some atomic operations.
9 *
10 * We support four variants:
11 *
12 * - Fully ordered: The default implementation, no suffix required.
13 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
14 * - Release: Provides RELEASE semantics, _release suffix.
15 * - Relaxed: No ordering guarantees, _relaxed suffix.
16 *
17 * For compound atomics performing both a load and a store, ACQUIRE
18 * semantics apply only to the load and RELEASE semantics only to the
19 * store portion of the operation. Note that a failed cmpxchg_acquire
20 * does -not- imply any memory ordering constraints.
21 *
22 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
23 */
24
25#ifndef atomic_read_acquire
26#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
27#endif
28
29#ifndef atomic_set_release
30#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
31#endif
32
33/*
34 * The idea here is to build acquire/release variants by adding explicit
35 * barriers on top of the relaxed variant. In the case where the relaxed
36 * variant is already fully ordered, no additional barriers are needed.
37 */
38#define __atomic_op_acquire(op, args...) \
39({ \
40 typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
41 smp_mb__after_atomic(); \
42 __ret; \
43})
44
45#define __atomic_op_release(op, args...) \
46({ \
47 smp_mb__before_atomic(); \
48 op##_relaxed(args); \
49})
50
51#define __atomic_op_fence(op, args...) \
52({ \
53 typeof(op##_relaxed(args)) __ret; \
54 smp_mb__before_atomic(); \
55 __ret = op##_relaxed(args); \
56 smp_mb__after_atomic(); \
57 __ret; \
58})
59
60/* atomic_add_return_relaxed */
61#ifndef atomic_add_return_relaxed
62#define atomic_add_return_relaxed atomic_add_return
63#define atomic_add_return_acquire atomic_add_return
64#define atomic_add_return_release atomic_add_return
65
66#else /* atomic_add_return_relaxed */
67
68#ifndef atomic_add_return_acquire
69#define atomic_add_return_acquire(...) \
70 __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
71#endif
72
73#ifndef atomic_add_return_release
74#define atomic_add_return_release(...) \
75 __atomic_op_release(atomic_add_return, __VA_ARGS__)
76#endif
77
78#ifndef atomic_add_return
79#define atomic_add_return(...) \
80 __atomic_op_fence(atomic_add_return, __VA_ARGS__)
81#endif
82#endif /* atomic_add_return_relaxed */
83
84/* atomic_sub_return_relaxed */
85#ifndef atomic_sub_return_relaxed
86#define atomic_sub_return_relaxed atomic_sub_return
87#define atomic_sub_return_acquire atomic_sub_return
88#define atomic_sub_return_release atomic_sub_return
89
90#else /* atomic_sub_return_relaxed */
91
92#ifndef atomic_sub_return_acquire
93#define atomic_sub_return_acquire(...) \
94 __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
95#endif
96
97#ifndef atomic_sub_return_release
98#define atomic_sub_return_release(...) \
99 __atomic_op_release(atomic_sub_return, __VA_ARGS__)
100#endif
101
102#ifndef atomic_sub_return
103#define atomic_sub_return(...) \
104 __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
105#endif
106#endif /* atomic_sub_return_relaxed */
107
108/* atomic_xchg_relaxed */
109#ifndef atomic_xchg_relaxed
110#define atomic_xchg_relaxed atomic_xchg
111#define atomic_xchg_acquire atomic_xchg
112#define atomic_xchg_release atomic_xchg
113
114#else /* atomic_xchg_relaxed */
115
116#ifndef atomic_xchg_acquire
117#define atomic_xchg_acquire(...) \
118 __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
119#endif
120
121#ifndef atomic_xchg_release
122#define atomic_xchg_release(...) \
123 __atomic_op_release(atomic_xchg, __VA_ARGS__)
124#endif
125
126#ifndef atomic_xchg
127#define atomic_xchg(...) \
128 __atomic_op_fence(atomic_xchg, __VA_ARGS__)
129#endif
130#endif /* atomic_xchg_relaxed */
131
132/* atomic_cmpxchg_relaxed */
133#ifndef atomic_cmpxchg_relaxed
134#define atomic_cmpxchg_relaxed atomic_cmpxchg
135#define atomic_cmpxchg_acquire atomic_cmpxchg
136#define atomic_cmpxchg_release atomic_cmpxchg
137
138#else /* atomic_cmpxchg_relaxed */
139
140#ifndef atomic_cmpxchg_acquire
141#define atomic_cmpxchg_acquire(...) \
142 __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
143#endif
144
145#ifndef atomic_cmpxchg_release
146#define atomic_cmpxchg_release(...) \
147 __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
148#endif
149
150#ifndef atomic_cmpxchg
151#define atomic_cmpxchg(...) \
152 __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
153#endif
154#endif /* atomic_cmpxchg_relaxed */
155
156#ifndef atomic64_read_acquire
157#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
158#endif
159
160#ifndef atomic64_set_release
161#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
162#endif
163
164/* atomic64_add_return_relaxed */
165#ifndef atomic64_add_return_relaxed
166#define atomic64_add_return_relaxed atomic64_add_return
167#define atomic64_add_return_acquire atomic64_add_return
168#define atomic64_add_return_release atomic64_add_return
169
170#else /* atomic64_add_return_relaxed */
171
172#ifndef atomic64_add_return_acquire
173#define atomic64_add_return_acquire(...) \
174 __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
175#endif
176
177#ifndef atomic64_add_return_release
178#define atomic64_add_return_release(...) \
179 __atomic_op_release(atomic64_add_return, __VA_ARGS__)
180#endif
181
182#ifndef atomic64_add_return
183#define atomic64_add_return(...) \
184 __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
185#endif
186#endif /* atomic64_add_return_relaxed */
187
188/* atomic64_sub_return_relaxed */
189#ifndef atomic64_sub_return_relaxed
190#define atomic64_sub_return_relaxed atomic64_sub_return
191#define atomic64_sub_return_acquire atomic64_sub_return
192#define atomic64_sub_return_release atomic64_sub_return
193
194#else /* atomic64_sub_return_relaxed */
195
196#ifndef atomic64_sub_return_acquire
197#define atomic64_sub_return_acquire(...) \
198 __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
199#endif
200
201#ifndef atomic64_sub_return_release
202#define atomic64_sub_return_release(...) \
203 __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
204#endif
205
206#ifndef atomic64_sub_return
207#define atomic64_sub_return(...) \
208 __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
209#endif
210#endif /* atomic64_sub_return_relaxed */
211
212/* atomic64_xchg_relaxed */
213#ifndef atomic64_xchg_relaxed
214#define atomic64_xchg_relaxed atomic64_xchg
215#define atomic64_xchg_acquire atomic64_xchg
216#define atomic64_xchg_release atomic64_xchg
217
218#else /* atomic64_xchg_relaxed */
219
220#ifndef atomic64_xchg_acquire
221#define atomic64_xchg_acquire(...) \
222 __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
223#endif
224
225#ifndef atomic64_xchg_release
226#define atomic64_xchg_release(...) \
227 __atomic_op_release(atomic64_xchg, __VA_ARGS__)
228#endif
229
230#ifndef atomic64_xchg
231#define atomic64_xchg(...) \
232 __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
233#endif
234#endif /* atomic64_xchg_relaxed */
235
236/* atomic64_cmpxchg_relaxed */
237#ifndef atomic64_cmpxchg_relaxed
238#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
239#define atomic64_cmpxchg_acquire atomic64_cmpxchg
240#define atomic64_cmpxchg_release atomic64_cmpxchg
241
242#else /* atomic64_cmpxchg_relaxed */
243
244#ifndef atomic64_cmpxchg_acquire
245#define atomic64_cmpxchg_acquire(...) \
246 __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
247#endif
248
249#ifndef atomic64_cmpxchg_release
250#define atomic64_cmpxchg_release(...) \
251 __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
252#endif
253
254#ifndef atomic64_cmpxchg
255#define atomic64_cmpxchg(...) \
256 __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
257#endif
258#endif /* atomic64_cmpxchg_relaxed */
259
260/* cmpxchg_relaxed */
261#ifndef cmpxchg_relaxed
262#define cmpxchg_relaxed cmpxchg
263#define cmpxchg_acquire cmpxchg
264#define cmpxchg_release cmpxchg
265
266#else /* cmpxchg_relaxed */
267
268#ifndef cmpxchg_acquire
269#define cmpxchg_acquire(...) \
270 __atomic_op_acquire(cmpxchg, __VA_ARGS__)
271#endif
272
273#ifndef cmpxchg_release
274#define cmpxchg_release(...) \
275 __atomic_op_release(cmpxchg, __VA_ARGS__)
276#endif
277
278#ifndef cmpxchg
279#define cmpxchg(...) \
280 __atomic_op_fence(cmpxchg, __VA_ARGS__)
281#endif
282#endif /* cmpxchg_relaxed */
283
284/* cmpxchg64_relaxed */
285#ifndef cmpxchg64_relaxed
286#define cmpxchg64_relaxed cmpxchg64
287#define cmpxchg64_acquire cmpxchg64
288#define cmpxchg64_release cmpxchg64
289
290#else /* cmpxchg64_relaxed */
291
292#ifndef cmpxchg64_acquire
293#define cmpxchg64_acquire(...) \
294 __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
295#endif
296
297#ifndef cmpxchg64_release
298#define cmpxchg64_release(...) \
299 __atomic_op_release(cmpxchg64, __VA_ARGS__)
300#endif
301
302#ifndef cmpxchg64
303#define cmpxchg64(...) \
304 __atomic_op_fence(cmpxchg64, __VA_ARGS__)
305#endif
306#endif /* cmpxchg64_relaxed */
307
308/* xchg_relaxed */
309#ifndef xchg_relaxed
310#define xchg_relaxed xchg
311#define xchg_acquire xchg
312#define xchg_release xchg
313
314#else /* xchg_relaxed */
315
316#ifndef xchg_acquire
317#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
318#endif
319
320#ifndef xchg_release
321#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
322#endif
323
324#ifndef xchg
325#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
326#endif
327#endif /* xchg_relaxed */
5 328
6/** 329/**
7 * atomic_add_unless - add unless the number is already a given value 330 * atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
28#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 351#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
29#endif 352#endif
30 353
354#ifndef atomic_andnot
355static inline void atomic_andnot(int i, atomic_t *v)
356{
357 atomic_and(~i, v);
358}
359#endif
360
361static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
362{
363 atomic_andnot(mask, v);
364}
365
366static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
367{
368 atomic_or(mask, v);
369}
370
31/** 371/**
32 * atomic_inc_not_zero_hint - increment if not null 372 * atomic_inc_not_zero_hint - increment if not null
33 * @v: pointer of type atomic_t 373 * @v: pointer of type atomic_t
@@ -111,21 +451,16 @@ static inline int atomic_dec_if_positive(atomic_t *v)
111} 451}
112#endif 452#endif
113 453
114#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
115static inline void atomic_or(int i, atomic_t *v)
116{
117 int old;
118 int new;
119
120 do {
121 old = atomic_read(v);
122 new = old | i;
123 } while (atomic_cmpxchg(v, old, new) != old);
124}
125#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
126
127#include <asm-generic/atomic-long.h> 454#include <asm-generic/atomic-long.h>
128#ifdef CONFIG_GENERIC_ATOMIC64 455#ifdef CONFIG_GENERIC_ATOMIC64
129#include <asm-generic/atomic64.h> 456#include <asm-generic/atomic64.h>
130#endif 457#endif
458
459#ifndef atomic64_andnot
460static inline void atomic64_andnot(long long i, atomic64_t *v)
461{
462 atomic64_and(~i, v);
463}
464#endif
465
131#endif /* _LINUX_ATOMIC_H */ 466#endif /* _LINUX_ATOMIC_H */
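For illustration only (not part of the patch): a minimal sketch of how the acquire/release variants introduced above compose. The ACQUIRE on a successful cmpxchg orders the critical section after the lock is taken, and the RELEASE store orders it before the lock is dropped; the example_* names are hypothetical.

static inline void example_lock(atomic_t *l)
{
	/* a failed cmpxchg_acquire implies no ordering, which is fine in a spin loop */
	while (atomic_cmpxchg_acquire(l, 0, 1) != 0)
		cpu_relax();
}

static inline void example_unlock(atomic_t *l)
{
	atomic_set_release(l, 0);
}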
diff --git a/include/linux/audit.h b/include/linux/audit.h
index c2e7e3a83965..b2abc996c25d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -27,6 +27,9 @@
27#include <linux/ptrace.h> 27#include <linux/ptrace.h>
28#include <uapi/linux/audit.h> 28#include <uapi/linux/audit.h>
29 29
30#define AUDIT_INO_UNSET ((unsigned long)-1)
31#define AUDIT_DEV_UNSET ((dev_t)-1)
32
30struct audit_sig_info { 33struct audit_sig_info {
31 uid_t uid; 34 uid_t uid;
32 pid_t pid; 35 pid_t pid;
@@ -59,6 +62,7 @@ struct audit_krule {
59 struct audit_field *inode_f; /* quick access to an inode field */ 62 struct audit_field *inode_f; /* quick access to an inode field */
60 struct audit_watch *watch; /* associated watch */ 63 struct audit_watch *watch; /* associated watch */
61 struct audit_tree *tree; /* associated watched tree */ 64 struct audit_tree *tree; /* associated watched tree */
65 struct audit_fsnotify_mark *exe;
62 struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ 66 struct list_head rlist; /* entry in audit_{watch,tree}.rules list */
63 struct list_head list; /* for AUDIT_LIST* purposes only */ 67 struct list_head list; /* for AUDIT_LIST* purposes only */
64 u64 prio; 68 u64 prio;
diff --git a/include/linux/average.h b/include/linux/average.h
index c6028fd742c1..d04aa58280de 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -3,28 +3,43 @@
3 3
4/* Exponentially weighted moving average (EWMA) */ 4/* Exponentially weighted moving average (EWMA) */
5 5
6/* For more documentation see lib/average.c */ 6#define DECLARE_EWMA(name, _factor, _weight) \
7 7 struct ewma_##name { \
8struct ewma { 8 unsigned long internal; \
9 unsigned long internal; 9 }; \
10 unsigned long factor; 10 static inline void ewma_##name##_init(struct ewma_##name *e) \
11 unsigned long weight; 11 { \
12}; 12 BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
13 13 BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
14extern void ewma_init(struct ewma *avg, unsigned long factor, 14 BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
15 unsigned long weight); 15 BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
16 16 e->internal = 0; \
17extern struct ewma *ewma_add(struct ewma *avg, unsigned long val); 17 } \
18 18 static inline unsigned long \
19/** 19 ewma_##name##_read(struct ewma_##name *e) \
20 * ewma_read() - Get average value 20 { \
21 * @avg: Average structure 21 BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
22 * 22 BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
23 * Returns the average value held in @avg. 23 BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
24 */ 24 BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
25static inline unsigned long ewma_read(const struct ewma *avg) 25 return e->internal >> ilog2(_factor); \
26{ 26 } \
27 return avg->internal >> avg->factor; 27 static inline void ewma_##name##_add(struct ewma_##name *e, \
28} 28 unsigned long val) \
29 { \
30 unsigned long internal = ACCESS_ONCE(e->internal); \
31 unsigned long weight = ilog2(_weight); \
32 unsigned long factor = ilog2(_factor); \
33 \
34 BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
35 BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
36 BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
37 BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
38 \
39 ACCESS_ONCE(e->internal) = internal ? \
40 (((internal << weight) - internal) + \
41 (val << factor)) >> weight : \
42 (val << factor); \
43 }
29 44
30#endif /* _LINUX_AVERAGE_H */ 45#endif /* _LINUX_AVERAGE_H */
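For illustration only (not part of the patch): a minimal sketch of the new declaration-style API. The name "signal" and the factor/weight values are arbitrary, but both must be constant powers of two or the BUILD_BUG_ON()s above fire.

DECLARE_EWMA(signal, 16, 8)

static unsigned long example_update(struct ewma_signal *avg, unsigned long sample)
{
	/* ewma_signal_init(avg) must have run once before the first add */
	ewma_signal_add(avg, sample);
	return ewma_signal_read(avg);
}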
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0fe9df983ab7..5a5d79ee256f 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -286,7 +286,7 @@ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi
286 * %current's blkcg equals the effective blkcg of its memcg. No 286 * %current's blkcg equals the effective blkcg of its memcg. No
287 * need to use the relatively expensive cgroup_get_e_css(). 287 * need to use the relatively expensive cgroup_get_e_css().
288 */ 288 */
289 if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id))) 289 if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
290 return wb; 290 return wb;
291 return NULL; 291 return NULL;
292} 292}
@@ -402,7 +402,7 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
402} 402}
403 403
404struct wb_iter { 404struct wb_iter {
405 int start_blkcg_id; 405 int start_memcg_id;
406 struct radix_tree_iter tree_iter; 406 struct radix_tree_iter tree_iter;
407 void **slot; 407 void **slot;
408}; 408};
@@ -414,9 +414,9 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
414 414
415 WARN_ON_ONCE(!rcu_read_lock_held()); 415 WARN_ON_ONCE(!rcu_read_lock_held());
416 416
417 if (iter->start_blkcg_id >= 0) { 417 if (iter->start_memcg_id >= 0) {
418 iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id); 418 iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
419 iter->start_blkcg_id = -1; 419 iter->start_memcg_id = -1;
420 } else { 420 } else {
421 iter->slot = radix_tree_next_slot(iter->slot, titer, 0); 421 iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
422 } 422 }
@@ -430,30 +430,30 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
430 430
431static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, 431static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
432 struct backing_dev_info *bdi, 432 struct backing_dev_info *bdi,
433 int start_blkcg_id) 433 int start_memcg_id)
434{ 434{
435 iter->start_blkcg_id = start_blkcg_id; 435 iter->start_memcg_id = start_memcg_id;
436 436
437 if (start_blkcg_id) 437 if (start_memcg_id)
438 return __wb_iter_next(iter, bdi); 438 return __wb_iter_next(iter, bdi);
439 else 439 else
440 return &bdi->wb; 440 return &bdi->wb;
441} 441}
442 442
443/** 443/**
444 * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order 444 * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
445 * @wb_cur: cursor struct bdi_writeback pointer 445 * @wb_cur: cursor struct bdi_writeback pointer
446 * @bdi: bdi to walk wb's of 446 * @bdi: bdi to walk wb's of
447 * @iter: pointer to struct wb_iter to be used as iteration buffer 447 * @iter: pointer to struct wb_iter to be used as iteration buffer
448 * @start_blkcg_id: blkcg ID to start iteration from 448 * @start_memcg_id: memcg ID to start iteration from
449 * 449 *
450 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending 450 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
451 * blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter 451 * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter
452 * to be used as temp storage during iteration. rcu_read_lock() must be 452 * to be used as temp storage during iteration. rcu_read_lock() must be
453 * held throughout iteration. 453 * held throughout iteration.
454 */ 454 */
455#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ 455#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \
456 for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \ 456 for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \
457 (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) 457 (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
458 458
459#else /* CONFIG_CGROUP_WRITEBACK */ 459#else /* CONFIG_CGROUP_WRITEBACK */
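For illustration only (not part of the patch): a minimal sketch of the renamed iterator, starting from memcg ID 0 and holding rcu_read_lock() as the comment above requires. example_count_wbs() is a hypothetical helper.

static int example_count_wbs(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;
	struct wb_iter iter;
	int nr = 0;

	rcu_read_lock();
	bdi_for_each_wb(wb, bdi, &iter, 0)
		nr++;
	rcu_read_unlock();

	return nr;
}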
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 14eea946e640..ed3768f4ecc7 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -75,5 +75,6 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
75#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */ 75#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
76#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3) 76#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
77#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ 77#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
78#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
78 79
79#endif /* __BASIC_MMIO_GPIO_H */ 80#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 6cceedf65ca2..cf038431a5cc 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -640,7 +640,6 @@ struct bcma_drv_cc {
640 spinlock_t gpio_lock; 640 spinlock_t gpio_lock;
641#ifdef CONFIG_BCMA_DRIVER_GPIO 641#ifdef CONFIG_BCMA_DRIVER_GPIO
642 struct gpio_chip gpio; 642 struct gpio_chip gpio;
643 struct irq_domain *irq_domain;
644#endif 643#endif
645}; 644};
646 645
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5e963a6d7c14..b9b6e046b52e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -187,17 +187,6 @@ static inline void *bio_data(struct bio *bio)
187 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) 187 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
188 188
189/* 189/*
190 * Check if adding a bio_vec after bprv with offset would create a gap in
191 * the SG list. Most drivers don't care about this, but some do.
192 */
193static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
194{
195 return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
196}
197
198#define bio_io_error(bio) bio_endio((bio), -EIO)
199
200/*
201 * drivers should _never_ use the all version - the bio may have been split 190 * drivers should _never_ use the all version - the bio may have been split
202 * before it got to the driver and the driver won't own all of it 191 * before it got to the driver and the driver won't own all of it
203 */ 192 */
@@ -306,6 +295,21 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
306 atomic_set(&bio->__bi_cnt, count); 295 atomic_set(&bio->__bi_cnt, count);
307} 296}
308 297
298static inline bool bio_flagged(struct bio *bio, unsigned int bit)
299{
300 return (bio->bi_flags & (1U << bit)) != 0;
301}
302
303static inline void bio_set_flag(struct bio *bio, unsigned int bit)
304{
305 bio->bi_flags |= (1U << bit);
306}
307
308static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
309{
310 bio->bi_flags &= ~(1U << bit);
311}
312
309enum bip_flags { 313enum bip_flags {
310 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ 314 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
311 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ 315 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -426,7 +430,14 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
426 430
427} 431}
428 432
429extern void bio_endio(struct bio *, int); 433extern void bio_endio(struct bio *);
434
435static inline void bio_io_error(struct bio *bio)
436{
437 bio->bi_error = -EIO;
438 bio_endio(bio);
439}
440
430struct request_queue; 441struct request_queue;
431extern int bio_phys_segments(struct request_queue *, struct bio *); 442extern int bio_phys_segments(struct request_queue *, struct bio *);
432 443
@@ -440,7 +451,6 @@ void bio_chain(struct bio *, struct bio *);
440extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); 451extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
441extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, 452extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
442 unsigned int, unsigned int); 453 unsigned int, unsigned int);
443extern int bio_get_nr_vecs(struct block_device *);
444struct rq_map_data; 454struct rq_map_data;
445extern struct bio *bio_map_user_iov(struct request_queue *, 455extern struct bio *bio_map_user_iov(struct request_queue *,
446 const struct iov_iter *, gfp_t); 456 const struct iov_iter *, gfp_t);
@@ -717,7 +727,7 @@ extern void bio_integrity_free(struct bio *);
717extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); 727extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
718extern bool bio_integrity_enabled(struct bio *bio); 728extern bool bio_integrity_enabled(struct bio *bio);
719extern int bio_integrity_prep(struct bio *); 729extern int bio_integrity_prep(struct bio *);
720extern void bio_integrity_endio(struct bio *, int); 730extern void bio_integrity_endio(struct bio *);
721extern void bio_integrity_advance(struct bio *, unsigned int); 731extern void bio_integrity_advance(struct bio *, unsigned int);
722extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); 732extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
723extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); 733extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
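For illustration only (not part of the patch): a minimal sketch of a completion path under the new interface, where the error is recorded in bio->bi_error instead of being passed to bio_endio(). example_complete() is a hypothetical driver helper.

static void example_complete(struct bio *bio, int error)
{
	if (error)
		bio->bi_error = error;	/* bio_io_error(bio) would set -EIO and complete */
	bio_endio(bio);
}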
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index ea17cca9e685..9653fdb76a42 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -295,7 +295,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
295 return find_first_zero_bit(src, nbits) == nbits; 295 return find_first_zero_bit(src, nbits) == nbits;
296} 296}
297 297
298static inline int bitmap_weight(const unsigned long *src, unsigned int nbits) 298static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
299{ 299{
300 if (small_const_nbits(nbits)) 300 if (small_const_nbits(nbits))
301 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); 301 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 297f5bda4fdf..e63553386ae7 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -57,7 +57,7 @@ extern unsigned long __sw_hweight64(__u64 w);
57 (bit) < (size); \ 57 (bit) < (size); \
58 (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) 58 (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
59 59
60static __inline__ int get_bitmask_order(unsigned int count) 60static inline int get_bitmask_order(unsigned int count)
61{ 61{
62 int order; 62 int order;
63 63
@@ -65,7 +65,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
65 return order; /* We could be slightly more clever with -1 here... */ 65 return order; /* We could be slightly more clever with -1 here... */
66} 66}
67 67
68static __inline__ int get_count_order(unsigned int count) 68static inline int get_count_order(unsigned int count)
69{ 69{
70 int order; 70 int order;
71 71
@@ -75,7 +75,7 @@ static __inline__ int get_count_order(unsigned int count)
75 return order; 75 return order;
76} 76}
77 77
78static inline unsigned long hweight_long(unsigned long w) 78static __always_inline unsigned long hweight_long(unsigned long w)
79{ 79{
80 return sizeof(w) == 4 ? hweight32(w) : hweight64(w); 80 return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
81} 81}
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 1b62d768c7df..0a5cc7a1109b 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -14,12 +14,15 @@
14 */ 14 */
15 15
16#include <linux/cgroup.h> 16#include <linux/cgroup.h>
17#include <linux/u64_stats_sync.h> 17#include <linux/percpu_counter.h>
18#include <linux/seq_file.h> 18#include <linux/seq_file.h>
19#include <linux/radix-tree.h> 19#include <linux/radix-tree.h>
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <linux/atomic.h> 21#include <linux/atomic.h>
22 22
23/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
24#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
25
23/* Max limits for throttle policy */ 26/* Max limits for throttle policy */
24#define THROTL_IOPS_MAX UINT_MAX 27#define THROTL_IOPS_MAX UINT_MAX
25 28
@@ -45,7 +48,7 @@ struct blkcg {
45 struct blkcg_gq *blkg_hint; 48 struct blkcg_gq *blkg_hint;
46 struct hlist_head blkg_list; 49 struct hlist_head blkg_list;
47 50
48 struct blkcg_policy_data *pd[BLKCG_MAX_POLS]; 51 struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
49 52
50 struct list_head all_blkcgs_node; 53 struct list_head all_blkcgs_node;
51#ifdef CONFIG_CGROUP_WRITEBACK 54#ifdef CONFIG_CGROUP_WRITEBACK
@@ -53,14 +56,19 @@ struct blkcg {
53#endif 56#endif
54}; 57};
55 58
59/*
60 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
61 * recursive. Used to carry stats of dead children, and, for blkg_rwstat,
62 * to carry result values from read and sum operations.
63 */
56struct blkg_stat { 64struct blkg_stat {
57 struct u64_stats_sync syncp; 65 struct percpu_counter cpu_cnt;
58 uint64_t cnt; 66 atomic64_t aux_cnt;
59}; 67};
60 68
61struct blkg_rwstat { 69struct blkg_rwstat {
62 struct u64_stats_sync syncp; 70 struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
63 uint64_t cnt[BLKG_RWSTAT_NR]; 71 atomic64_t aux_cnt[BLKG_RWSTAT_NR];
64}; 72};
65 73
66/* 74/*
@@ -68,32 +76,28 @@ struct blkg_rwstat {
68 * request_queue (q). This is used by blkcg policies which need to track 76 * request_queue (q). This is used by blkcg policies which need to track
69 * information per blkcg - q pair. 77 * information per blkcg - q pair.
70 * 78 *
71 * There can be multiple active blkcg policies and each has its private 79 * There can be multiple active blkcg policies and each blkg:policy pair is
72 * data on each blkg, the size of which is determined by 80 * represented by a blkg_policy_data which is allocated and freed by each
73 * blkcg_policy->pd_size. blkcg core allocates and frees such areas 81 * policy's pd_alloc/free_fn() methods. A policy can allocate private data
74 * together with blkg and invokes pd_init/exit_fn() methods. 82 * area by allocating larger data structure which embeds blkg_policy_data
75 * 83 * at the beginning.
76 * Such private data must embed struct blkg_policy_data (pd) at the
77 * beginning and pd_size can't be smaller than pd.
78 */ 84 */
79struct blkg_policy_data { 85struct blkg_policy_data {
80 /* the blkg and policy id this per-policy data belongs to */ 86 /* the blkg and policy id this per-policy data belongs to */
81 struct blkcg_gq *blkg; 87 struct blkcg_gq *blkg;
82 int plid; 88 int plid;
83
84 /* used during policy activation */
85 struct list_head alloc_node;
86}; 89};
87 90
88/* 91/*
89 * Policies that need to keep per-blkcg data which is independent 92 * Policies that need to keep per-blkcg data which is independent from any
90 * from any request_queue associated to it must specify its size 93 * request_queue associated to it should implement cpd_alloc/free_fn()
91 * with the cpd_size field of the blkcg_policy structure and 94 * methods. A policy can allocate private data area by allocating larger
92 * embed a blkcg_policy_data in it. cpd_init() is invoked to let 95 * data structure which embeds blkcg_policy_data at the beginning.
93 * each policy handle per-blkcg data. 96 * cpd_init() is invoked to let each policy handle per-blkcg data.
94 */ 97 */
95struct blkcg_policy_data { 98struct blkcg_policy_data {
96 /* the policy id this per-policy data belongs to */ 99 /* the blkcg and policy id this per-policy data belongs to */
100 struct blkcg *blkcg;
97 int plid; 101 int plid;
98}; 102};
99 103
@@ -123,40 +127,50 @@ struct blkcg_gq {
123 /* is this blkg online? protected by both blkcg and q locks */ 127 /* is this blkg online? protected by both blkcg and q locks */
124 bool online; 128 bool online;
125 129
130 struct blkg_rwstat stat_bytes;
131 struct blkg_rwstat stat_ios;
132
126 struct blkg_policy_data *pd[BLKCG_MAX_POLS]; 133 struct blkg_policy_data *pd[BLKCG_MAX_POLS];
127 134
128 struct rcu_head rcu_head; 135 struct rcu_head rcu_head;
129}; 136};
130 137
131typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg); 138typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
132typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg); 139typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
133typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg); 140typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
134typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg); 141typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
135typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg); 142typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
136typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg); 143typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
144typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
145typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
146typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
147typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
137 148
138struct blkcg_policy { 149struct blkcg_policy {
139 int plid; 150 int plid;
140 /* policy specific private data size */
141 size_t pd_size;
142 /* policy specific per-blkcg data size */
143 size_t cpd_size;
144 /* cgroup files for the policy */ 151 /* cgroup files for the policy */
145 struct cftype *cftypes; 152 struct cftype *dfl_cftypes;
153 struct cftype *legacy_cftypes;
146 154
147 /* operations */ 155 /* operations */
156 blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
148 blkcg_pol_init_cpd_fn *cpd_init_fn; 157 blkcg_pol_init_cpd_fn *cpd_init_fn;
158 blkcg_pol_free_cpd_fn *cpd_free_fn;
159 blkcg_pol_bind_cpd_fn *cpd_bind_fn;
160
161 blkcg_pol_alloc_pd_fn *pd_alloc_fn;
149 blkcg_pol_init_pd_fn *pd_init_fn; 162 blkcg_pol_init_pd_fn *pd_init_fn;
150 blkcg_pol_online_pd_fn *pd_online_fn; 163 blkcg_pol_online_pd_fn *pd_online_fn;
151 blkcg_pol_offline_pd_fn *pd_offline_fn; 164 blkcg_pol_offline_pd_fn *pd_offline_fn;
152 blkcg_pol_exit_pd_fn *pd_exit_fn; 165 blkcg_pol_free_pd_fn *pd_free_fn;
153 blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; 166 blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
154}; 167};
155 168
156extern struct blkcg blkcg_root; 169extern struct blkcg blkcg_root;
157extern struct cgroup_subsys_state * const blkcg_root_css; 170extern struct cgroup_subsys_state * const blkcg_root_css;
158 171
159struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q); 172struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
173 struct request_queue *q, bool update_hint);
160struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, 174struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
161 struct request_queue *q); 175 struct request_queue *q);
162int blkcg_init_queue(struct request_queue *q); 176int blkcg_init_queue(struct request_queue *q);
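With pd_size/cpd_size gone, a policy now hands blkcg core its own allocation and free callbacks and embeds struct blkg_policy_data itself. A minimal sketch of the new registration shape; struct my_group, my_pd_alloc() and friends are hypothetical, not part of the header:

	struct my_group {			/* policy-private per-(blkcg, queue) data */
		struct blkg_policy_data pd;	/* embedded at the beginning, per the comment above */
		u64 limit;
	};

	static struct blkg_policy_data *my_pd_alloc(gfp_t gfp, int node)
	{
		struct my_group *mg = kzalloc_node(sizeof(*mg), gfp, node);

		return mg ? &mg->pd : NULL;
	}

	static void my_pd_free(struct blkg_policy_data *pd)
	{
		kfree(container_of(pd, struct my_group, pd));
	}

	static struct blkcg_policy my_policy = {
		.pd_alloc_fn	= my_pd_alloc,
		.pd_free_fn	= my_pd_free,
		/* .dfl_cftypes / .legacy_cftypes split the cgroup files between the two hierarchies */
	};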
@@ -171,6 +185,7 @@ int blkcg_activate_policy(struct request_queue *q,
171void blkcg_deactivate_policy(struct request_queue *q, 185void blkcg_deactivate_policy(struct request_queue *q,
172 const struct blkcg_policy *pol); 186 const struct blkcg_policy *pol);
173 187
188const char *blkg_dev_name(struct blkcg_gq *blkg);
174void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, 189void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
175 u64 (*prfill)(struct seq_file *, 190 u64 (*prfill)(struct seq_file *,
176 struct blkg_policy_data *, int), 191 struct blkg_policy_data *, int),
@@ -182,19 +197,24 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
182u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); 197u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
183u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, 198u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
184 int off); 199 int off);
200int blkg_print_stat_bytes(struct seq_file *sf, void *v);
201int blkg_print_stat_ios(struct seq_file *sf, void *v);
202int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
203int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
185 204
186u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off); 205u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
187struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd, 206 struct blkcg_policy *pol, int off);
188 int off); 207struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
208 struct blkcg_policy *pol, int off);
189 209
190struct blkg_conf_ctx { 210struct blkg_conf_ctx {
191 struct gendisk *disk; 211 struct gendisk *disk;
192 struct blkcg_gq *blkg; 212 struct blkcg_gq *blkg;
193 u64 v; 213 char *body;
194}; 214};
195 215
196int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, 216int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
197 const char *input, struct blkg_conf_ctx *ctx); 217 char *input, struct blkg_conf_ctx *ctx);
198void blkg_conf_finish(struct blkg_conf_ctx *ctx); 218void blkg_conf_finish(struct blkg_conf_ctx *ctx);
199 219
200 220
@@ -205,7 +225,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
205 225
206static inline struct blkcg *task_blkcg(struct task_struct *tsk) 226static inline struct blkcg *task_blkcg(struct task_struct *tsk)
207{ 227{
208 return css_to_blkcg(task_css(tsk, blkio_cgrp_id)); 228 return css_to_blkcg(task_css(tsk, io_cgrp_id));
209} 229}
210 230
211static inline struct blkcg *bio_blkcg(struct bio *bio) 231static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -218,7 +238,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
218static inline struct cgroup_subsys_state * 238static inline struct cgroup_subsys_state *
219task_get_blkcg_css(struct task_struct *task) 239task_get_blkcg_css(struct task_struct *task)
220{ 240{
221 return task_get_css(task, blkio_cgrp_id); 241 return task_get_css(task, io_cgrp_id);
222} 242}
223 243
224/** 244/**
@@ -233,6 +253,52 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
233} 253}
234 254
235/** 255/**
256 * __blkg_lookup - internal version of blkg_lookup()
257 * @blkcg: blkcg of interest
258 * @q: request_queue of interest
259 * @update_hint: whether to update lookup hint with the result or not
260 *
261 * This is internal version and shouldn't be used by policy
262 * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
263 * @q's bypass state. If @update_hint is %true, the caller should be
264 * holding @q->queue_lock and lookup hint is updated on success.
265 */
266static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
267 struct request_queue *q,
268 bool update_hint)
269{
270 struct blkcg_gq *blkg;
271
272 if (blkcg == &blkcg_root)
273 return q->root_blkg;
274
275 blkg = rcu_dereference(blkcg->blkg_hint);
276 if (blkg && blkg->q == q)
277 return blkg;
278
279 return blkg_lookup_slowpath(blkcg, q, update_hint);
280}
281
282/**
283 * blkg_lookup - lookup blkg for the specified blkcg - q pair
284 * @blkcg: blkcg of interest
285 * @q: request_queue of interest
286 *
287 * Lookup blkg for the @blkcg - @q pair. This function should be called
288 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
289 * - see blk_queue_bypass_start() for details.
290 */
291static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
292 struct request_queue *q)
293{
294 WARN_ON_ONCE(!rcu_read_lock_held());
295
296 if (unlikely(blk_queue_bypass(q)))
297 return NULL;
298 return __blkg_lookup(blkcg, q, false);
299}
300
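blkg_lookup() is now a header inline built on the hint-less __blkg_lookup(), and callers are expected to hold the RCU read lock themselves. A minimal sketch of a lookup that respects that contract (my_blkg_online() is hypothetical):

	static bool my_blkg_online(struct request_queue *q, struct bio *bio)
	{
		struct blkcg_gq *blkg;
		bool online = false;

		rcu_read_lock();
		blkg = blkg_lookup(bio_blkcg(bio), q);	/* NULL if absent or @q is bypassing */
		if (blkg)
			online = blkg->online;
		rcu_read_unlock();

		return online;
	}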
301/**
236 * blkg_to_pdata - get policy private data 302 * blkg_to_pdata - get policy private data
237 * @blkg: blkg of interest 303 * @blkg: blkg of interest
238 * @pol: policy of interest 304 * @pol: policy of interest
@@ -248,7 +314,7 @@ static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
248static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, 314static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
249 struct blkcg_policy *pol) 315 struct blkcg_policy *pol)
250{ 316{
251 return blkcg ? blkcg->pd[pol->plid] : NULL; 317 return blkcg ? blkcg->cpd[pol->plid] : NULL;
252} 318}
253 319
254/** 320/**
@@ -262,6 +328,11 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
262 return pd ? pd->blkg : NULL; 328 return pd ? pd->blkg : NULL;
263} 329}
264 330
331static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
332{
333 return cpd ? cpd->blkcg : NULL;
334}
335
265/** 336/**
266 * blkg_path - format cgroup path of blkg 337 * blkg_path - format cgroup path of blkg
267 * @blkg: blkg of interest 338 * @blkg: blkg of interest
@@ -309,9 +380,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
309 call_rcu(&blkg->rcu_head, __blkg_release_rcu); 380 call_rcu(&blkg->rcu_head, __blkg_release_rcu);
310} 381}
311 382
312struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
313 bool update_hint);
314
315/** 383/**
316 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants 384 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
317 * @d_blkg: loop cursor pointing to the current descendant 385 * @d_blkg: loop cursor pointing to the current descendant
@@ -373,8 +441,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
373 * or if either the blkcg or queue is going away. Fall back to 441 * or if either the blkcg or queue is going away. Fall back to
374 * root_rl in such cases. 442 * root_rl in such cases.
375 */ 443 */
376 blkg = blkg_lookup_create(blkcg, q); 444 blkg = blkg_lookup(blkcg, q);
377 if (unlikely(IS_ERR(blkg))) 445 if (unlikely(!blkg))
378 goto root_rl; 446 goto root_rl;
379 447
380 blkg_get(blkg); 448 blkg_get(blkg);
@@ -394,8 +462,7 @@ root_rl:
394 */ 462 */
395static inline void blk_put_rl(struct request_list *rl) 463static inline void blk_put_rl(struct request_list *rl)
396{ 464{
397 /* root_rl may not have blkg set */ 465 if (rl->blkg->blkcg != &blkcg_root)
398 if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
399 blkg_put(rl->blkg); 466 blkg_put(rl->blkg);
400} 467}
401 468
@@ -433,9 +500,21 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
433#define blk_queue_for_each_rl(rl, q) \ 500#define blk_queue_for_each_rl(rl, q) \
434 for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) 501 for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
435 502
436static inline void blkg_stat_init(struct blkg_stat *stat) 503static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
437{ 504{
438 u64_stats_init(&stat->syncp); 505 int ret;
506
507 ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
508 if (ret)
509 return ret;
510
511 atomic64_set(&stat->aux_cnt, 0);
512 return 0;
513}
514
515static inline void blkg_stat_exit(struct blkg_stat *stat)
516{
517 percpu_counter_destroy(&stat->cpu_cnt);
439} 518}
440 519
441/** 520/**
@@ -443,34 +522,21 @@ static inline void blkg_stat_init(struct blkg_stat *stat)
443 * @stat: target blkg_stat 522 * @stat: target blkg_stat
444 * @val: value to add 523 * @val: value to add
445 * 524 *
446 * Add @val to @stat. The caller is responsible for synchronizing calls to 525 * Add @val to @stat. The caller must ensure that IRQ on the same CPU
447 * this function. 526 * don't re-enter this function for the same counter.
448 */ 527 */
449static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) 528static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
450{ 529{
451 u64_stats_update_begin(&stat->syncp); 530 __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
452 stat->cnt += val;
453 u64_stats_update_end(&stat->syncp);
454} 531}
455 532
456/** 533/**
457 * blkg_stat_read - read the current value of a blkg_stat 534 * blkg_stat_read - read the current value of a blkg_stat
458 * @stat: blkg_stat to read 535 * @stat: blkg_stat to read
459 *
460 * Read the current value of @stat. This function can be called without
461 * synchroniztion and takes care of u64 atomicity.
462 */ 536 */
463static inline uint64_t blkg_stat_read(struct blkg_stat *stat) 537static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
464{ 538{
465 unsigned int start; 539 return percpu_counter_sum_positive(&stat->cpu_cnt);
466 uint64_t v;
467
468 do {
469 start = u64_stats_fetch_begin_irq(&stat->syncp);
470 v = stat->cnt;
471 } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
472
473 return v;
474} 540}
475 541
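Because the counter is now percpu, initialization can fail and needs an explicit teardown, and reads sum all CPUs. A minimal lifecycle sketch (the surrounding pd_alloc-style context is implied; my_stat_demo() is hypothetical):

	static int my_stat_demo(void)
	{
		struct blkg_stat st;
		int ret;

		ret = blkg_stat_init(&st, GFP_KERNEL);	/* percpu allocation may fail */
		if (ret)
			return ret;

		blkg_stat_add(&st, 1);			/* batched per-cpu add */
		pr_debug("serviced=%llu\n",
			 (unsigned long long)blkg_stat_read(&st));	/* sums every CPU */

		blkg_stat_exit(&st);			/* frees the percpu counter */
		return 0;
	}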
476/** 542/**
@@ -479,24 +545,46 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
479 */ 545 */
480static inline void blkg_stat_reset(struct blkg_stat *stat) 546static inline void blkg_stat_reset(struct blkg_stat *stat)
481{ 547{
482 stat->cnt = 0; 548 percpu_counter_set(&stat->cpu_cnt, 0);
549 atomic64_set(&stat->aux_cnt, 0);
483} 550}
484 551
485/** 552/**
486 * blkg_stat_merge - merge a blkg_stat into another 553 * blkg_stat_add_aux - add a blkg_stat into another's aux count
487 * @to: the destination blkg_stat 554 * @to: the destination blkg_stat
488 * @from: the source 555 * @from: the source
489 * 556 *
490 * Add @from's count to @to. 557 * Add @from's count including the aux one to @to's aux count.
491 */ 558 */
492static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from) 559static inline void blkg_stat_add_aux(struct blkg_stat *to,
560 struct blkg_stat *from)
493{ 561{
494 blkg_stat_add(to, blkg_stat_read(from)); 562 atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
563 &to->aux_cnt);
495} 564}
496 565
497static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat) 566static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
498{ 567{
499 u64_stats_init(&rwstat->syncp); 568 int i, ret;
569
570 for (i = 0; i < BLKG_RWSTAT_NR; i++) {
571 ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
572 if (ret) {
573 while (--i >= 0)
574 percpu_counter_destroy(&rwstat->cpu_cnt[i]);
575 return ret;
576 }
577 atomic64_set(&rwstat->aux_cnt[i], 0);
578 }
579 return 0;
580}
581
582static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
583{
584 int i;
585
586 for (i = 0; i < BLKG_RWSTAT_NR; i++)
587 percpu_counter_destroy(&rwstat->cpu_cnt[i]);
500} 588}
501 589
502/** 590/**
@@ -511,39 +599,38 @@ static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
511static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, 599static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
512 int rw, uint64_t val) 600 int rw, uint64_t val)
513{ 601{
514 u64_stats_update_begin(&rwstat->syncp); 602 struct percpu_counter *cnt;
515 603
516 if (rw & REQ_WRITE) 604 if (rw & REQ_WRITE)
517 rwstat->cnt[BLKG_RWSTAT_WRITE] += val; 605 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
518 else 606 else
519 rwstat->cnt[BLKG_RWSTAT_READ] += val; 607 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
608
609 __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
610
520 if (rw & REQ_SYNC) 611 if (rw & REQ_SYNC)
521 rwstat->cnt[BLKG_RWSTAT_SYNC] += val; 612 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
522 else 613 else
523 rwstat->cnt[BLKG_RWSTAT_ASYNC] += val; 614 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
524 615
525 u64_stats_update_end(&rwstat->syncp); 616 __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
526} 617}
527 618
528/** 619/**
529 * blkg_rwstat_read - read the current values of a blkg_rwstat 620 * blkg_rwstat_read - read the current values of a blkg_rwstat
530 * @rwstat: blkg_rwstat to read 621 * @rwstat: blkg_rwstat to read
531 * 622 *
532 * Read the current snapshot of @rwstat and return it as the return value. 623 * Read the current snapshot of @rwstat and return it in the aux counts.
533 * This function can be called without synchronization and takes care of
534 * u64 atomicity.
535 */ 624 */
536static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) 625static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
537{ 626{
538 unsigned int start; 627 struct blkg_rwstat result;
539 struct blkg_rwstat tmp; 628 int i;
540
541 do {
542 start = u64_stats_fetch_begin_irq(&rwstat->syncp);
543 tmp = *rwstat;
544 } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
545 629
546 return tmp; 630 for (i = 0; i < BLKG_RWSTAT_NR; i++)
631 atomic64_set(&result.aux_cnt[i],
632 percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
633 return result;
547} 634}
548 635
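The same pattern applies to blkg_rwstat, with one percpu counter per READ/WRITE/SYNC/ASYNC slot; note that blkg_rwstat_read() now hands back its snapshot in the aux_cnt[] fields of the returned struct. A short usage sketch (my_rwstat_demo() is hypothetical):

	static u64 my_rwstat_demo(struct blkcg_gq *blkg)
	{
		struct blkg_rwstat snap;

		/* account one 4k synchronous write against the new per-blkg stats */
		blkg_rwstat_add(&blkg->stat_bytes, REQ_WRITE | REQ_SYNC, 4096);
		blkg_rwstat_add(&blkg->stat_ios, REQ_WRITE | REQ_SYNC, 1);

		snap = blkg_rwstat_read(&blkg->stat_bytes);
		return atomic64_read(&snap.aux_cnt[BLKG_RWSTAT_WRITE]);	/* bytes written so far */
	}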
549/** 636/**
@@ -558,7 +645,8 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
558{ 645{
559 struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); 646 struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
560 647
561 return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]; 648 return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
649 atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
562} 650}
563 651
564/** 652/**
@@ -567,26 +655,71 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
567 */ 655 */
568static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) 656static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
569{ 657{
570 memset(rwstat->cnt, 0, sizeof(rwstat->cnt)); 658 int i;
659
660 for (i = 0; i < BLKG_RWSTAT_NR; i++) {
661 percpu_counter_set(&rwstat->cpu_cnt[i], 0);
662 atomic64_set(&rwstat->aux_cnt[i], 0);
663 }
571} 664}
572 665
573/** 666/**
574 * blkg_rwstat_merge - merge a blkg_rwstat into another 667 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
575 * @to: the destination blkg_rwstat 668 * @to: the destination blkg_rwstat
576 * @from: the source 669 * @from: the source
577 * 670 *
578 * Add @from's counts to @to. 671 * Add @from's count including the aux one to @to's aux count.
579 */ 672 */
580static inline void blkg_rwstat_merge(struct blkg_rwstat *to, 673static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
581 struct blkg_rwstat *from) 674 struct blkg_rwstat *from)
582{ 675{
583 struct blkg_rwstat v = blkg_rwstat_read(from); 676 struct blkg_rwstat v = blkg_rwstat_read(from);
584 int i; 677 int i;
585 678
586 u64_stats_update_begin(&to->syncp);
587 for (i = 0; i < BLKG_RWSTAT_NR; i++) 679 for (i = 0; i < BLKG_RWSTAT_NR; i++)
588 to->cnt[i] += v.cnt[i]; 680 atomic64_add(atomic64_read(&v.aux_cnt[i]) +
589 u64_stats_update_end(&to->syncp); 681 atomic64_read(&from->aux_cnt[i]),
682 &to->aux_cnt[i]);
683}
684
685#ifdef CONFIG_BLK_DEV_THROTTLING
686extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
687 struct bio *bio);
688#else
689static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
690 struct bio *bio) { return false; }
691#endif
692
693static inline bool blkcg_bio_issue_check(struct request_queue *q,
694 struct bio *bio)
695{
696 struct blkcg *blkcg;
697 struct blkcg_gq *blkg;
698 bool throtl = false;
699
700 rcu_read_lock();
701 blkcg = bio_blkcg(bio);
702
703 blkg = blkg_lookup(blkcg, q);
704 if (unlikely(!blkg)) {
705 spin_lock_irq(q->queue_lock);
706 blkg = blkg_lookup_create(blkcg, q);
707 if (IS_ERR(blkg))
708 blkg = NULL;
709 spin_unlock_irq(q->queue_lock);
710 }
711
712 throtl = blk_throtl_bio(q, blkg, bio);
713
714 if (!throtl) {
715 blkg = blkg ?: q->root_blkg;
 716		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
 717				bio->bi_iter.bi_size);
 718		blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
719 }
720
721 rcu_read_unlock();
722 return !throtl;
590} 723}
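For reference, roughly how a submission path is meant to consume this helper: a false return means blk-throttle has taken ownership of the bio and it must not be issued now (sketch only; my_make_request() is hypothetical):

	static void my_make_request(struct request_queue *q, struct bio *bio)
	{
		if (!blkcg_bio_issue_check(q, bio))
			return;		/* throttled: blk-throttle queued the bio */

		/* ... normal mapping/dispatch of the bio ... */
	}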
591 724
592#else /* CONFIG_BLK_CGROUP */ 725#else /* CONFIG_BLK_CGROUP */
@@ -642,6 +775,9 @@ static inline void blk_put_rl(struct request_list *rl) { }
642static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } 775static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
643static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } 776static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
644 777
778static inline bool blkcg_bio_issue_check(struct request_queue *q,
779 struct bio *bio) { return true; }
780
645#define blk_queue_for_each_rl(rl, q) \ 781#define blk_queue_for_each_rl(rl, q) \
646 for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) 782 for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
647 783
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 7303b3405520..e8130138f29d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,7 +14,7 @@ struct page;
14struct block_device; 14struct block_device;
15struct io_context; 15struct io_context;
16struct cgroup_subsys_state; 16struct cgroup_subsys_state;
17typedef void (bio_end_io_t) (struct bio *, int); 17typedef void (bio_end_io_t) (struct bio *);
18typedef void (bio_destructor_t) (struct bio *); 18typedef void (bio_destructor_t) (struct bio *);
19 19
20/* 20/*
@@ -46,7 +46,8 @@ struct bvec_iter {
46struct bio { 46struct bio {
47 struct bio *bi_next; /* request queue link */ 47 struct bio *bi_next; /* request queue link */
48 struct block_device *bi_bdev; 48 struct block_device *bi_bdev;
49 unsigned long bi_flags; /* status, command, etc */ 49 unsigned int bi_flags; /* status, command, etc */
50 int bi_error;
50 unsigned long bi_rw; /* bottom bits READ/WRITE, 51 unsigned long bi_rw; /* bottom bits READ/WRITE,
51 * top bits priority 52 * top bits priority
52 */ 53 */
@@ -111,16 +112,14 @@ struct bio {
111/* 112/*
112 * bio flags 113 * bio flags
113 */ 114 */
114#define BIO_UPTODATE 0 /* ok after I/O completion */
115#define BIO_SEG_VALID 1 /* bi_phys_segments valid */ 115#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
116#define BIO_CLONED 2 /* doesn't own data */ 116#define BIO_CLONED 2 /* doesn't own data */
117#define BIO_BOUNCED 3 /* bio is a bounce bio */ 117#define BIO_BOUNCED 3 /* bio is a bounce bio */
118#define BIO_USER_MAPPED 4 /* contains user pages */ 118#define BIO_USER_MAPPED 4 /* contains user pages */
119#define BIO_NULL_MAPPED 5 /* contains invalid user pages */ 119#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
120#define BIO_QUIET 6 /* Make BIO Quiet */ 120#define BIO_QUIET 6 /* Make BIO Quiet */
121#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */ 121#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
122#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */ 122#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
123#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
124 123
125/* 124/*
126 * Flags starting here get preserved by bio_reset() - this includes 125 * Flags starting here get preserved by bio_reset() - this includes
@@ -129,14 +128,12 @@ struct bio {
129#define BIO_RESET_BITS 13 128#define BIO_RESET_BITS 13
130#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */ 129#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */
131 130
132#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
133
134/* 131/*
135 * top 4 bits of bio flags indicate the pool this bio came from 132 * top 4 bits of bio flags indicate the pool this bio came from
136 */ 133 */
137#define BIO_POOL_BITS (4) 134#define BIO_POOL_BITS (4)
138#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) 135#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
139#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS) 136#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS)
140#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) 137#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
141#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) 138#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
142 139
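Completion callbacks no longer take an error argument; the status now lives in bio->bi_error, and BIO_UPTODATE is gone. A minimal end_io sketch under the new prototype (my_end_io() is hypothetical):

	static void my_end_io(struct bio *bio)
	{
		if (bio->bi_error)		/* 0 on success, negative errno on failure */
			pr_err("I/O error %d\n", bio->bi_error);
		bio_put(bio);
	}

	/* wired up as before: bio->bi_end_io = my_end_io; */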
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d4068c17d0df..708923b9b623 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -213,14 +213,6 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
213typedef void (unprep_rq_fn) (struct request_queue *, struct request *); 213typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
214 214
215struct bio_vec; 215struct bio_vec;
216struct bvec_merge_data {
217 struct block_device *bi_bdev;
218 sector_t bi_sector;
219 unsigned bi_size;
220 unsigned long bi_rw;
221};
222typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
223 struct bio_vec *);
224typedef void (softirq_done_fn)(struct request *); 216typedef void (softirq_done_fn)(struct request *);
225typedef int (dma_drain_needed_fn)(struct request *); 217typedef int (dma_drain_needed_fn)(struct request *);
226typedef int (lld_busy_fn) (struct request_queue *q); 218typedef int (lld_busy_fn) (struct request_queue *q);
@@ -258,6 +250,7 @@ struct blk_queue_tag {
258struct queue_limits { 250struct queue_limits {
259 unsigned long bounce_pfn; 251 unsigned long bounce_pfn;
260 unsigned long seg_boundary_mask; 252 unsigned long seg_boundary_mask;
253 unsigned long virt_boundary_mask;
261 254
262 unsigned int max_hw_sectors; 255 unsigned int max_hw_sectors;
263 unsigned int chunk_sectors; 256 unsigned int chunk_sectors;
@@ -268,6 +261,7 @@ struct queue_limits {
268 unsigned int io_min; 261 unsigned int io_min;
269 unsigned int io_opt; 262 unsigned int io_opt;
270 unsigned int max_discard_sectors; 263 unsigned int max_discard_sectors;
264 unsigned int max_hw_discard_sectors;
271 unsigned int max_write_same_sectors; 265 unsigned int max_write_same_sectors;
272 unsigned int discard_granularity; 266 unsigned int discard_granularity;
273 unsigned int discard_alignment; 267 unsigned int discard_alignment;
@@ -305,7 +299,6 @@ struct request_queue {
305 make_request_fn *make_request_fn; 299 make_request_fn *make_request_fn;
306 prep_rq_fn *prep_rq_fn; 300 prep_rq_fn *prep_rq_fn;
307 unprep_rq_fn *unprep_rq_fn; 301 unprep_rq_fn *unprep_rq_fn;
308 merge_bvec_fn *merge_bvec_fn;
309 softirq_done_fn *softirq_done_fn; 302 softirq_done_fn *softirq_done_fn;
310 rq_timed_out_fn *rq_timed_out_fn; 303 rq_timed_out_fn *rq_timed_out_fn;
311 dma_drain_needed_fn *dma_drain_needed; 304 dma_drain_needed_fn *dma_drain_needed;
@@ -462,6 +455,7 @@ struct request_queue {
462 455
463 struct blk_mq_tag_set *tag_set; 456 struct blk_mq_tag_set *tag_set;
464 struct list_head tag_set_list; 457 struct list_head tag_set_list;
458 struct bio_set *bio_split;
465}; 459};
466 460
467#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 461#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -486,7 +480,6 @@ struct request_queue {
486#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ 480#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
487#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ 481#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
488#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ 482#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
489#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
490 483
491#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 484#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
492 (1 << QUEUE_FLAG_STACKABLE) | \ 485 (1 << QUEUE_FLAG_STACKABLE) | \
@@ -782,6 +775,8 @@ extern void blk_rq_unprep_clone(struct request *rq);
782extern int blk_insert_cloned_request(struct request_queue *q, 775extern int blk_insert_cloned_request(struct request_queue *q,
783 struct request *rq); 776 struct request *rq);
784extern void blk_delay_queue(struct request_queue *, unsigned long); 777extern void blk_delay_queue(struct request_queue *, unsigned long);
778extern void blk_queue_split(struct request_queue *, struct bio **,
779 struct bio_set *);
785extern void blk_recount_segments(struct request_queue *, struct bio *); 780extern void blk_recount_segments(struct request_queue *, struct bio *);
786extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); 781extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
787extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, 782extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
@@ -986,9 +981,9 @@ extern int blk_queue_dma_drain(struct request_queue *q,
986 void *buf, unsigned int size); 981 void *buf, unsigned int size);
987extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); 982extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
988extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 983extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
984extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
989extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 985extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
990extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); 986extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
991extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
992extern void blk_queue_dma_alignment(struct request_queue *, int); 987extern void blk_queue_dma_alignment(struct request_queue *, int);
993extern void blk_queue_update_dma_alignment(struct request_queue *, int); 988extern void blk_queue_update_dma_alignment(struct request_queue *, int);
994extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 989extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
@@ -1138,6 +1133,7 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
1138enum blk_default_limits { 1133enum blk_default_limits {
1139 BLK_MAX_SEGMENTS = 128, 1134 BLK_MAX_SEGMENTS = 128,
1140 BLK_SAFE_MAX_SECTORS = 255, 1135 BLK_SAFE_MAX_SECTORS = 255,
1136 BLK_DEF_MAX_SECTORS = 2560,
1141 BLK_MAX_SEGMENT_SIZE = 65536, 1137 BLK_MAX_SEGMENT_SIZE = 65536,
1142 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, 1138 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1143}; 1139};
@@ -1154,6 +1150,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
1154 return q->limits.seg_boundary_mask; 1150 return q->limits.seg_boundary_mask;
1155} 1151}
1156 1152
1153static inline unsigned long queue_virt_boundary(struct request_queue *q)
1154{
1155 return q->limits.virt_boundary_mask;
1156}
1157
1157static inline unsigned int queue_max_sectors(struct request_queue *q) 1158static inline unsigned int queue_max_sectors(struct request_queue *q)
1158{ 1159{
1159 return q->limits.max_sectors; 1160 return q->limits.max_sectors;
@@ -1354,6 +1355,19 @@ static inline void put_dev_sector(Sector p)
1354 page_cache_release(p.v); 1355 page_cache_release(p.v);
1355} 1356}
1356 1357
1358/*
1359 * Check if adding a bio_vec after bprv with offset would create a gap in
1360 * the SG list. Most drivers don't care about this, but some do.
1361 */
1362static inline bool bvec_gap_to_prev(struct request_queue *q,
1363 struct bio_vec *bprv, unsigned int offset)
1364{
1365 if (!queue_virt_boundary(q))
1366 return false;
1367 return offset ||
1368 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1369}
1370
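Drivers that used to register a merge_bvec_fn or set QUEUE_FLAG_SG_GAPS now express the constraint declaratively, and bvec_gap_to_prev() is what the merge/split code consults. A sketch of a driver with 4k-granular SG elements (NVMe-style; my_init_queue() is hypothetical):

	static void my_init_queue(struct request_queue *q)
	{
		/* segments that would leave a gap within 4k granularity get split
		 * rather than merged, see bvec_gap_to_prev() above */
		blk_queue_virt_boundary(q, PAGE_SIZE - 1);
	}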
1357struct work_struct; 1371struct work_struct;
1358int kblockd_schedule_work(struct work_struct *work); 1372int kblockd_schedule_work(struct work_struct *work);
1359int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); 1373int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
@@ -1555,8 +1569,8 @@ struct block_device_operations {
1555 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); 1569 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
1556 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1570 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1557 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1571 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1558 long (*direct_access)(struct block_device *, sector_t, 1572 long (*direct_access)(struct block_device *, sector_t, void __pmem **,
1559 void **, unsigned long *pfn, long size); 1573 unsigned long *pfn);
1560 unsigned int (*check_events) (struct gendisk *disk, 1574 unsigned int (*check_events) (struct gendisk *disk,
1561 unsigned int clearing); 1575 unsigned int clearing);
1562 /* ->media_changed() is DEPRECATED, use ->check_events() instead */ 1576 /* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1574,8 +1588,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1574extern int bdev_read_page(struct block_device *, sector_t, struct page *); 1588extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1575extern int bdev_write_page(struct block_device *, sector_t, struct page *, 1589extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1576 struct writeback_control *); 1590 struct writeback_control *);
1577extern long bdev_direct_access(struct block_device *, sector_t, void **addr, 1591extern long bdev_direct_access(struct block_device *, sector_t,
1578 unsigned long *pfn, long size); 1592 void __pmem **addr, unsigned long *pfn, long size);
1579#else /* CONFIG_BLOCK */ 1593#else /* CONFIG_BLOCK */
1580 1594
1581struct block_device; 1595struct block_device;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4383476a0d48..f57d7fed9ec3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
10#include <uapi/linux/bpf.h> 10#include <uapi/linux/bpf.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/file.h> 12#include <linux/file.h>
13#include <linux/perf_event.h>
13 14
14struct bpf_map; 15struct bpf_map;
15 16
@@ -24,6 +25,10 @@ struct bpf_map_ops {
24 void *(*map_lookup_elem)(struct bpf_map *map, void *key); 25 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
25 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); 26 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
26 int (*map_delete_elem)(struct bpf_map *map, void *key); 27 int (*map_delete_elem)(struct bpf_map *map, void *key);
28
29 /* funcs called by prog_array and perf_event_array map */
30 void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
31 void (*map_fd_put_ptr) (void *ptr);
27}; 32};
28 33
29struct bpf_map { 34struct bpf_map {
@@ -142,13 +147,13 @@ struct bpf_array {
142 bool owner_jited; 147 bool owner_jited;
143 union { 148 union {
144 char value[0] __aligned(8); 149 char value[0] __aligned(8);
145 struct bpf_prog *prog[0] __aligned(8); 150 void *ptrs[0] __aligned(8);
146 }; 151 };
147}; 152};
148#define MAX_TAIL_CALL_CNT 32 153#define MAX_TAIL_CALL_CNT 32
149 154
150u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); 155u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
151void bpf_prog_array_map_clear(struct bpf_map *map); 156void bpf_fd_array_map_clear(struct bpf_map *map);
152bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 157bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
153const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 158const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
154 159
@@ -185,6 +190,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
185extern const struct bpf_func_proto bpf_map_update_elem_proto; 190extern const struct bpf_func_proto bpf_map_update_elem_proto;
186extern const struct bpf_func_proto bpf_map_delete_elem_proto; 191extern const struct bpf_func_proto bpf_map_delete_elem_proto;
187 192
193extern const struct bpf_func_proto bpf_perf_event_read_proto;
188extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 194extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
189extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 195extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
190extern const struct bpf_func_proto bpf_tail_call_proto; 196extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -192,5 +198,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
192extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; 198extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
193extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; 199extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
194extern const struct bpf_func_proto bpf_get_current_comm_proto; 200extern const struct bpf_func_proto bpf_get_current_comm_proto;
201extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
202extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
195 203
196#endif /* _LINUX_BPF_H */ 204#endif /* _LINUX_BPF_H */
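The prog-array map is generalized into an fd-array: the array now stores opaque pointers and the two new callbacks translate user-supplied fds to and from those pointers, which is what lets a perf_event array share the code. A skeletal sketch of how a map type wires them (my_* names are hypothetical):

	static void *my_fd_get_ptr(struct bpf_map *map, int fd)
	{
		/* translate the fd into a referenced object (prog, perf event, ...) */
		return NULL;
	}

	static void my_fd_put_ptr(void *ptr)
	{
		/* drop the reference taken by my_fd_get_ptr() */
	}

	static const struct bpf_map_ops my_fd_array_ops = {
		.map_fd_get_ptr	= my_fd_get_ptr,
		.map_fd_put_ptr	= my_fd_put_ptr,
		/* .map_alloc, .map_lookup_elem, ... as for a plain array map */
	};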
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 9ebee53d3bf5..397c5cd09794 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -46,6 +46,7 @@ struct ceph_options {
46 unsigned long mount_timeout; /* jiffies */ 46 unsigned long mount_timeout; /* jiffies */
47 unsigned long osd_idle_ttl; /* jiffies */ 47 unsigned long osd_idle_ttl; /* jiffies */
48 unsigned long osd_keepalive_timeout; /* jiffies */ 48 unsigned long osd_keepalive_timeout; /* jiffies */
49 unsigned long monc_ping_timeout; /* jiffies */
49 50
50 /* 51 /*
 51 * any type that can't be simply compared or doesn't need 52 * any type that can't be simply compared or doesn't need
@@ -66,6 +67,7 @@ struct ceph_options {
66#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) 67#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
67#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) 68#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
68#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) 69#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
70#define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000)
69 71
70#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) 72#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
71#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) 73#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 37753278987a..7e1252e97a30 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -248,6 +248,8 @@ struct ceph_connection {
248 int in_base_pos; /* bytes read */ 248 int in_base_pos; /* bytes read */
249 __le64 in_temp_ack; /* for reading an ack */ 249 __le64 in_temp_ack; /* for reading an ack */
250 250
251 struct timespec last_keepalive_ack;
252
251 struct delayed_work work; /* send|recv work */ 253 struct delayed_work work; /* send|recv work */
252 unsigned long delay; /* current delay interval */ 254 unsigned long delay; /* current delay interval */
253}; 255};
@@ -285,6 +287,8 @@ extern void ceph_msg_revoke(struct ceph_msg *msg);
285extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); 287extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);
286 288
287extern void ceph_con_keepalive(struct ceph_connection *con); 289extern void ceph_con_keepalive(struct ceph_connection *con);
290extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
291 unsigned long interval);
288 292
289extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, 293extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
290 size_t length, size_t alignment); 294 size_t length, size_t alignment);
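KEEPALIVE2 adds a timestamped ack, so a client can now tell how stale a connection is. A rough sketch of the periodic check a caller such as the mon client would perform; con and timeout stand in for the monitor connection and the new monc_ping_timeout option, and my_mon_tick() is hypothetical:

	static void my_mon_tick(struct ceph_connection *con, unsigned long timeout)
	{
		if (ceph_con_keepalive_expired(con, timeout)) {
			/* no KEEPALIVE2_ACK within the window: reconnect elsewhere */
			return;
		}
		ceph_con_keepalive(con);	/* queues a keepalive; with KEEPALIVE2 it carries a timestamp */
	}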
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 1c1887206ffa..0fe2656ac415 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -84,10 +84,12 @@ struct ceph_entity_inst {
84#define CEPH_MSGR_TAG_MSG 7 /* message */ 84#define CEPH_MSGR_TAG_MSG 7 /* message */
85#define CEPH_MSGR_TAG_ACK 8 /* message ack */ 85#define CEPH_MSGR_TAG_ACK 8 /* message ack */
86#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ 86#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
87#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ 87#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
88#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ 88#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
89#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ 89#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */
90#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */ 90#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
91#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
92#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
91 93
92 94
93/* 95/*
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 93755a629299..4d8fcf2187dc 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -34,12 +34,17 @@ struct seq_file;
34 34
35/* define the enumeration of all cgroup subsystems */ 35/* define the enumeration of all cgroup subsystems */
36#define SUBSYS(_x) _x ## _cgrp_id, 36#define SUBSYS(_x) _x ## _cgrp_id,
37#define SUBSYS_TAG(_t) CGROUP_ ## _t, \
38 __unused_tag_ ## _t = CGROUP_ ## _t - 1,
37enum cgroup_subsys_id { 39enum cgroup_subsys_id {
38#include <linux/cgroup_subsys.h> 40#include <linux/cgroup_subsys.h>
39 CGROUP_SUBSYS_COUNT, 41 CGROUP_SUBSYS_COUNT,
40}; 42};
43#undef SUBSYS_TAG
41#undef SUBSYS 44#undef SUBSYS
42 45
46#define CGROUP_CANFORK_COUNT (CGROUP_CANFORK_END - CGROUP_CANFORK_START)
47
43/* bits in struct cgroup_subsys_state flags field */ 48/* bits in struct cgroup_subsys_state flags field */
44enum { 49enum {
45 CSS_NO_REF = (1 << 0), /* no reference counting for this css */ 50 CSS_NO_REF = (1 << 0), /* no reference counting for this css */
@@ -318,7 +323,7 @@ struct cftype {
318 * end of cftype array. 323 * end of cftype array.
319 */ 324 */
320 char name[MAX_CFTYPE_NAME]; 325 char name[MAX_CFTYPE_NAME];
321 int private; 326 unsigned long private;
322 /* 327 /*
323 * If not 0, file mode is set to this value, otherwise it will 328 * If not 0, file mode is set to this value, otherwise it will
324 * be figured out automatically 329 * be figured out automatically
@@ -406,7 +411,9 @@ struct cgroup_subsys {
406 struct cgroup_taskset *tset); 411 struct cgroup_taskset *tset);
407 void (*attach)(struct cgroup_subsys_state *css, 412 void (*attach)(struct cgroup_subsys_state *css,
408 struct cgroup_taskset *tset); 413 struct cgroup_taskset *tset);
409 void (*fork)(struct task_struct *task); 414 int (*can_fork)(struct task_struct *task, void **priv_p);
415 void (*cancel_fork)(struct task_struct *task, void *priv);
416 void (*fork)(struct task_struct *task, void *priv);
410 void (*exit)(struct cgroup_subsys_state *css, 417 void (*exit)(struct cgroup_subsys_state *css,
411 struct cgroup_subsys_state *old_css, 418 struct cgroup_subsys_state *old_css,
412 struct task_struct *task); 419 struct task_struct *task);
@@ -434,6 +441,9 @@ struct cgroup_subsys {
434 int id; 441 int id;
435 const char *name; 442 const char *name;
436 443
444 /* optional, initialized automatically during boot if not set */
445 const char *legacy_name;
446
437 /* link to parent, protected by cgroup_lock() */ 447 /* link to parent, protected by cgroup_lock() */
438 struct cgroup_root *root; 448 struct cgroup_root *root;
439 449
@@ -491,6 +501,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
491 501
492#else /* CONFIG_CGROUPS */ 502#else /* CONFIG_CGROUPS */
493 503
504#define CGROUP_CANFORK_COUNT 0
494#define CGROUP_SUBSYS_COUNT 0 505#define CGROUP_SUBSYS_COUNT 0
495 506
496static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} 507static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
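A controller that needs to veto fork (the pids controller is the in-tree user) now implements the three-phase protocol below; the void * argument lets can_fork pass state through to the later calls. Sketch only, with hypothetical my_* names:

	static int my_can_fork(struct task_struct *task, void **priv_p)
	{
		/* charge or verify here; stash per-fork state in *priv_p if needed */
		return 0;		/* non-zero aborts the fork */
	}

	static void my_cancel_fork(struct task_struct *task, void *priv)
	{
		/* fork failed after the charge: undo what my_can_fork() did */
	}

	static void my_fork(struct task_struct *task, void *priv)
	{
		/* commit: the child is now visible */
	}

	struct cgroup_subsys my_cgrp_subsys = {
		.can_fork	= my_can_fork,
		.cancel_fork	= my_cancel_fork,
		.fork		= my_fork,
		.legacy_name	= "my",		/* optional alias on the v1 hierarchy */
	};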
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a593e299162e..eb7ca55f72ef 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -22,6 +22,15 @@
22 22
23#ifdef CONFIG_CGROUPS 23#ifdef CONFIG_CGROUPS
24 24
25/*
 26 * All weight knobs on the default hierarchy should use the following min,
27 * default and max values. The default value is the logarithmic center of
28 * MIN and MAX and allows 100x to be expressed in both directions.
29 */
30#define CGROUP_WEIGHT_MIN 1
31#define CGROUP_WEIGHT_DFL 100
32#define CGROUP_WEIGHT_MAX 10000
33
25/* a css_task_iter should be treated as an opaque object */ 34/* a css_task_iter should be treated as an opaque object */
26struct css_task_iter { 35struct css_task_iter {
27 struct cgroup_subsys *ss; 36 struct cgroup_subsys *ss;
@@ -62,7 +71,12 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
62 struct pid *pid, struct task_struct *tsk); 71 struct pid *pid, struct task_struct *tsk);
63 72
64void cgroup_fork(struct task_struct *p); 73void cgroup_fork(struct task_struct *p);
65void cgroup_post_fork(struct task_struct *p); 74extern int cgroup_can_fork(struct task_struct *p,
75 void *ss_priv[CGROUP_CANFORK_COUNT]);
76extern void cgroup_cancel_fork(struct task_struct *p,
77 void *ss_priv[CGROUP_CANFORK_COUNT]);
78extern void cgroup_post_fork(struct task_struct *p,
79 void *old_ss_priv[CGROUP_CANFORK_COUNT]);
66void cgroup_exit(struct task_struct *p); 80void cgroup_exit(struct task_struct *p);
67 81
68int cgroup_init_early(void); 82int cgroup_init_early(void);
@@ -524,7 +538,13 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
524 struct dentry *dentry) { return -EINVAL; } 538 struct dentry *dentry) { return -EINVAL; }
525 539
526static inline void cgroup_fork(struct task_struct *p) {} 540static inline void cgroup_fork(struct task_struct *p) {}
527static inline void cgroup_post_fork(struct task_struct *p) {} 541static inline int cgroup_can_fork(struct task_struct *p,
542 void *ss_priv[CGROUP_CANFORK_COUNT])
543{ return 0; }
544static inline void cgroup_cancel_fork(struct task_struct *p,
545 void *ss_priv[CGROUP_CANFORK_COUNT]) {}
546static inline void cgroup_post_fork(struct task_struct *p,
547 void *ss_priv[CGROUP_CANFORK_COUNT]) {}
528static inline void cgroup_exit(struct task_struct *p) {} 548static inline void cgroup_exit(struct task_struct *p) {}
529 549
530static inline int cgroup_init_early(void) { return 0; } 550static inline int cgroup_init_early(void) { return 0; }
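On the task side, copy_process() is expected to drive these in order; a compressed sketch of the implied control flow (error unwinding elided, my_copy_process() is purely illustrative):

	static int my_copy_process(struct task_struct *p)
	{
		void *ss_priv[CGROUP_CANFORK_COUNT];
		int ret;

		cgroup_fork(p);
		/* ... allocate and set up the child ... */

		ret = cgroup_can_fork(p, ss_priv);	/* controllers may veto here */
		if (ret)
			return ret;

		/* any failure between here and the point of no return would call
		 * cgroup_cancel_fork(p, ss_priv) instead of post_fork */

		cgroup_post_fork(p, ss_priv);		/* commit; priv is handed to each ->fork() */
		return 0;
	}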
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index e4a96fb14403..1a96fdaa33d5 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -3,6 +3,17 @@
3 * 3 *
4 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. 4 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
5 */ 5 */
6
7/*
8 * This file *must* be included with SUBSYS() defined.
9 * SUBSYS_TAG() is a noop if undefined.
10 */
11
12#ifndef SUBSYS_TAG
13#define __TMP_SUBSYS_TAG
14#define SUBSYS_TAG(_x)
15#endif
16
6#if IS_ENABLED(CONFIG_CPUSETS) 17#if IS_ENABLED(CONFIG_CPUSETS)
7SUBSYS(cpuset) 18SUBSYS(cpuset)
8#endif 19#endif
@@ -16,7 +27,7 @@ SUBSYS(cpuacct)
16#endif 27#endif
17 28
18#if IS_ENABLED(CONFIG_BLK_CGROUP) 29#if IS_ENABLED(CONFIG_BLK_CGROUP)
19SUBSYS(blkio) 30SUBSYS(io)
20#endif 31#endif
21 32
22#if IS_ENABLED(CONFIG_MEMCG) 33#if IS_ENABLED(CONFIG_MEMCG)
@@ -48,11 +59,28 @@ SUBSYS(hugetlb)
48#endif 59#endif
49 60
50/* 61/*
62 * Subsystems that implement the can_fork() family of callbacks.
63 */
64SUBSYS_TAG(CANFORK_START)
65
66#if IS_ENABLED(CONFIG_CGROUP_PIDS)
67SUBSYS(pids)
68#endif
69
70SUBSYS_TAG(CANFORK_END)
71
72/*
51 * The following subsystems are not supported on the default hierarchy. 73 * The following subsystems are not supported on the default hierarchy.
52 */ 74 */
53#if IS_ENABLED(CONFIG_CGROUP_DEBUG) 75#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
54SUBSYS(debug) 76SUBSYS(debug)
55#endif 77#endif
78
79#ifdef __TMP_SUBSYS_TAG
80#undef __TMP_SUBSYS_TAG
81#undef SUBSYS_TAG
82#endif
83
56/* 84/*
57 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. 85 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
58 */ 86 */
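For orientation, here is roughly what the SUBSYS()/SUBSYS_TAG() expansion in cgroup-defs.h produces from this file with CONFIG_BLK_CGROUP and CONFIG_CGROUP_PIDS enabled (illustrative only). The __unused_tag_* trick makes each tag share its value with the next real subsystem, so CGROUP_CANFORK_COUNT equals the number of bracketed controllers:

	enum cgroup_subsys_id {
		/* ... */
		io_cgrp_id,			/* renamed from blkio_cgrp_id */
		/* ... */
		CGROUP_CANFORK_START,
		__unused_tag_CANFORK_START = CGROUP_CANFORK_START - 1,
		pids_cgrp_id,			/* == CGROUP_CANFORK_START */
		CGROUP_CANFORK_END,
		__unused_tag_CANFORK_END = CGROUP_CANFORK_END - 1,
		/* ... */
		CGROUP_SUBSYS_COUNT,
	};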
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 78842f46f152..3ecc07d0da77 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -11,7 +11,6 @@
11#ifndef __LINUX_CLK_PROVIDER_H 11#ifndef __LINUX_CLK_PROVIDER_H
12#define __LINUX_CLK_PROVIDER_H 12#define __LINUX_CLK_PROVIDER_H
13 13
14#include <linux/clk.h>
15#include <linux/io.h> 14#include <linux/io.h>
16#include <linux/of.h> 15#include <linux/of.h>
17 16
@@ -33,11 +32,34 @@
33#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ 32#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
34#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ 33#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
35 34
35struct clk;
36struct clk_hw; 36struct clk_hw;
37struct clk_core; 37struct clk_core;
38struct dentry; 38struct dentry;
39 39
40/** 40/**
41 * struct clk_rate_request - Structure encoding the clk constraints that
42 * a clock user might require.
43 *
44 * @rate: Requested clock rate. This field will be adjusted by
45 * clock drivers according to hardware capabilities.
46 * @min_rate: Minimum rate imposed by clk users.
 47 * @max_rate: Maximum rate imposed by clk users.
48 * @best_parent_rate: The best parent rate a parent can provide to fulfill the
49 * requested constraints.
50 * @best_parent_hw: The most appropriate parent clock that fulfills the
51 * requested constraints.
52 *
53 */
54struct clk_rate_request {
55 unsigned long rate;
56 unsigned long min_rate;
57 unsigned long max_rate;
58 unsigned long best_parent_rate;
59 struct clk_hw *best_parent_hw;
60};
61
62/**
41 * struct clk_ops - Callback operations for hardware clocks; these are to 63 * struct clk_ops - Callback operations for hardware clocks; these are to
42 * be provided by the clock implementation, and will be called by drivers 64 * be provided by the clock implementation, and will be called by drivers
43 * through the clk_* api. 65 * through the clk_* api.
@@ -176,12 +198,8 @@ struct clk_ops {
176 unsigned long parent_rate); 198 unsigned long parent_rate);
177 long (*round_rate)(struct clk_hw *hw, unsigned long rate, 199 long (*round_rate)(struct clk_hw *hw, unsigned long rate,
178 unsigned long *parent_rate); 200 unsigned long *parent_rate);
179 long (*determine_rate)(struct clk_hw *hw, 201 int (*determine_rate)(struct clk_hw *hw,
180 unsigned long rate, 202 struct clk_rate_request *req);
181 unsigned long min_rate,
182 unsigned long max_rate,
183 unsigned long *best_parent_rate,
184 struct clk_hw **best_parent_hw);
185 int (*set_parent)(struct clk_hw *hw, u8 index); 203 int (*set_parent)(struct clk_hw *hw, u8 index);
186 u8 (*get_parent)(struct clk_hw *hw); 204 u8 (*get_parent)(struct clk_hw *hw);
187 int (*set_rate)(struct clk_hw *hw, unsigned long rate, 205 int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -343,6 +361,9 @@ struct clk_div_table {
343 * to the closest integer instead of the up one. 361 * to the closest integer instead of the up one.
344 * CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should 362 * CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should
345 * not be changed by the clock framework. 363 * not be changed by the clock framework.
364 * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
365 * except when the value read from the register is zero, the divisor is
366 * 2^width of the field.
346 */ 367 */
347struct clk_divider { 368struct clk_divider {
348 struct clk_hw hw; 369 struct clk_hw hw;
@@ -360,6 +381,7 @@ struct clk_divider {
360#define CLK_DIVIDER_HIWORD_MASK BIT(3) 381#define CLK_DIVIDER_HIWORD_MASK BIT(3)
361#define CLK_DIVIDER_ROUND_CLOSEST BIT(4) 382#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
362#define CLK_DIVIDER_READ_ONLY BIT(5) 383#define CLK_DIVIDER_READ_ONLY BIT(5)
384#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
363 385
364extern const struct clk_ops clk_divider_ops; 386extern const struct clk_ops clk_divider_ops;
365 387
@@ -550,6 +572,23 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
550void of_gpio_clk_gate_setup(struct device_node *node); 572void of_gpio_clk_gate_setup(struct device_node *node);
551 573
552/** 574/**
575 * struct clk_gpio_mux - gpio controlled clock multiplexer
576 *
577 * @hw: see struct clk_gpio
578 * @gpiod: gpio descriptor to select the parent of this clock multiplexer
579 *
580 * Clock with a gpio control for selecting the parent clock.
581 * Implements .get_parent, .set_parent and .determine_rate
582 */
583
584extern const struct clk_ops clk_gpio_mux_ops;
585struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
586 const char * const *parent_names, u8 num_parents, unsigned gpio,
587 bool active_low, unsigned long flags);
588
589void of_gpio_mux_clk_setup(struct device_node *node);
590
591/**
553 * clk_register - allocate a new clock, register it and return an opaque cookie 592 * clk_register - allocate a new clock, register it and return an opaque cookie
554 * @dev: device that is registering this clock 593 * @dev: device that is registering this clock
555 * @hw: link to hardware-specific clock data 594 * @hw: link to hardware-specific clock data
@@ -568,31 +607,27 @@ void devm_clk_unregister(struct device *dev, struct clk *clk);
568 607
569/* helper functions */ 608/* helper functions */
570const char *__clk_get_name(struct clk *clk); 609const char *__clk_get_name(struct clk *clk);
610const char *clk_hw_get_name(const struct clk_hw *hw);
571struct clk_hw *__clk_get_hw(struct clk *clk); 611struct clk_hw *__clk_get_hw(struct clk *clk);
572u8 __clk_get_num_parents(struct clk *clk); 612unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
573struct clk *__clk_get_parent(struct clk *clk); 613struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
574struct clk *clk_get_parent_by_index(struct clk *clk, u8 index); 614struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
615 unsigned int index);
575unsigned int __clk_get_enable_count(struct clk *clk); 616unsigned int __clk_get_enable_count(struct clk *clk);
576unsigned long __clk_get_rate(struct clk *clk); 617unsigned long clk_hw_get_rate(const struct clk_hw *hw);
577unsigned long __clk_get_flags(struct clk *clk); 618unsigned long __clk_get_flags(struct clk *clk);
578bool __clk_is_prepared(struct clk *clk); 619unsigned long clk_hw_get_flags(const struct clk_hw *hw);
620bool clk_hw_is_prepared(const struct clk_hw *hw);
579bool __clk_is_enabled(struct clk *clk); 621bool __clk_is_enabled(struct clk *clk);
580struct clk *__clk_lookup(const char *name); 622struct clk *__clk_lookup(const char *name);
581long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 623int __clk_mux_determine_rate(struct clk_hw *hw,
582 unsigned long min_rate, 624 struct clk_rate_request *req);
583 unsigned long max_rate, 625int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
584 unsigned long *best_parent_rate, 626int __clk_mux_determine_rate_closest(struct clk_hw *hw,
585 struct clk_hw **best_parent_p); 627 struct clk_rate_request *req);
586unsigned long __clk_determine_rate(struct clk_hw *core,
587 unsigned long rate,
588 unsigned long min_rate,
589 unsigned long max_rate);
590long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
591 unsigned long min_rate,
592 unsigned long max_rate,
593 unsigned long *best_parent_rate,
594 struct clk_hw **best_parent_p);
595void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); 628void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
629void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
630 unsigned long max_rate);
596 631
597static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) 632static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
598{ 633{
@@ -603,7 +638,7 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
603/* 638/*
604 * FIXME clock api without lock protection 639 * FIXME clock api without lock protection
605 */ 640 */
606unsigned long __clk_round_rate(struct clk *clk, unsigned long rate); 641unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
607 642
608struct of_device_id; 643struct of_device_id;
609 644
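
The helper rework above replaces the old multi-argument rate negotiation with struct clk_rate_request and moves per-clock queries from struct clk to struct clk_hw. A hedged sketch of a provider written against the new forms (foo_* names are illustrative, not part of the header; a real mux would also implement .get_parent/.set_parent):

#include <linux/clk-provider.h>
#include <linux/printk.h>

static int foo_mux_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* Let the generic helper pick the best parent for req->rate. */
	return __clk_mux_determine_rate(hw, req);
}

static unsigned long foo_mux_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	pr_debug("%s: parent %s running at %lu Hz\n",
		 clk_hw_get_name(hw),
		 clk_hw_get_name(clk_hw_get_parent(hw)),
		 clk_hw_get_rate(clk_hw_get_parent(hw)));
	return parent_rate;
}

static const struct clk_ops foo_mux_ops = {
	.determine_rate	= foo_mux_determine_rate,
	.recalc_rate	= foo_mux_recalc_rate,
};
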
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index f3050e15f833..e0c362363c38 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -7,6 +7,8 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/types.h>
11
10struct device_node; 12struct device_node;
11 13
12#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) 14#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h
index 63a8159c4e64..cb19cc1865ca 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/shmobile.h
@@ -16,8 +16,20 @@
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18 18
19struct device;
20struct device_node;
21struct generic_pm_domain;
22
19void r8a7778_clocks_init(u32 mode); 23void r8a7778_clocks_init(u32 mode);
20void r8a7779_clocks_init(u32 mode); 24void r8a7779_clocks_init(u32 mode);
21void rcar_gen2_clocks_init(u32 mode); 25void rcar_gen2_clocks_init(u32 mode);
22 26
27#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
28void cpg_mstp_add_clk_domain(struct device_node *np);
29int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
30void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
31#else
32static inline void cpg_mstp_add_clk_domain(struct device_node *np) {}
33#endif
34
23#endif 35#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 19c4208f4752..57bf7aab4516 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -17,7 +17,8 @@
17#ifndef __LINUX_CLK_TEGRA_H_ 17#ifndef __LINUX_CLK_TEGRA_H_
18#define __LINUX_CLK_TEGRA_H_ 18#define __LINUX_CLK_TEGRA_H_
19 19
20#include <linux/clk.h> 20#include <linux/types.h>
21#include <linux/bug.h>
21 22
22/* 23/*
23 * Tegra CPU clock and reset control ops 24 * Tegra CPU clock and reset control ops
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 79b76e13d904..223be696df27 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -188,33 +188,6 @@ struct clk_hw_omap {
188/* DPLL Type and DCO Selection Flags */ 188/* DPLL Type and DCO Selection Flags */
189#define DPLL_J_TYPE 0x1 189#define DPLL_J_TYPE 0x1
190 190
191/* Composite clock component types */
192enum {
193 CLK_COMPONENT_TYPE_GATE = 0,
194 CLK_COMPONENT_TYPE_DIVIDER,
195 CLK_COMPONENT_TYPE_MUX,
196 CLK_COMPONENT_TYPE_MAX,
197};
198
199/**
200 * struct ti_dt_clk - OMAP DT clock alias declarations
201 * @lk: clock lookup definition
202 * @node_name: clock DT node to map to
203 */
204struct ti_dt_clk {
205 struct clk_lookup lk;
206 char *node_name;
207};
208
209#define DT_CLK(dev, con, name) \
210 { \
211 .lk = { \
212 .dev_id = dev, \
213 .con_id = con, \
214 }, \
215 .node_name = name, \
216 }
217
218/* Static memmap indices */ 191/* Static memmap indices */
219enum { 192enum {
220 TI_CLKM_CM = 0, 193 TI_CLKM_CM = 0,
@@ -225,8 +198,6 @@ enum {
225 CLK_MAX_MEMMAPS 198 CLK_MAX_MEMMAPS
226}; 199};
227 200
228typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
229
230/** 201/**
231 * struct clk_omap_reg - OMAP register declaration 202 * struct clk_omap_reg - OMAP register declaration
232 * @offset: offset from the master IP module base address 203 * @offset: offset from the master IP module base address
@@ -238,98 +209,62 @@ struct clk_omap_reg {
238}; 209};
239 210
240/** 211/**
241 * struct ti_clk_ll_ops - low-level register access ops for a clock 212 * struct ti_clk_ll_ops - low-level ops for clocks
242 * @clk_readl: pointer to register read function 213 * @clk_readl: pointer to register read function
243 * @clk_writel: pointer to register write function 214 * @clk_writel: pointer to register write function
215 * @clkdm_clk_enable: pointer to clockdomain enable function
216 * @clkdm_clk_disable: pointer to clockdomain disable function
217 * @cm_wait_module_ready: pointer to CM module wait ready function
218 * @cm_split_idlest_reg: pointer to CM module function to split idlest reg
244 * 219 *
245 * Low-level register access ops are generally used by the basic clock types 220 * Low-level ops are generally used by the basic clock types (clk-gate,
246 * (clk-gate, clk-mux, clk-divider etc.) to provide support for various 221 * clk-mux, clk-divider etc.) to provide support for various low-level
 247 * low-level hardware interfaces (direct MMIO, regmap etc.), but can also be 222 * hardware interfaces (direct MMIO, regmap etc.), and are initialized
 248 * used by other hardware-specific clock drivers if needed. 223 * by board code. Low-level ops also contain some other platform-specific
224 * operations not provided directly by clock drivers.
249 */ 225 */
250struct ti_clk_ll_ops { 226struct ti_clk_ll_ops {
251 u32 (*clk_readl)(void __iomem *reg); 227 u32 (*clk_readl)(void __iomem *reg);
252 void (*clk_writel)(u32 val, void __iomem *reg); 228 void (*clk_writel)(u32 val, void __iomem *reg);
229 int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
230 int (*clkdm_clk_disable)(struct clockdomain *clkdm,
231 struct clk *clk);
232 int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
233 u8 idlest_shift);
234 int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
235 u8 *idlest_reg_id);
253}; 236};
254 237
255extern struct ti_clk_ll_ops *ti_clk_ll_ops;
256
257extern const struct clk_ops ti_clk_divider_ops;
258extern const struct clk_ops ti_clk_mux_ops;
259
260#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw) 238#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
261 239
262void omap2_init_clk_hw_omap_clocks(struct clk *clk);
263int omap3_noncore_dpll_enable(struct clk_hw *hw);
264void omap3_noncore_dpll_disable(struct clk_hw *hw);
265int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
266int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
267 unsigned long parent_rate);
268int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
269 unsigned long rate,
270 unsigned long parent_rate,
271 u8 index);
272long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
273 unsigned long rate,
274 unsigned long min_rate,
275 unsigned long max_rate,
276 unsigned long *best_parent_rate,
277 struct clk_hw **best_parent_clk);
278unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
279 unsigned long parent_rate);
280long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
281 unsigned long target_rate,
282 unsigned long *parent_rate);
283long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
284 unsigned long rate,
285 unsigned long min_rate,
286 unsigned long max_rate,
287 unsigned long *best_parent_rate,
288 struct clk_hw **best_parent_clk);
289u8 omap2_init_dpll_parent(struct clk_hw *hw);
290unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
291long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
292 unsigned long *parent_rate);
293void omap2_init_clk_clkdm(struct clk_hw *clk); 240void omap2_init_clk_clkdm(struct clk_hw *clk);
294unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
295 unsigned long parent_rate);
296int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
297 unsigned long parent_rate);
298long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
299 unsigned long *prate);
300int omap2_clkops_enable_clkdm(struct clk_hw *hw);
301void omap2_clkops_disable_clkdm(struct clk_hw *hw);
302int omap2_clk_disable_autoidle_all(void); 241int omap2_clk_disable_autoidle_all(void);
303void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); 242int omap2_clk_enable_autoidle_all(void);
304int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate, 243int omap2_clk_allow_idle(struct clk *clk);
305 unsigned long parent_rate); 244int omap2_clk_deny_idle(struct clk *clk);
306int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
307 unsigned long parent_rate, u8 index);
308int omap2_dflt_clk_enable(struct clk_hw *hw);
309void omap2_dflt_clk_disable(struct clk_hw *hw);
310int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
311void omap3_clk_lock_dpll5(void);
312unsigned long omap2_dpllcore_recalc(struct clk_hw *hw, 245unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
313 unsigned long parent_rate); 246 unsigned long parent_rate);
314int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate, 247int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
315 unsigned long parent_rate); 248 unsigned long parent_rate);
316void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw); 249void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
317void omap2xxx_clkt_vps_init(void); 250void omap2xxx_clkt_vps_init(void);
251unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
318 252
319void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
320void ti_dt_clocks_register(struct ti_dt_clk *oclks);
321void ti_dt_clk_init_provider(struct device_node *np, int index);
322void ti_dt_clk_init_retry_clks(void); 253void ti_dt_clk_init_retry_clks(void);
323void ti_dt_clockdomains_setup(void); 254void ti_dt_clockdomains_setup(void);
324int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw, 255int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops);
325 ti_of_clk_init_cb_t func); 256
326int of_ti_clk_autoidle_setup(struct device_node *node); 257struct regmap;
327int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type); 258
259int omap2_clk_provider_init(struct device_node *parent, int index,
260 struct regmap *syscon, void __iomem *mem);
261void omap2_clk_legacy_provider_init(int index, void __iomem *mem);
328 262
329int omap3430_dt_clk_init(void); 263int omap3430_dt_clk_init(void);
330int omap3630_dt_clk_init(void); 264int omap3630_dt_clk_init(void);
331int am35xx_dt_clk_init(void); 265int am35xx_dt_clk_init(void);
332int ti81xx_dt_clk_init(void); 266int dm814x_dt_clk_init(void);
267int dm816x_dt_clk_init(void);
333int omap4xxx_dt_clk_init(void); 268int omap4xxx_dt_clk_init(void);
334int omap5xxx_dt_clk_init(void); 269int omap5xxx_dt_clk_init(void);
335int dra7xx_dt_clk_init(void); 270int dra7xx_dt_clk_init(void);
@@ -338,27 +273,24 @@ int am43xx_dt_clk_init(void);
338int omap2420_dt_clk_init(void); 273int omap2420_dt_clk_init(void);
339int omap2430_dt_clk_init(void); 274int omap2430_dt_clk_init(void);
340 275
341#ifdef CONFIG_OF 276struct ti_clk_features {
342void of_ti_clk_allow_autoidle_all(void); 277 u32 flags;
343void of_ti_clk_deny_autoidle_all(void); 278 long fint_min;
344#else 279 long fint_max;
345static inline void of_ti_clk_allow_autoidle_all(void) { } 280 long fint_band1_max;
346static inline void of_ti_clk_deny_autoidle_all(void) { } 281 long fint_band2_min;
347#endif 282 u8 dpll_bypass_vals;
283 u8 cm_idlest_val;
284};
285
286#define TI_CLK_DPLL_HAS_FREQSEL BIT(0)
287#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
288#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
289
290void ti_clk_setup_features(struct ti_clk_features *features);
291const struct ti_clk_features *ti_clk_get_features(void);
348 292
349extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; 293extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
350extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
351extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
352extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
353extern const struct clk_hw_omap_ops clkhwops_wait;
354extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
355extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
356extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
357extern const struct clk_hw_omap_ops clkhwops_iclk;
358extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
359extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
360extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
361extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
362 294
363#ifdef CONFIG_ATAGS 295#ifdef CONFIG_ATAGS
364int omap3430_clk_legacy_init(void); 296int omap3430_clk_legacy_init(void);
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 597a1e836f22..31ce435981fe 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -234,13 +234,10 @@ static inline int tick_check_broadcast_expired(void) { return 0; }
234static inline void tick_setup_hrtimer_broadcast(void) { } 234static inline void tick_setup_hrtimer_broadcast(void) { }
235# endif 235# endif
236 236
237extern int clockevents_notify(unsigned long reason, void *arg);
238
239#else /* !CONFIG_GENERIC_CLOCKEVENTS: */ 237#else /* !CONFIG_GENERIC_CLOCKEVENTS: */
240 238
241static inline void clockevents_suspend(void) { } 239static inline void clockevents_suspend(void) { }
242static inline void clockevents_resume(void) { } 240static inline void clockevents_resume(void) { }
243static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
244static inline int tick_check_broadcast_expired(void) { return 0; } 241static inline int tick_check_broadcast_expired(void) { return 0; }
245static inline void tick_setup_hrtimer_broadcast(void) { } 242static inline void tick_setup_hrtimer_broadcast(void) { }
246 243
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e08a6ae7c0a4..c836eb2dc44d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -252,7 +252,12 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
252 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 252 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
253 253
254#define WRITE_ONCE(x, val) \ 254#define WRITE_ONCE(x, val) \
255 ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 255({ \
256 union { typeof(x) __val; char __c[1]; } __u = \
257 { .__val = (__force typeof(x)) (val) }; \
258 __write_once_size(&(x), __u.__c, sizeof(x)); \
259 __u.__val; \
260})
256 261
257/** 262/**
258 * READ_ONCE_CTRL - Read a value heading a control dependency 263 * READ_ONCE_CTRL - Read a value heading a control dependency
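
The reflowed WRITE_ONCE() now force-casts the assigned value, presumably so values with sparse annotations can be stored without warnings. Typical usage is unchanged; a minimal illustration with made-up names:

#include <linux/compiler.h>

static int foo_ready;

static void foo_publish(void)
{
	WRITE_ONCE(foo_ready, 1);	/* single, untorn store */
}

static int foo_poll(void)
{
	return READ_ONCE(foo_ready);	/* single, untorn load */
}
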
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index b96bd299966f..008fc67d0d96 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -49,13 +49,28 @@ static inline void exception_exit(enum ctx_state prev_ctx)
49 } 49 }
50} 50}
51 51
52
53/**
54 * ct_state() - return the current context tracking state if known
55 *
56 * Returns the current cpu's context tracking state if context tracking
57 * is enabled. If context tracking is disabled, returns
58 * CONTEXT_DISABLED. This should be used primarily for debugging.
59 */
60static inline enum ctx_state ct_state(void)
61{
62 return context_tracking_is_enabled() ?
63 this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
64}
52#else 65#else
53static inline void user_enter(void) { } 66static inline void user_enter(void) { }
54static inline void user_exit(void) { } 67static inline void user_exit(void) { }
55static inline enum ctx_state exception_enter(void) { return 0; } 68static inline enum ctx_state exception_enter(void) { return 0; }
56static inline void exception_exit(enum ctx_state prev_ctx) { } 69static inline void exception_exit(enum ctx_state prev_ctx) { }
70static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
57#endif /* !CONFIG_CONTEXT_TRACKING */ 71#endif /* !CONFIG_CONTEXT_TRACKING */
58 72
73#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
59 74
60#ifdef CONFIG_CONTEXT_TRACKING_FORCE 75#ifdef CONFIG_CONTEXT_TRACKING_FORCE
61extern void context_tracking_init(void); 76extern void context_tracking_init(void);
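
ct_state() and CT_WARN_ON() are described above as debugging aids. A caller-side sketch (the function name is hypothetical) of asserting that a path runs while tracked as kernel context:

#include <linux/context_tracking.h>

static void foo_enter_kernel_work(void)
{
	/* No-op unless context tracking is compiled in and enabled. */
	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
}
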
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 678ecdf90cf6..ee956c528fab 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -14,6 +14,7 @@ struct context_tracking {
14 bool active; 14 bool active;
15 int recursion; 15 int recursion;
16 enum ctx_state { 16 enum ctx_state {
17 CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
17 CONTEXT_KERNEL = 0, 18 CONTEXT_KERNEL = 0,
18 CONTEXT_USER, 19 CONTEXT_USER,
19 CONTEXT_GUEST, 20 CONTEXT_GUEST,
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 3486b9082adb..c69e1b932809 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -14,6 +14,7 @@
14#define _LINUX_CORESIGHT_H 14#define _LINUX_CORESIGHT_H
15 15
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/sched.h>
17 18
18/* Peripheral id registers (0xFD0-0xFEC) */ 19/* Peripheral id registers (0xFD0-0xFEC) */
19#define CORESIGHT_PERIPHIDR4 0xfd0 20#define CORESIGHT_PERIPHIDR4 0xfd0
@@ -248,4 +249,24 @@ static inline struct coresight_platform_data *of_get_coresight_platform_data(
248 struct device *dev, struct device_node *node) { return NULL; } 249 struct device *dev, struct device_node *node) { return NULL; }
249#endif 250#endif
250 251
252#ifdef CONFIG_PID_NS
253static inline unsigned long
254coresight_vpid_to_pid(unsigned long vpid)
255{
256 struct task_struct *task = NULL;
257 unsigned long pid = 0;
258
259 rcu_read_lock();
260 task = find_task_by_vpid(vpid);
261 if (task)
262 pid = task_pid_nr(task);
263 rcu_read_unlock();
264
265 return pid;
266}
267#else
268static inline unsigned long
269coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
270#endif
271
251#endif 272#endif
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index c4d4eb8ac9fe..986c06c88d81 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -11,6 +11,7 @@
11 11
12#ifdef CONFIG_GENERIC_CPU_AUTOPROBE 12#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
13 13
14#include <linux/init.h>
14#include <linux/mod_devicetable.h> 15#include <linux/mod_devicetable.h>
15#include <asm/cpufeature.h> 16#include <asm/cpufeature.h>
16 17
@@ -43,16 +44,16 @@
43 * For a list of legal values for 'feature', please consult the file 44 * For a list of legal values for 'feature', please consult the file
44 * 'asm/cpufeature.h' of your favorite architecture. 45 * 'asm/cpufeature.h' of your favorite architecture.
45 */ 46 */
46#define module_cpu_feature_match(x, __init) \ 47#define module_cpu_feature_match(x, __initfunc) \
47static struct cpu_feature const cpu_feature_match_ ## x[] = \ 48static struct cpu_feature const cpu_feature_match_ ## x[] = \
48 { { .feature = cpu_feature(x) }, { } }; \ 49 { { .feature = cpu_feature(x) }, { } }; \
49MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ 50MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
50 \ 51 \
51static int cpu_feature_match_ ## x ## _init(void) \ 52static int __init cpu_feature_match_ ## x ## _init(void) \
52{ \ 53{ \
53 if (!cpu_have_feature(cpu_feature(x))) \ 54 if (!cpu_have_feature(cpu_feature(x))) \
54 return -ENODEV; \ 55 return -ENODEV; \
55 return __init(); \ 56 return __initfunc(); \
56} \ 57} \
57module_init(cpu_feature_match_ ## x ## _init) 58module_init(cpu_feature_match_ ## x ## _init)
58 59
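
The macro parameter is renamed from __init to __initfunc so it no longer collides with the __init section annotation now placed on the generated initcall. Usage is unchanged; a hypothetical module (FOO and foo_init are made up for the example):

#include <linux/cpufeature.h>
#include <linux/module.h>

static int __init foo_init(void)
{
	/* probe and register the driver here */
	return 0;
}
/* FOO stands in for an arch-defined feature name from asm/cpufeature.h */
module_cpu_feature_match(FOO, foo_init);
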
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index bde1e567b3a9..430efcbea48e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -51,11 +51,9 @@ struct cpufreq_cpuinfo {
51 unsigned int transition_latency; 51 unsigned int transition_latency;
52}; 52};
53 53
54struct cpufreq_real_policy { 54struct cpufreq_user_policy {
55 unsigned int min; /* in kHz */ 55 unsigned int min; /* in kHz */
56 unsigned int max; /* in kHz */ 56 unsigned int max; /* in kHz */
57 unsigned int policy; /* see above */
58 struct cpufreq_governor *governor; /* see below */
59}; 57};
60 58
61struct cpufreq_policy { 59struct cpufreq_policy {
@@ -88,7 +86,7 @@ struct cpufreq_policy {
88 struct work_struct update; /* if update_policy() needs to be 86 struct work_struct update; /* if update_policy() needs to be
89 * called, but you're in IRQ context */ 87 * called, but you're in IRQ context */
90 88
91 struct cpufreq_real_policy user_policy; 89 struct cpufreq_user_policy user_policy;
92 struct cpufreq_frequency_table *freq_table; 90 struct cpufreq_frequency_table *freq_table;
93 91
94 struct list_head policy_list; 92 struct list_head policy_list;
@@ -369,11 +367,10 @@ static inline void cpufreq_resume(void) {}
369 367
370/* Policy Notifiers */ 368/* Policy Notifiers */
371#define CPUFREQ_ADJUST (0) 369#define CPUFREQ_ADJUST (0)
372#define CPUFREQ_INCOMPATIBLE (1) 370#define CPUFREQ_NOTIFY (1)
373#define CPUFREQ_NOTIFY (2) 371#define CPUFREQ_START (2)
374#define CPUFREQ_START (3) 372#define CPUFREQ_CREATE_POLICY (3)
375#define CPUFREQ_CREATE_POLICY (4) 373#define CPUFREQ_REMOVE_POLICY (4)
376#define CPUFREQ_REMOVE_POLICY (5)
377 374
378#ifdef CONFIG_CPU_FREQ 375#ifdef CONFIG_CPU_FREQ
379int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 376int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -578,6 +575,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
578int cpufreq_boost_trigger_state(int state); 575int cpufreq_boost_trigger_state(int state);
579int cpufreq_boost_supported(void); 576int cpufreq_boost_supported(void);
580int cpufreq_boost_enabled(void); 577int cpufreq_boost_enabled(void);
578int cpufreq_enable_boost_support(void);
579bool policy_has_boost_freq(struct cpufreq_policy *policy);
581#else 580#else
582static inline int cpufreq_boost_trigger_state(int state) 581static inline int cpufreq_boost_trigger_state(int state)
583{ 582{
@@ -591,12 +590,23 @@ static inline int cpufreq_boost_enabled(void)
591{ 590{
592 return 0; 591 return 0;
593} 592}
593
594static inline int cpufreq_enable_boost_support(void)
595{
596 return -EINVAL;
597}
598
599static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
600{
601 return false;
602}
594#endif 603#endif
595/* the following function is for cpufreq core use only */ 604
596struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); 605struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
597 606
598/* the following are really really optional */ 607/* the following are really really optional */
599extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; 608extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
609extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
600extern struct freq_attr *cpufreq_generic_attr[]; 610extern struct freq_attr *cpufreq_generic_attr[];
601int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, 611int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
602 struct cpufreq_frequency_table *table); 612 struct cpufreq_frequency_table *table);
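
The new boost helpers let a driver enable boost support only when its frequency table actually contains boost entries. A hedged sketch of a driver ->init() path using them (foo_* is illustrative):

#include <linux/cpufreq.h>

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* ... fill and validate policy->freq_table first ... */

	if (policy_has_boost_freq(policy))
		return cpufreq_enable_boost_support();

	return 0;
}
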
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index d075d34279df..786ad32631a6 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -84,7 +84,6 @@ struct cpuidle_device {
84 struct list_head device_list; 84 struct list_head device_list;
85 85
86#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED 86#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
87 int safe_state_index;
88 cpumask_t coupled_cpus; 87 cpumask_t coupled_cpus;
89 struct cpuidle_coupled *coupled; 88 struct cpuidle_coupled *coupled;
90#endif 89#endif
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 8b6c083e68a7..8d70e1361ecd 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -137,6 +137,7 @@ struct cred {
137 kernel_cap_t cap_permitted; /* caps we're permitted */ 137 kernel_cap_t cap_permitted; /* caps we're permitted */
138 kernel_cap_t cap_effective; /* caps we can actually use */ 138 kernel_cap_t cap_effective; /* caps we can actually use */
139 kernel_cap_t cap_bset; /* capability bounding set */ 139 kernel_cap_t cap_bset; /* capability bounding set */
140 kernel_cap_t cap_ambient; /* Ambient capability set */
140#ifdef CONFIG_KEYS 141#ifdef CONFIG_KEYS
141 unsigned char jit_keyring; /* default keyring to attach requested 142 unsigned char jit_keyring; /* default keyring to attach requested
142 * keys to */ 143 * keys to */
@@ -212,6 +213,13 @@ static inline void validate_process_creds(void)
212} 213}
213#endif 214#endif
214 215
216static inline bool cap_ambient_invariant_ok(const struct cred *cred)
217{
218 return cap_issubset(cred->cap_ambient,
219 cap_intersect(cred->cap_permitted,
220 cred->cap_inheritable));
221}
222
215/** 223/**
216 * get_new_cred - Get a reference on a new set of credentials 224 * get_new_cred - Get a reference on a new set of credentials
217 * @cred: The new credentials to reference 225 * @cred: The new credentials to reference
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 81ef938b0a8e..e71cb70a1ac2 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -102,12 +102,6 @@
102#define CRYPTO_ALG_INTERNAL 0x00002000 102#define CRYPTO_ALG_INTERNAL 0x00002000
103 103
104/* 104/*
105 * Temporary flag used to prevent legacy AEAD implementations from
106 * being used by user-space.
107 */
108#define CRYPTO_ALG_AEAD_NEW 0x00004000
109
110/*
111 * Transform masks and values (for crt_flags). 105 * Transform masks and values (for crt_flags).
112 */ 106 */
113#define CRYPTO_TFM_REQ_MASK 0x000fff00 107#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -142,13 +136,10 @@
142struct scatterlist; 136struct scatterlist;
143struct crypto_ablkcipher; 137struct crypto_ablkcipher;
144struct crypto_async_request; 138struct crypto_async_request;
145struct crypto_aead;
146struct crypto_blkcipher; 139struct crypto_blkcipher;
147struct crypto_hash; 140struct crypto_hash;
148struct crypto_tfm; 141struct crypto_tfm;
149struct crypto_type; 142struct crypto_type;
150struct aead_request;
151struct aead_givcrypt_request;
152struct skcipher_givcrypt_request; 143struct skcipher_givcrypt_request;
153 144
154typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); 145typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -275,47 +266,6 @@ struct ablkcipher_alg {
275}; 266};
276 267
277/** 268/**
278 * struct old_aead_alg - AEAD cipher definition
279 * @maxauthsize: Set the maximum authentication tag size supported by the
280 * transformation. A transformation may support smaller tag sizes.
281 * As the authentication tag is a message digest to ensure the
282 * integrity of the encrypted data, a consumer typically wants the
283 * largest authentication tag possible as defined by this
284 * variable.
285 * @setauthsize: Set authentication size for the AEAD transformation. This
286 * function is used to specify the consumer requested size of the
287 * authentication tag to be either generated by the transformation
288 * during encryption or the size of the authentication tag to be
289 * supplied during the decryption operation. This function is also
290 * responsible for checking the authentication tag size for
291 * validity.
292 * @setkey: see struct ablkcipher_alg
293 * @encrypt: see struct ablkcipher_alg
294 * @decrypt: see struct ablkcipher_alg
295 * @givencrypt: see struct ablkcipher_alg
296 * @givdecrypt: see struct ablkcipher_alg
297 * @geniv: see struct ablkcipher_alg
298 * @ivsize: see struct ablkcipher_alg
299 *
300 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
301 * mandatory and must be filled.
302 */
303struct old_aead_alg {
304 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
305 unsigned int keylen);
306 int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
307 int (*encrypt)(struct aead_request *req);
308 int (*decrypt)(struct aead_request *req);
309 int (*givencrypt)(struct aead_givcrypt_request *req);
310 int (*givdecrypt)(struct aead_givcrypt_request *req);
311
312 const char *geniv;
313
314 unsigned int ivsize;
315 unsigned int maxauthsize;
316};
317
318/**
319 * struct blkcipher_alg - synchronous block cipher definition 269 * struct blkcipher_alg - synchronous block cipher definition
320 * @min_keysize: see struct ablkcipher_alg 270 * @min_keysize: see struct ablkcipher_alg
321 * @max_keysize: see struct ablkcipher_alg 271 * @max_keysize: see struct ablkcipher_alg
@@ -409,7 +359,6 @@ struct compress_alg {
409 359
410 360
411#define cra_ablkcipher cra_u.ablkcipher 361#define cra_ablkcipher cra_u.ablkcipher
412#define cra_aead cra_u.aead
413#define cra_blkcipher cra_u.blkcipher 362#define cra_blkcipher cra_u.blkcipher
414#define cra_cipher cra_u.cipher 363#define cra_cipher cra_u.cipher
415#define cra_compress cra_u.compress 364#define cra_compress cra_u.compress
@@ -460,7 +409,7 @@ struct compress_alg {
460 * struct crypto_type, which implements callbacks common for all 409 * struct crypto_type, which implements callbacks common for all
461 * transformation types. There are multiple options: 410 * transformation types. There are multiple options:
462 * &crypto_blkcipher_type, &crypto_ablkcipher_type, 411 * &crypto_blkcipher_type, &crypto_ablkcipher_type,
463 * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. 412 * &crypto_ahash_type, &crypto_rng_type.
464 * This field might be empty. In that case, there are no common 413 * This field might be empty. In that case, there are no common
465 * callbacks. This is the case for: cipher, compress, shash. 414 * callbacks. This is the case for: cipher, compress, shash.
466 * @cra_u: Callbacks implementing the transformation. This is a union of 415 * @cra_u: Callbacks implementing the transformation. This is a union of
@@ -508,7 +457,6 @@ struct crypto_alg {
508 457
509 union { 458 union {
510 struct ablkcipher_alg ablkcipher; 459 struct ablkcipher_alg ablkcipher;
511 struct old_aead_alg aead;
512 struct blkcipher_alg blkcipher; 460 struct blkcipher_alg blkcipher;
513 struct cipher_alg cipher; 461 struct cipher_alg cipher;
514 struct compress_alg compress; 462 struct compress_alg compress;
diff --git a/include/linux/dax.h b/include/linux/dax.h
new file mode 100644
index 000000000000..b415e521528d
--- /dev/null
+++ b/include/linux/dax.h
@@ -0,0 +1,39 @@
1#ifndef _LINUX_DAX_H
2#define _LINUX_DAX_H
3
4#include <linux/fs.h>
5#include <linux/mm.h>
6#include <asm/pgtable.h>
7
8ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
9 get_block_t, dio_iodone_t, int flags);
10int dax_clear_blocks(struct inode *, sector_t block, long size);
11int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
12int dax_truncate_page(struct inode *, loff_t from, get_block_t);
13int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
14 dax_iodone_t);
15int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
16 dax_iodone_t);
17#ifdef CONFIG_TRANSPARENT_HUGEPAGE
18int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
19 unsigned int flags, get_block_t, dax_iodone_t);
20int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
21 unsigned int flags, get_block_t, dax_iodone_t);
22#else
23static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
24 pmd_t *pmd, unsigned int flags, get_block_t gb,
25 dax_iodone_t di)
26{
27 return VM_FAULT_FALLBACK;
28}
29#define __dax_pmd_fault dax_pmd_fault
30#endif
31int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
32#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
33#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
34
35static inline bool vma_is_dax(struct vm_area_struct *vma)
36{
37 return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
38}
39#endif
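
A hedged sketch of how a filesystem might route faults through this new header: foo_get_block stands in for the filesystem's real get_block_t and is stubbed out here, and passing NULL for the dax_iodone_t argument assumes the filesystem needs no end-io work.

#include <linux/dax.h>
#include <linux/buffer_head.h>

static int foo_get_block(struct inode *inode, sector_t iblock,
			 struct buffer_head *bh_result, int create)
{
	return -EIO;	/* a real filesystem maps iblock to storage here */
}

static int foo_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, foo_get_block, NULL);
}

static const struct vm_operations_struct foo_dax_vm_ops = {
	.fault		= foo_dax_fault,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};
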
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 420311bcee38..9beb636b97eb 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -116,6 +116,12 @@ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
116 116
117bool debugfs_initialized(void); 117bool debugfs_initialized(void);
118 118
119ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
120 size_t count, loff_t *ppos);
121
122ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
123 size_t count, loff_t *ppos);
124
119#else 125#else
120 126
121#include <linux/err.h> 127#include <linux/err.h>
@@ -282,6 +288,20 @@ static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
282 return ERR_PTR(-ENODEV); 288 return ERR_PTR(-ENODEV);
283} 289}
284 290
291static inline ssize_t debugfs_read_file_bool(struct file *file,
292 char __user *user_buf,
293 size_t count, loff_t *ppos)
294{
295 return -ENODEV;
296}
297
298static inline ssize_t debugfs_write_file_bool(struct file *file,
299 const char __user *user_buf,
300 size_t count, loff_t *ppos)
301{
302 return -ENODEV;
303}
304
285#endif 305#endif
286 306
287#endif 307#endif
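
Exporting these two helpers lets a driver keep the standard debugfs on/off text format while hooking extra work into the write path. A hedged sketch (foo_* names are hypothetical; file->private_data is assumed to point at the same value a plain debugfs_create_bool() would use):

#include <linux/debugfs.h>
#include <linux/fs.h>

static void foo_apply_setting(void)
{
	/* hypothetical: react to the newly written value here */
}

static ssize_t foo_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	ssize_t ret = debugfs_write_file_bool(file, buf, count, ppos);

	if (ret > 0)
		foo_apply_setting();
	return ret;
}

static const struct file_operations foo_fops = {
	.read	= debugfs_read_file_bool,
	.write	= foo_write,
	.open	= simple_open,
	.llseek	= default_llseek,
};
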
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 51cc1deb7af3..76d23fa8c7d3 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -82,9 +82,6 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
82typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, 82typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
83 unsigned long arg); 83 unsigned long arg);
84 84
85typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
86 struct bio_vec *biovec, int max_size);
87
88/* 85/*
89 * These iteration functions are typically used to check (and combine) 86 * These iteration functions are typically used to check (and combine)
90 * properties of underlying devices. 87 * properties of underlying devices.
@@ -160,7 +157,6 @@ struct target_type {
160 dm_status_fn status; 157 dm_status_fn status;
161 dm_message_fn message; 158 dm_message_fn message;
162 dm_ioctl_fn ioctl; 159 dm_ioctl_fn ioctl;
163 dm_merge_fn merge;
164 dm_busy_fn busy; 160 dm_busy_fn busy;
165 dm_iterate_devices_fn iterate_devices; 161 dm_iterate_devices_fn iterate_devices;
166 dm_io_hints_fn io_hints; 162 dm_io_hints_fn io_hints;
diff --git a/include/linux/device.h b/include/linux/device.h
index a2b4ea70a946..5d7bc6349930 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -341,7 +341,7 @@ struct subsys_interface {
341 struct bus_type *subsys; 341 struct bus_type *subsys;
342 struct list_head node; 342 struct list_head node;
343 int (*add_dev)(struct device *dev, struct subsys_interface *sif); 343 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
344 int (*remove_dev)(struct device *dev, struct subsys_interface *sif); 344 void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
345}; 345};
346 346
347int subsys_interface_register(struct subsys_interface *sif); 347int subsys_interface_register(struct subsys_interface *sif);
@@ -714,6 +714,8 @@ struct device_dma_parameters {
714 * along with subsystem-level and driver-level callbacks. 714 * along with subsystem-level and driver-level callbacks.
715 * @pins: For device pin management. 715 * @pins: For device pin management.
716 * See Documentation/pinctrl.txt for details. 716 * See Documentation/pinctrl.txt for details.
717 * @msi_list: Hosts MSI descriptors
718 * @msi_domain: The generic MSI domain this device is using.
717 * @numa_node: NUMA node this device is close to. 719 * @numa_node: NUMA node this device is close to.
718 * @dma_mask: Dma mask (if dma'ble device). 720 * @dma_mask: Dma mask (if dma'ble device).
719 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all 721 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -774,9 +776,15 @@ struct device {
774 struct dev_pm_info power; 776 struct dev_pm_info power;
775 struct dev_pm_domain *pm_domain; 777 struct dev_pm_domain *pm_domain;
776 778
779#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
780 struct irq_domain *msi_domain;
781#endif
777#ifdef CONFIG_PINCTRL 782#ifdef CONFIG_PINCTRL
778 struct dev_pin_info *pins; 783 struct dev_pin_info *pins;
779#endif 784#endif
785#ifdef CONFIG_GENERIC_MSI_IRQ
786 struct list_head msi_list;
787#endif
780 788
781#ifdef CONFIG_NUMA 789#ifdef CONFIG_NUMA
782 int numa_node; /* NUMA node this device is close to */ 790 int numa_node; /* NUMA node this device is close to */
@@ -861,6 +869,22 @@ static inline void set_dev_node(struct device *dev, int node)
861} 869}
862#endif 870#endif
863 871
872static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
873{
874#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
875 return dev->msi_domain;
876#else
877 return NULL;
878#endif
879}
880
881static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
882{
883#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
884 dev->msi_domain = d;
885#endif
886}
887
864static inline void *dev_get_drvdata(const struct device *dev) 888static inline void *dev_get_drvdata(const struct device *dev)
865{ 889{
866 return dev->driver_data; 890 return dev->driver_data;
@@ -959,6 +983,8 @@ extern int __must_check device_add(struct device *dev);
959extern void device_del(struct device *dev); 983extern void device_del(struct device *dev);
960extern int device_for_each_child(struct device *dev, void *data, 984extern int device_for_each_child(struct device *dev, void *data,
961 int (*fn)(struct device *dev, void *data)); 985 int (*fn)(struct device *dev, void *data));
986extern int device_for_each_child_reverse(struct device *dev, void *data,
987 int (*fn)(struct device *dev, void *data));
962extern struct device *device_find_child(struct device *dev, void *data, 988extern struct device *device_find_child(struct device *dev, void *data,
963 int (*match)(struct device *dev, void *data)); 989 int (*match)(struct device *dev, void *data));
964extern int device_rename(struct device *dev, const char *new_name); 990extern int device_rename(struct device *dev, const char *new_name);
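
device_for_each_child_reverse() mirrors device_for_each_child() but walks the children newest-first, which suits teardown paths. A minimal, hypothetical sketch:

#include <linux/device.h>

static int foo_teardown_child(struct device *dev, void *data)
{
	dev_info(dev, "tearing down\n");
	return 0;	/* returning non-zero stops the iteration */
}

static void foo_remove_children(struct device *parent)
{
	device_for_each_child_reverse(parent, NULL, foo_teardown_child);
}
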
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e2f5eb419976..7ea9184eaa13 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -66,6 +66,7 @@ enum dma_transaction_type {
66 DMA_XOR_VAL, 66 DMA_XOR_VAL,
67 DMA_PQ_VAL, 67 DMA_PQ_VAL,
68 DMA_MEMSET, 68 DMA_MEMSET,
69 DMA_MEMSET_SG,
69 DMA_INTERRUPT, 70 DMA_INTERRUPT,
70 DMA_SG, 71 DMA_SG,
71 DMA_PRIVATE, 72 DMA_PRIVATE,
@@ -183,6 +184,8 @@ struct dma_interleaved_template {
183 * operation it continues the calculation with new sources 184 * operation it continues the calculation with new sources
184 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend 185 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
185 * on the result of this operation 186 * on the result of this operation
187 * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
188 * cleared or freed
186 */ 189 */
187enum dma_ctrl_flags { 190enum dma_ctrl_flags {
188 DMA_PREP_INTERRUPT = (1 << 0), 191 DMA_PREP_INTERRUPT = (1 << 0),
@@ -191,6 +194,7 @@ enum dma_ctrl_flags {
191 DMA_PREP_PQ_DISABLE_Q = (1 << 3), 194 DMA_PREP_PQ_DISABLE_Q = (1 << 3),
192 DMA_PREP_CONTINUE = (1 << 4), 195 DMA_PREP_CONTINUE = (1 << 4),
193 DMA_PREP_FENCE = (1 << 5), 196 DMA_PREP_FENCE = (1 << 5),
197 DMA_CTRL_REUSE = (1 << 6),
194}; 198};
195 199
196/** 200/**
@@ -400,6 +404,8 @@ enum dma_residue_granularity {
400 * @cmd_pause: true, if pause and thereby resume is supported 404 * @cmd_pause: true, if pause and thereby resume is supported
401 * @cmd_terminate: true, if terminate cmd is supported 405 * @cmd_terminate: true, if terminate cmd is supported
402 * @residue_granularity: granularity of the reported transfer residue 406 * @residue_granularity: granularity of the reported transfer residue
407 * @descriptor_reuse: if a descriptor can be reused by client and
408 * resubmitted multiple times
403 */ 409 */
404struct dma_slave_caps { 410struct dma_slave_caps {
405 u32 src_addr_widths; 411 u32 src_addr_widths;
@@ -408,6 +414,7 @@ struct dma_slave_caps {
408 bool cmd_pause; 414 bool cmd_pause;
409 bool cmd_terminate; 415 bool cmd_terminate;
410 enum dma_residue_granularity residue_granularity; 416 enum dma_residue_granularity residue_granularity;
417 bool descriptor_reuse;
411}; 418};
412 419
413static inline const char *dma_chan_name(struct dma_chan *chan) 420static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -467,6 +474,7 @@ struct dma_async_tx_descriptor {
467 dma_addr_t phys; 474 dma_addr_t phys;
468 struct dma_chan *chan; 475 struct dma_chan *chan;
469 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 476 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
477 int (*desc_free)(struct dma_async_tx_descriptor *tx);
470 dma_async_tx_callback callback; 478 dma_async_tx_callback callback;
471 void *callback_param; 479 void *callback_param;
472 struct dmaengine_unmap_data *unmap; 480 struct dmaengine_unmap_data *unmap;
@@ -585,6 +593,20 @@ struct dma_tx_state {
585}; 593};
586 594
587/** 595/**
596 * enum dmaengine_alignment - defines alignment of the DMA async tx
597 * buffers
598 */
599enum dmaengine_alignment {
600 DMAENGINE_ALIGN_1_BYTE = 0,
601 DMAENGINE_ALIGN_2_BYTES = 1,
602 DMAENGINE_ALIGN_4_BYTES = 2,
603 DMAENGINE_ALIGN_8_BYTES = 3,
604 DMAENGINE_ALIGN_16_BYTES = 4,
605 DMAENGINE_ALIGN_32_BYTES = 5,
606 DMAENGINE_ALIGN_64_BYTES = 6,
607};
608
609/**
588 * struct dma_device - info on the entity supplying DMA services 610 * struct dma_device - info on the entity supplying DMA services
589 * @chancnt: how many DMA channels are supported 611 * @chancnt: how many DMA channels are supported
590 * @privatecnt: how many DMA channels are requested by dma_request_channel 612 * @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -616,6 +638,7 @@ struct dma_tx_state {
616 * @device_prep_dma_pq: prepares a pq operation 638 * @device_prep_dma_pq: prepares a pq operation
617 * @device_prep_dma_pq_val: prepares a pqzero_sum operation 639 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
618 * @device_prep_dma_memset: prepares a memset operation 640 * @device_prep_dma_memset: prepares a memset operation
641 * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
619 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 642 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
620 * @device_prep_slave_sg: prepares a slave dma operation 643 * @device_prep_slave_sg: prepares a slave dma operation
621 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. 644 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -645,10 +668,10 @@ struct dma_device {
645 dma_cap_mask_t cap_mask; 668 dma_cap_mask_t cap_mask;
646 unsigned short max_xor; 669 unsigned short max_xor;
647 unsigned short max_pq; 670 unsigned short max_pq;
648 u8 copy_align; 671 enum dmaengine_alignment copy_align;
649 u8 xor_align; 672 enum dmaengine_alignment xor_align;
650 u8 pq_align; 673 enum dmaengine_alignment pq_align;
651 u8 fill_align; 674 enum dmaengine_alignment fill_align;
652 #define DMA_HAS_PQ_CONTINUE (1 << 15) 675 #define DMA_HAS_PQ_CONTINUE (1 << 15)
653 676
654 int dev_id; 677 int dev_id;
@@ -682,6 +705,9 @@ struct dma_device {
682 struct dma_async_tx_descriptor *(*device_prep_dma_memset)( 705 struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
683 struct dma_chan *chan, dma_addr_t dest, int value, size_t len, 706 struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
684 unsigned long flags); 707 unsigned long flags);
708 struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
709 struct dma_chan *chan, struct scatterlist *sg,
710 unsigned int nents, int value, unsigned long flags);
685 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( 711 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
686 struct dma_chan *chan, unsigned long flags); 712 struct dma_chan *chan, unsigned long flags);
687 struct dma_async_tx_descriptor *(*device_prep_dma_sg)( 713 struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -833,7 +859,8 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
833 return desc->tx_submit(desc); 859 return desc->tx_submit(desc);
834} 860}
835 861
836static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) 862static inline bool dmaengine_check_align(enum dmaengine_alignment align,
863 size_t off1, size_t off2, size_t len)
837{ 864{
838 size_t mask; 865 size_t mask;
839 866
@@ -1155,6 +1182,39 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
1155} 1182}
1156#endif 1183#endif
1157 1184
1185static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
1186{
1187 struct dma_slave_caps caps;
1188
1189 dma_get_slave_caps(tx->chan, &caps);
1190
1191 if (caps.descriptor_reuse) {
1192 tx->flags |= DMA_CTRL_REUSE;
1193 return 0;
1194 } else {
1195 return -EPERM;
1196 }
1197}
1198
1199static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
1200{
1201 tx->flags &= ~DMA_CTRL_REUSE;
1202}
1203
1204static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
1205{
1206 return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
1207}
1208
1209static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
1210{
1211 /* this is supported for reusable desc, so check that */
1212 if (dmaengine_desc_test_reuse(desc))
1213 return desc->desc_free(desc);
1214 else
1215 return -EPERM;
1216}
1217
1158/* --- DMA device --- */ 1218/* --- DMA device --- */
1159 1219
1160int dma_async_device_register(struct dma_device *device); 1220int dma_async_device_register(struct dma_device *device);
@@ -1169,7 +1229,7 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
1169static inline struct dma_chan 1229static inline struct dma_chan
1170*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, 1230*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
1171 dma_filter_fn fn, void *fn_param, 1231 dma_filter_fn fn, void *fn_param,
1172 struct device *dev, char *name) 1232 struct device *dev, const char *name)
1173{ 1233{
1174 struct dma_chan *chan; 1234 struct dma_chan *chan;
1175 1235
@@ -1177,6 +1237,9 @@ static inline struct dma_chan
1177 if (chan) 1237 if (chan)
1178 return chan; 1238 return chan;
1179 1239
1240 if (!fn || !fn_param)
1241 return NULL;
1242
1180 return __dma_request_channel(mask, fn, fn_param); 1243 return __dma_request_channel(mask, fn, fn_param);
1181} 1244}
1182#endif /* DMAENGINE_H */ 1245#endif /* DMAENGINE_H */
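
The reuse API above is opt-in per descriptor and only succeeds on channels that report descriptor_reuse. A hedged sketch of the intended client flow (foo_* and the slave-direction choice are illustrative):

#include <linux/dmaengine.h>

static int foo_submit_reusable(struct dma_chan *chan, dma_addr_t buf,
			       size_t len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* Mark the descriptor reusable; fails with -EPERM if unsupported. */
	if (dmaengine_desc_set_reuse(desc))
		return -EPERM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* when the client is finally done with it: dmaengine_desc_free(desc) */
	return 0;
}
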
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index e1043f79122f..53ba737505df 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -24,6 +24,12 @@ void dma_pool_destroy(struct dma_pool *pool);
24void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 24void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
25 dma_addr_t *handle); 25 dma_addr_t *handle);
26 26
27static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
28 dma_addr_t *handle)
29{
30 return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
31}
32
27void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); 33void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
28 34
29/* 35/*
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9012f8775208..eb049c622208 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
76 76
77#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 77#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
78 return (((*(const u32 *)addr) ^ (*(const u32 *)b)) | 78 return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
79 ((a[2] ^ b[2]) & m)) == 0; 79 (__force int)((a[2] ^ b[2]) & m)) == 0;
80#else 80#else
81 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; 81 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
82#endif 82#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b16d929fa75f..c0f8c4fc5d45 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -27,8 +27,6 @@
27#define __LINUX_EXTCON_H__ 27#define __LINUX_EXTCON_H__
28 28
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/notifier.h>
31#include <linux/sysfs.h>
32 30
33/* 31/*
34 * Define the unique id of supported external connectors 32 * Define the unique id of supported external connectors
@@ -77,8 +75,6 @@ struct extcon_cable;
 77 * be attached simultaneously. {0x7, 0} is equivalent to 75
78 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there 76 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
79 * can be no simultaneous connections. 77 * can be no simultaneous connections.
80 * @print_state: An optional callback to override the method to print the
81 * status of the extcon device.
82 * @dev: Device of this extcon. 78 * @dev: Device of this extcon.
83 * @state: Attach/detach state of this extcon. Do not provide at 79 * @state: Attach/detach state of this extcon. Do not provide at
84 * register-time. 80 * register-time.
@@ -102,9 +98,6 @@ struct extcon_dev {
102 const unsigned int *supported_cable; 98 const unsigned int *supported_cable;
103 const u32 *mutually_exclusive; 99 const u32 *mutually_exclusive;
104 100
105 /* Optional callbacks to override class functions */
106 ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
107
108 /* Internal data. Please do not set. */ 101 /* Internal data. Please do not set. */
109 struct device dev; 102 struct device dev;
110 struct raw_notifier_head *nh; 103 struct raw_notifier_head *nh;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 920408a21ffd..25c6324a0dd0 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -417,15 +417,25 @@ typedef __le32 f2fs_hash_t;
417 417
418#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS) 418#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
419 419
420/* the number of dentry in a block */
421#define NR_DENTRY_IN_BLOCK 214
422
423/* MAX level for dir lookup */ 420/* MAX level for dir lookup */
424#define MAX_DIR_HASH_DEPTH 63 421#define MAX_DIR_HASH_DEPTH 63
425 422
426/* MAX buckets in one level of dir */ 423/* MAX buckets in one level of dir */
427#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1)) 424#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
428 425
426/*
427 * space utilization of regular dentry and inline dentry
428 * regular dentry inline dentry
429 * bitmap 1 * 27 = 27 1 * 23 = 23
430 * reserved 1 * 3 = 3 1 * 7 = 7
431 * dentry 11 * 214 = 2354 11 * 182 = 2002
432 * filename 8 * 214 = 1712 8 * 182 = 1456
433 * total 4096 3488
434 *
 435 * Note: there is more reserved space in inline dentry than in regular
436 * dentry, when converting inline dentry we should handle this carefully.
437 */
438#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
429#define SIZE_OF_DIR_ENTRY 11 /* by byte */ 439#define SIZE_OF_DIR_ENTRY 11 /* by byte */
430#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ 440#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
431 BITS_PER_BYTE) 441 BITS_PER_BYTE)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 043f3283b71c..bc9afa74ee11 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -788,7 +788,7 @@ struct dmt_videomode {
788 788
789extern const char *fb_mode_option; 789extern const char *fb_mode_option;
790extern const struct fb_videomode vesa_modes[]; 790extern const struct fb_videomode vesa_modes[];
791extern const struct fb_videomode cea_modes[64]; 791extern const struct fb_videomode cea_modes[65];
792extern const struct dmt_videomode dmt_modes[]; 792extern const struct dmt_videomode dmt_modes[];
793 793
794struct fb_modelist { 794struct fb_modelist {
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index fbb88740634a..674e3e226465 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -86,8 +86,8 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
86 86
87static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd) 87static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
88{ 88{
89 rcu_lockdep_assert(rcu_read_lock_held() || 89 RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
90 lockdep_is_held(&files->file_lock), 90 !lockdep_is_held(&files->file_lock),
91 "suspicious rcu_dereference_check() usage"); 91 "suspicious rcu_dereference_check() usage");
92 return __fcheck_files(files, fd); 92 return __fcheck_files(files, fd);
93} 93}
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 17724f6ea983..fa2cab985e57 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -12,6 +12,7 @@
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <linux/printk.h> 13#include <linux/printk.h>
14#include <linux/workqueue.h> 14#include <linux/workqueue.h>
15#include <linux/sched.h>
15 16
16#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
17 18
@@ -354,6 +355,16 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
354 offsetof(struct bpf_prog, insns[proglen])); 355 offsetof(struct bpf_prog, insns[proglen]));
355} 356}
356 357
358static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
359{
360 /* When classic BPF programs have been loaded and the arch
361 * does not have a classic BPF JIT (anymore), they have been
362 * converted via bpf_migrate_filter() to eBPF and thus always
363 * have an unspec program type.
364 */
365 return prog->type == BPF_PROG_TYPE_UNSPEC;
366}
367
357#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) 368#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
358 369
359#ifdef CONFIG_DEBUG_SET_MODULE_RONX 370#ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
411 422
412u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 423u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
413void bpf_int_jit_compile(struct bpf_prog *fp); 424void bpf_int_jit_compile(struct bpf_prog *fp);
425bool bpf_helper_changes_skb_data(void *func);
414 426
415#ifdef CONFIG_BPF_JIT 427#ifdef CONFIG_BPF_JIT
416typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); 428typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@ void bpf_jit_free(struct bpf_prog *fp);
427static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, 439static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
428 u32 pass, void *image) 440 u32 pass, void *image)
429{ 441{
430 pr_err("flen=%u proglen=%u pass=%u image=%pK\n", 442 pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
431 flen, proglen, pass, image); 443 proglen, pass, image, current->comm, task_pid_nr(current));
444
432 if (image) 445 if (image)
433 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 446 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
434 16, 1, image, proglen, false); 447 16, 1, image, proglen, false);
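bpf_prog_was_classic() lets the core and arch JITs distinguish programs migrated from classic BPF (which end up with BPF_PROG_TYPE_UNSPEC) from native eBPF. A hedged sketch of the kind of decision a backend might key off it; the helper below is hypothetical:

#include <linux/filter.h>

/* Hypothetical policy helper: the classic-to-eBPF conversion assumes skb
 * data/headlen are cached, so a JIT could decide to preload them here. */
static bool demo_jit_preloads_skb_data(const struct bpf_prog *prog)
{
        return bpf_prog_was_classic(prog);
}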
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 84b783f277f7..72d8a844c692 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1,7 +1,6 @@
1#ifndef _LINUX_FS_H 1#ifndef _LINUX_FS_H
2#define _LINUX_FS_H 2#define _LINUX_FS_H
3 3
4
5#include <linux/linkage.h> 4#include <linux/linkage.h>
6#include <linux/wait.h> 5#include <linux/wait.h>
7#include <linux/kdev_t.h> 6#include <linux/kdev_t.h>
@@ -30,6 +29,8 @@
30#include <linux/lockdep.h> 29#include <linux/lockdep.h>
31#include <linux/percpu-rwsem.h> 30#include <linux/percpu-rwsem.h>
32#include <linux/blk_types.h> 31#include <linux/blk_types.h>
32#include <linux/workqueue.h>
33#include <linux/percpu-rwsem.h>
33 34
34#include <asm/byteorder.h> 35#include <asm/byteorder.h>
35#include <uapi/linux/fs.h> 36#include <uapi/linux/fs.h>
@@ -51,7 +52,6 @@ struct swap_info_struct;
51struct seq_file; 52struct seq_file;
52struct workqueue_struct; 53struct workqueue_struct;
53struct iov_iter; 54struct iov_iter;
54struct vm_fault;
55 55
56extern void __init inode_init(void); 56extern void __init inode_init(void);
57extern void __init inode_init_early(void); 57extern void __init inode_init_early(void);
@@ -636,7 +636,7 @@ struct inode {
636 unsigned long dirtied_time_when; 636 unsigned long dirtied_time_when;
637 637
638 struct hlist_node i_hash; 638 struct hlist_node i_hash;
639 struct list_head i_wb_list; /* backing dev IO list */ 639 struct list_head i_io_list; /* backing dev IO list */
640#ifdef CONFIG_CGROUP_WRITEBACK 640#ifdef CONFIG_CGROUP_WRITEBACK
641 struct bdi_writeback *i_wb; /* the associated cgroup wb */ 641 struct bdi_writeback *i_wb; /* the associated cgroup wb */
642 642
@@ -943,12 +943,18 @@ struct lock_manager_operations {
943 943
944struct lock_manager { 944struct lock_manager {
945 struct list_head list; 945 struct list_head list;
946 /*
947 * NFSv4 and up also want opens blocked during the grace period;
948 * NLM doesn't care:
949 */
950 bool block_opens;
946}; 951};
947 952
948struct net; 953struct net;
949void locks_start_grace(struct net *, struct lock_manager *); 954void locks_start_grace(struct net *, struct lock_manager *);
950void locks_end_grace(struct lock_manager *); 955void locks_end_grace(struct lock_manager *);
951int locks_in_grace(struct net *); 956int locks_in_grace(struct net *);
957int opens_in_grace(struct net *);
952 958
953/* that will die - we need it for nfs_lock_info */ 959/* that will die - we need it for nfs_lock_info */
954#include <linux/nfs_fs_i.h> 960#include <linux/nfs_fs_i.h>
@@ -1260,6 +1266,7 @@ struct mm_struct;
1260 1266
1261/* sb->s_iflags */ 1267/* sb->s_iflags */
1262#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ 1268#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
1269#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
1263 1270
1264/* Possible states of 'frozen' field */ 1271/* Possible states of 'frozen' field */
1265enum { 1272enum {
@@ -1274,16 +1281,9 @@ enum {
1274#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) 1281#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
1275 1282
1276struct sb_writers { 1283struct sb_writers {
1277 /* Counters for counting writers at each level */ 1284 int frozen; /* Is sb frozen? */
1278 struct percpu_counter counter[SB_FREEZE_LEVELS]; 1285 wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
1279 wait_queue_head_t wait; /* queue for waiting for 1286 struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
1280 writers / faults to finish */
1281 int frozen; /* Is sb frozen? */
1282 wait_queue_head_t wait_unfrozen; /* queue for waiting for
1283 sb to be thawed */
1284#ifdef CONFIG_DEBUG_LOCK_ALLOC
1285 struct lockdep_map lock_map[SB_FREEZE_LEVELS];
1286#endif
1287}; 1287};
1288 1288
1289struct super_block { 1289struct super_block {
@@ -1309,7 +1309,6 @@ struct super_block {
1309#endif 1309#endif
1310 const struct xattr_handler **s_xattr; 1310 const struct xattr_handler **s_xattr;
1311 1311
1312 struct list_head s_inodes; /* all inodes */
1313 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ 1312 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
1314 struct list_head s_mounts; /* list of mounts; _not_ for fs use */ 1313 struct list_head s_mounts; /* list of mounts; _not_ for fs use */
1315 struct block_device *s_bdev; 1314 struct block_device *s_bdev;
@@ -1375,11 +1374,18 @@ struct super_block {
1375 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; 1374 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
1376 struct list_lru s_inode_lru ____cacheline_aligned_in_smp; 1375 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
1377 struct rcu_head rcu; 1376 struct rcu_head rcu;
1377 struct work_struct destroy_work;
1378
1379 struct mutex s_sync_lock; /* sync serialisation lock */
1378 1380
1379 /* 1381 /*
1380 * Indicates how deep in a filesystem stack this SB is 1382 * Indicates how deep in a filesystem stack this SB is
1381 */ 1383 */
1382 int s_stack_depth; 1384 int s_stack_depth;
1385
1386 /* s_inode_list_lock protects s_inodes */
1387 spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
1388 struct list_head s_inodes; /* all inodes */
1383}; 1389};
1384 1390
1385extern struct timespec current_fs_time(struct super_block *sb); 1391extern struct timespec current_fs_time(struct super_block *sb);
@@ -1391,6 +1397,11 @@ extern struct timespec current_fs_time(struct super_block *sb);
1391void __sb_end_write(struct super_block *sb, int level); 1397void __sb_end_write(struct super_block *sb, int level);
1392int __sb_start_write(struct super_block *sb, int level, bool wait); 1398int __sb_start_write(struct super_block *sb, int level, bool wait);
1393 1399
1400#define __sb_writers_acquired(sb, lev) \
1401 percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
1402#define __sb_writers_release(sb, lev) \
1403 percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
1404
1394/** 1405/**
1395 * sb_end_write - drop write access to a superblock 1406 * sb_end_write - drop write access to a superblock
1396 * @sb: the super we wrote to 1407 * @sb: the super we wrote to
@@ -1611,7 +1622,6 @@ struct file_operations {
1611 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); 1622 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
1612 long (*compat_ioctl) (struct file *, unsigned int, unsigned long); 1623 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
1613 int (*mmap) (struct file *, struct vm_area_struct *); 1624 int (*mmap) (struct file *, struct vm_area_struct *);
1614 int (*mremap)(struct file *, struct vm_area_struct *);
1615 int (*open) (struct inode *, struct file *); 1625 int (*open) (struct inode *, struct file *);
1616 int (*flush) (struct file *, fl_owner_t id); 1626 int (*flush) (struct file *, fl_owner_t id);
1617 int (*release) (struct inode *, struct file *); 1627 int (*release) (struct inode *, struct file *);
@@ -2608,7 +2618,7 @@ static inline void insert_inode_hash(struct inode *inode)
2608extern void __remove_inode_hash(struct inode *); 2618extern void __remove_inode_hash(struct inode *);
2609static inline void remove_inode_hash(struct inode *inode) 2619static inline void remove_inode_hash(struct inode *inode)
2610{ 2620{
2611 if (!inode_unhashed(inode)) 2621 if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
2612 __remove_inode_hash(inode); 2622 __remove_inode_hash(inode);
2613} 2623}
2614 2624
@@ -2667,19 +2677,6 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
2667extern int generic_file_open(struct inode * inode, struct file * filp); 2677extern int generic_file_open(struct inode * inode, struct file * filp);
2668extern int nonseekable_open(struct inode * inode, struct file * filp); 2678extern int nonseekable_open(struct inode * inode, struct file * filp);
2669 2679
2670ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
2671 get_block_t, dio_iodone_t, int flags);
2672int dax_clear_blocks(struct inode *, sector_t block, long size);
2673int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
2674int dax_truncate_page(struct inode *, loff_t from, get_block_t);
2675int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
2676 dax_iodone_t);
2677int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
2678 dax_iodone_t);
2679int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
2680#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
2681#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
2682
2683#ifdef CONFIG_BLOCK 2680#ifdef CONFIG_BLOCK
2684typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, 2681typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
2685 loff_t file_offset); 2682 loff_t file_offset);
@@ -3041,4 +3038,6 @@ static inline bool dir_relax(struct inode *inode)
3041 return !IS_DEADDIR(inode); 3038 return !IS_DEADDIR(inode);
3042} 3039}
3043 3040
3041extern bool path_noexec(const struct path *path);
3042
3044#endif /* _LINUX_FS_H */ 3043#endif /* _LINUX_FS_H */
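The sb_writers rework replaces the per-level percpu counters and wait queue with percpu_rw_semaphores, and the new __sb_writers_acquired()/__sb_writers_release() macros are lockdep-only annotations for handing that state between contexts (freeze/thaw style paths); ordinary callers keep using the existing wrappers. A minimal sketch under that assumption:

#include <linux/fs.h>

static void demo_modify_fs(struct super_block *sb)
{
        sb_start_write(sb);     /* read-acquires rw_sem[SB_FREEZE_WRITE - 1] */
        /* ... dirty some metadata; a concurrent freeze waits here until we drop it ... */
        sb_end_write(sb);
}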
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 2a2f56b292c1..f2912914141a 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -20,11 +20,6 @@
20#define FSL_UTMI_PHY_DLY 10 /*As per P1010RM, delay for UTMI 20#define FSL_UTMI_PHY_DLY 10 /*As per P1010RM, delay for UTMI
21 PHY CLK to become stable - 10ms*/ 21 PHY CLK to become stable - 10ms*/
22#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */ 22#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */
23#define FSL_USB_VER_OLD 0
24#define FSL_USB_VER_1_6 1
25#define FSL_USB_VER_2_2 2
26#define FSL_USB_VER_2_4 3
27#define FSL_USB_VER_2_5 4
28 23
29#include <linux/types.h> 24#include <linux/types.h>
30 25
@@ -52,6 +47,15 @@
52 * 47 *
53 */ 48 */
54 49
50enum fsl_usb2_controller_ver {
51 FSL_USB_VER_NONE = -1,
52 FSL_USB_VER_OLD = 0,
53 FSL_USB_VER_1_6 = 1,
54 FSL_USB_VER_2_2 = 2,
55 FSL_USB_VER_2_4 = 3,
56 FSL_USB_VER_2_5 = 4,
57};
58
55enum fsl_usb2_operating_modes { 59enum fsl_usb2_operating_modes {
56 FSL_USB2_MPH_HOST, 60 FSL_USB2_MPH_HOST,
57 FSL_USB2_DR_HOST, 61 FSL_USB2_DR_HOST,
@@ -65,6 +69,7 @@ enum fsl_usb2_phy_modes {
65 FSL_USB2_PHY_UTMI, 69 FSL_USB2_PHY_UTMI,
66 FSL_USB2_PHY_UTMI_WIDE, 70 FSL_USB2_PHY_UTMI_WIDE,
67 FSL_USB2_PHY_SERIAL, 71 FSL_USB2_PHY_SERIAL,
72 FSL_USB2_PHY_UTMI_DUAL,
68}; 73};
69 74
70struct clk; 75struct clk;
@@ -72,7 +77,7 @@ struct platform_device;
72 77
73struct fsl_usb2_platform_data { 78struct fsl_usb2_platform_data {
74 /* board specific information */ 79 /* board specific information */
75 int controller_ver; 80 enum fsl_usb2_controller_ver controller_ver;
76 enum fsl_usb2_operating_modes operating_mode; 81 enum fsl_usb2_operating_modes operating_mode;
77 enum fsl_usb2_phy_modes phy_mode; 82 enum fsl_usb2_phy_modes phy_mode;
78 unsigned int port_enables; 83 unsigned int port_enables;
@@ -93,6 +98,9 @@ struct fsl_usb2_platform_data {
93 98
94 unsigned suspended:1; 99 unsigned suspended:1;
95 unsigned already_suspended:1; 100 unsigned already_suspended:1;
101 unsigned has_fsl_erratum_a007792:1;
102 unsigned has_fsl_erratum_a005275:1;
103 unsigned check_phy_clk_valid:1;
96 104
97 /* register save area for suspend/resume */ 105 /* register save area for suspend/resume */
98 u32 pm_command; 106 u32 pm_command;
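With controller_ver now typed as enum fsl_usb2_controller_ver (including the new FSL_USB_VER_NONE sentinel) and the erratum bits added to the platform data, version checks read as named comparisons. A hedged sketch; the pairing of version and erratum below is illustrative only:

#include <linux/fsl_devices.h>

static bool demo_apply_phy_quirk(const struct fsl_usb2_platform_data *pdata)
{
        if (pdata->controller_ver == FSL_USB_VER_NONE)
                return false;           /* version could not be determined */

        return pdata->controller_ver >= FSL_USB_VER_2_5 ||
               pdata->has_fsl_erratum_a005275;
}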
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index bf0321eabbda..0023088b253b 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -841,9 +841,59 @@ struct fsl_ifc_ctrl {
841 841
842 u32 nand_stat; 842 u32 nand_stat;
843 wait_queue_head_t nand_wait; 843 wait_queue_head_t nand_wait;
844 bool little_endian;
844}; 845};
845 846
846extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; 847extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
847 848
849static inline u32 ifc_in32(void __iomem *addr)
850{
851 u32 val;
852
853 if (fsl_ifc_ctrl_dev->little_endian)
854 val = ioread32(addr);
855 else
856 val = ioread32be(addr);
857
858 return val;
859}
860
861static inline u16 ifc_in16(void __iomem *addr)
862{
863 u16 val;
864
865 if (fsl_ifc_ctrl_dev->little_endian)
866 val = ioread16(addr);
867 else
868 val = ioread16be(addr);
869
870 return val;
871}
872
873static inline u8 ifc_in8(void __iomem *addr)
874{
875 return ioread8(addr);
876}
877
878static inline void ifc_out32(u32 val, void __iomem *addr)
879{
880 if (fsl_ifc_ctrl_dev->little_endian)
881 iowrite32(val, addr);
882 else
883 iowrite32be(val, addr);
884}
885
886static inline void ifc_out16(u16 val, void __iomem *addr)
887{
888 if (fsl_ifc_ctrl_dev->little_endian)
889 iowrite16(val, addr);
890 else
891 iowrite16be(val, addr);
892}
893
894static inline void ifc_out8(u8 val, void __iomem *addr)
895{
896 iowrite8(val, addr);
897}
848 898
849#endif /* __ASM_FSL_IFC_H */ 899#endif /* __ASM_FSL_IFC_H */
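The new accessors hide the controller's endianness behind the little_endian flag, picking ioread32()/iowrite32() or their big-endian variants. A small endian-neutral read-modify-write sketch; the register pointer and mask are placeholders, not real IFC register names:

#include <linux/fsl_ifc.h>

static u32 demo_ifc_set_bits(void __iomem *reg, u32 mask)
{
        u32 val = ifc_in32(reg);        /* byte-swapped as needed */

        ifc_out32(val | mask, reg);
        return val;
}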
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 65a517dd32f7..533c4408529a 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -195,40 +195,49 @@ struct fsnotify_group {
195#define FSNOTIFY_EVENT_INODE 2 195#define FSNOTIFY_EVENT_INODE 2
196 196
197/* 197/*
198 * a mark is simply an object attached to an in core inode which allows an 198 * A mark is simply an object attached to an in core inode which allows an
199 * fsnotify listener to indicate they are either no longer interested in events 199 * fsnotify listener to indicate they are either no longer interested in events
200 * of a type matching mask or only interested in those events. 200 * of a type matching mask or only interested in those events.
201 * 201 *
202 * these are flushed when an inode is evicted from core and may be flushed 202 * These are flushed when an inode is evicted from core and may be flushed
203 * when the inode is modified (as seen by fsnotify_access). Some fsnotify users 203 * when the inode is modified (as seen by fsnotify_access). Some fsnotify
204 * (such as dnotify) will flush these when the open fd is closed and not at 204 * users (such as dnotify) will flush these when the open fd is closed and not
205 * inode eviction or modification. 205 * at inode eviction or modification.
206 *
207 * Text in brackets is showing the lock(s) protecting modifications of a
208 * particular entry. obj_lock means either inode->i_lock or
209 * mnt->mnt_root->d_lock depending on the mark type.
206 */ 210 */
207struct fsnotify_mark { 211struct fsnotify_mark {
208 __u32 mask; /* mask this mark is for */ 212 /* Mask this mark is for [mark->lock, group->mark_mutex] */
209 /* we hold ref for each i_list and g_list. also one ref for each 'thing' 213 __u32 mask;
214 /* We hold one for presence in g_list. Also one ref for each 'thing'
210 * in kernel that found and may be using this mark. */ 215 * in kernel that found and may be using this mark. */
211 atomic_t refcnt; /* active things looking at this mark */ 216 atomic_t refcnt;
212 struct fsnotify_group *group; /* group this mark is for */ 217 /* Group this mark is for. Set on mark creation, stable until last ref
213 struct list_head g_list; /* list of marks by group->i_fsnotify_marks 218 * is dropped */
214 * Also reused for queueing mark into 219 struct fsnotify_group *group;
215 * destroy_list when it's waiting for 220 /* List of marks by group->i_fsnotify_marks. Also reused for queueing
216 * the end of SRCU period before it can 221 * mark into destroy_list when it's waiting for the end of SRCU period
217 * be freed */ 222 * before it can be freed. [group->mark_mutex] */
218 spinlock_t lock; /* protect group and inode */ 223 struct list_head g_list;
219 struct hlist_node obj_list; /* list of marks for inode / vfsmount */ 224 /* Protects inode / mnt pointers, flags, masks */
220 struct list_head free_list; /* tmp list used when freeing this mark */ 225 spinlock_t lock;
221 union { 226 /* List of marks for inode / vfsmount [obj_lock] */
227 struct hlist_node obj_list;
228 union { /* Object pointer [mark->lock, group->mark_mutex] */
222 struct inode *inode; /* inode this mark is associated with */ 229 struct inode *inode; /* inode this mark is associated with */
223 struct vfsmount *mnt; /* vfsmount this mark is associated with */ 230 struct vfsmount *mnt; /* vfsmount this mark is associated with */
224 }; 231 };
225 __u32 ignored_mask; /* events types to ignore */ 232 /* Events types to ignore [mark->lock, group->mark_mutex] */
233 __u32 ignored_mask;
226#define FSNOTIFY_MARK_FLAG_INODE 0x01 234#define FSNOTIFY_MARK_FLAG_INODE 0x01
227#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02 235#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
228#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04 236#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
229#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 237#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
230#define FSNOTIFY_MARK_FLAG_ALIVE 0x10 238#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
231 unsigned int flags; /* vfsmount or inode mark? */ 239#define FSNOTIFY_MARK_FLAG_ATTACHED 0x20
240 unsigned int flags; /* flags [mark->lock] */
232 void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ 241 void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
233}; 242};
234 243
@@ -345,8 +354,10 @@ extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_
345/* given a group and a mark, flag mark to be freed when all references are dropped */ 354/* given a group and a mark, flag mark to be freed when all references are dropped */
346extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, 355extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
347 struct fsnotify_group *group); 356 struct fsnotify_group *group);
348extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, 357/* detach mark from inode / mount list, group list, drop inode reference */
349 struct fsnotify_group *group); 358extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
359/* free mark */
360extern void fsnotify_free_mark(struct fsnotify_mark *mark);
350/* run all the marks in a group, and clear all of the vfsmount marks */ 361/* run all the marks in a group, and clear all of the vfsmount marks */
351extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group); 362extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
352/* run all the marks in a group, and clear all of the inode marks */ 363/* run all the marks in a group, and clear all of the inode marks */
@@ -357,7 +368,7 @@ extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, un
357extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); 368extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
358extern void fsnotify_get_mark(struct fsnotify_mark *mark); 369extern void fsnotify_get_mark(struct fsnotify_mark *mark);
359extern void fsnotify_put_mark(struct fsnotify_mark *mark); 370extern void fsnotify_put_mark(struct fsnotify_mark *mark);
360extern void fsnotify_unmount_inodes(struct list_head *list); 371extern void fsnotify_unmount_inodes(struct super_block *sb);
361 372
362/* put here because inotify does some weird stuff when destroying watches */ 373/* put here because inotify does some weird stuff when destroying watches */
363extern void fsnotify_init_event(struct fsnotify_event *event, 374extern void fsnotify_init_event(struct fsnotify_event *event,
@@ -393,7 +404,7 @@ static inline u32 fsnotify_get_cookie(void)
393 return 0; 404 return 0;
394} 405}
395 406
396static inline void fsnotify_unmount_inodes(struct list_head *list) 407static inline void fsnotify_unmount_inodes(struct super_block *sb)
397{} 408{}
398 409
399#endif /* CONFIG_FSNOTIFY */ 410#endif /* CONFIG_FSNOTIFY */
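fsnotify_destroy_mark_locked() is split into fsnotify_detach_mark() plus fsnotify_free_mark(), so backends detach under the group's mark_mutex and finish the teardown outside it. A sketch of that two-step pattern, assuming the locking follows the bracketed annotations added above:

#include <linux/fsnotify_backend.h>
#include <linux/mutex.h>

static void demo_kill_mark(struct fsnotify_group *group,
                           struct fsnotify_mark *mark)
{
        mutex_lock(&group->mark_mutex);
        fsnotify_detach_mark(mark);     /* unhook from object list and g_list */
        mutex_unlock(&group->mark_mutex);

        fsnotify_free_mark(mark);       /* complete teardown outside the mutex */
}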
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 5383bb1394a1..7ff168d06967 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -59,6 +59,8 @@ struct gen_pool {
59 59
60 genpool_algo_t algo; /* allocation function */ 60 genpool_algo_t algo; /* allocation function */
61 void *data; 61 void *data;
62
63 const char *name;
62}; 64};
63 65
64/* 66/*
@@ -118,8 +120,8 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
118 unsigned long start, unsigned int nr, void *data); 120 unsigned long start, unsigned int nr, void *data);
119 121
120extern struct gen_pool *devm_gen_pool_create(struct device *dev, 122extern struct gen_pool *devm_gen_pool_create(struct device *dev,
121 int min_alloc_order, int nid); 123 int min_alloc_order, int nid, const char *name);
122extern struct gen_pool *gen_pool_get(struct device *dev); 124extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
123 125
124bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, 126bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
125 size_t size); 127 size_t size);
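Both devm_gen_pool_create() and gen_pool_get() gain a name argument so one device can expose several pools. A hedged usage sketch; the "sram" name is invented, and NULL is assumed to select an unnamed pool:

#include <linux/genalloc.h>

static struct gen_pool *demo_lookup_pool(struct device *dev)
{
        struct gen_pool *pool = gen_pool_get(dev, "sram");      /* name is illustrative */

        return pool ? pool : gen_pool_get(dev, NULL);           /* fall back to the unnamed pool */
}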
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index ec274e0f4ed2..2adbfa6d02bc 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -13,6 +13,7 @@
13#include <linux/kdev_t.h> 13#include <linux/kdev_t.h>
14#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/percpu-refcount.h>
16 17
17#ifdef CONFIG_BLOCK 18#ifdef CONFIG_BLOCK
18 19
@@ -124,7 +125,7 @@ struct hd_struct {
124#else 125#else
125 struct disk_stats dkstats; 126 struct disk_stats dkstats;
126#endif 127#endif
127 atomic_t ref; 128 struct percpu_ref ref;
128 struct rcu_head rcu_head; 129 struct rcu_head rcu_head;
129}; 130};
130 131
@@ -611,7 +612,7 @@ extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
611 sector_t len, int flags, 612 sector_t len, int flags,
612 struct partition_meta_info 613 struct partition_meta_info
613 *info); 614 *info);
614extern void __delete_partition(struct hd_struct *); 615extern void __delete_partition(struct percpu_ref *);
615extern void delete_partition(struct gendisk *, int); 616extern void delete_partition(struct gendisk *, int);
616extern void printk_all_partitions(void); 617extern void printk_all_partitions(void);
617 618
@@ -640,27 +641,39 @@ extern ssize_t part_fail_store(struct device *dev,
640 const char *buf, size_t count); 641 const char *buf, size_t count);
641#endif /* CONFIG_FAIL_MAKE_REQUEST */ 642#endif /* CONFIG_FAIL_MAKE_REQUEST */
642 643
643static inline void hd_ref_init(struct hd_struct *part) 644static inline int hd_ref_init(struct hd_struct *part)
644{ 645{
645 atomic_set(&part->ref, 1); 646 if (percpu_ref_init(&part->ref, __delete_partition, 0,
646 smp_mb(); 647 GFP_KERNEL))
648 return -ENOMEM;
649 return 0;
647} 650}
648 651
649static inline void hd_struct_get(struct hd_struct *part) 652static inline void hd_struct_get(struct hd_struct *part)
650{ 653{
651 atomic_inc(&part->ref); 654 percpu_ref_get(&part->ref);
652 smp_mb__after_atomic();
653} 655}
654 656
655static inline int hd_struct_try_get(struct hd_struct *part) 657static inline int hd_struct_try_get(struct hd_struct *part)
656{ 658{
657 return atomic_inc_not_zero(&part->ref); 659 return percpu_ref_tryget_live(&part->ref);
658} 660}
659 661
660static inline void hd_struct_put(struct hd_struct *part) 662static inline void hd_struct_put(struct hd_struct *part)
661{ 663{
662 if (atomic_dec_and_test(&part->ref)) 664 percpu_ref_put(&part->ref);
663 __delete_partition(part); 665}
666
667static inline void hd_struct_kill(struct hd_struct *part)
668{
669 percpu_ref_kill(&part->ref);
670}
671
672static inline void hd_free_part(struct hd_struct *part)
673{
674 free_part_stats(part);
675 free_part_info(part);
676 percpu_ref_exit(&part->ref);
664} 677}
665 678
666/* 679/*
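The partition refcount moves from a bare atomic_t to a percpu_ref, so initialization can now fail, lookup uses percpu_ref_tryget_live(), and teardown becomes an explicit kill plus the usual puts, with __delete_partition() as the release callback. A condensed lifecycle sketch; real code spreads these calls across add_partition()/delete_partition():

#include <linux/genhd.h>

static int demo_part_lifecycle(struct hd_struct *part)
{
        if (hd_ref_init(part))                  /* percpu_ref_init() can fail now */
                return -ENOMEM;

        if (hd_struct_try_get(part)) {          /* fails once the ref has been killed */
                /* ... use the partition ... */
                hd_struct_put(part);
        }

        hd_struct_kill(part);   /* start teardown; __delete_partition() runs
                                 * once the remaining references are dropped */
        return 0;
}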
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ad35f300b9a4..f92cbd2f4450 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -63,7 +63,10 @@ struct vm_area_struct;
63 * but it is definitely preferable to use the flag rather than opencode endless 63 * but it is definitely preferable to use the flag rather than opencode endless
64 * loop around allocator. 64 * loop around allocator.
65 * 65 *
66 * __GFP_NORETRY: The VM implementation must not retry indefinitely. 66 * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
67 * return NULL when direct reclaim and memory compaction have failed to allow
68 * the allocation to succeed. The OOM killer is not called with the current
69 * implementation.
67 * 70 *
68 * __GFP_MOVABLE: Flag that this page will be movable by the page migration 71 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
69 * mechanism or reclaimed 72 * mechanism or reclaimed
@@ -300,22 +303,31 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
300 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); 303 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
301} 304}
302 305
303static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, 306/*
304 unsigned int order) 307 * Allocate pages, preferring the node given as nid. The node must be valid and
308 * online. For more general interface, see alloc_pages_node().
309 */
310static inline struct page *
311__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
305{ 312{
306 /* Unknown node is current node */ 313 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
307 if (nid < 0) 314 VM_WARN_ON(!node_online(nid));
308 nid = numa_node_id();
309 315
310 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); 316 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
311} 317}
312 318
313static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, 319/*
320 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
321 * prefer the current CPU's closest node. Otherwise node must be valid and
322 * online.
323 */
324static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
314 unsigned int order) 325 unsigned int order)
315{ 326{
316 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid)); 327 if (nid == NUMA_NO_NODE)
328 nid = numa_mem_id();
317 329
318 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); 330 return __alloc_pages_node(nid, gfp_mask, order);
319} 331}
320 332
321#ifdef CONFIG_NUMA 333#ifdef CONFIG_NUMA
@@ -354,7 +366,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
354 366
355void *alloc_pages_exact(size_t size, gfp_t gfp_mask); 367void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
356void free_pages_exact(void *virt, size_t size); 368void free_pages_exact(void *virt, size_t size);
357/* This is different from alloc_pages_exact_node !!! */
358void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); 369void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
359 370
360#define __get_free_page(gfp_mask) \ 371#define __get_free_page(gfp_mask) \
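alloc_pages_node() keeps accepting NUMA_NO_NODE (now mapped to the local memory node), while the new __alloc_pages_node() is the strict variant that insists on a valid, online nid. A small sketch of when each fits; the wrapper name is illustrative:

#include <linux/gfp.h>

static struct page *demo_alloc_on(int nid, gfp_t gfp)
{
        if (nid == NUMA_NO_NODE)
                return alloc_pages_node(nid, gfp, 0);   /* falls back to numa_mem_id() */

        return __alloc_pages_node(nid, gfp, 0);         /* caller vouches nid is valid and online */
}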
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index adac255aee86..14cac67c2012 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -47,17 +47,17 @@ enum gpiod_flags {
47int gpiod_count(struct device *dev, const char *con_id); 47int gpiod_count(struct device *dev, const char *con_id);
48 48
49/* Acquire and dispose GPIOs */ 49/* Acquire and dispose GPIOs */
50struct gpio_desc *__must_check __gpiod_get(struct device *dev, 50struct gpio_desc *__must_check gpiod_get(struct device *dev,
51 const char *con_id, 51 const char *con_id,
52 enum gpiod_flags flags); 52 enum gpiod_flags flags);
53struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, 53struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
54 const char *con_id, 54 const char *con_id,
55 unsigned int idx, 55 unsigned int idx,
56 enum gpiod_flags flags); 56 enum gpiod_flags flags);
57struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev, 57struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
58 const char *con_id, 58 const char *con_id,
59 enum gpiod_flags flags); 59 enum gpiod_flags flags);
60struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev, 60struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
61 const char *con_id, 61 const char *con_id,
62 unsigned int index, 62 unsigned int index,
63 enum gpiod_flags flags); 63 enum gpiod_flags flags);
@@ -70,18 +70,18 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
70void gpiod_put(struct gpio_desc *desc); 70void gpiod_put(struct gpio_desc *desc);
71void gpiod_put_array(struct gpio_descs *descs); 71void gpiod_put_array(struct gpio_descs *descs);
72 72
73struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev, 73struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
74 const char *con_id, 74 const char *con_id,
75 enum gpiod_flags flags); 75 enum gpiod_flags flags);
76struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev, 76struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
77 const char *con_id, 77 const char *con_id,
78 unsigned int idx, 78 unsigned int idx,
79 enum gpiod_flags flags); 79 enum gpiod_flags flags);
80struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev, 80struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
81 const char *con_id, 81 const char *con_id,
82 enum gpiod_flags flags); 82 enum gpiod_flags flags);
83struct gpio_desc *__must_check 83struct gpio_desc *__must_check
84__devm_gpiod_get_index_optional(struct device *dev, const char *con_id, 84devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
85 unsigned int index, enum gpiod_flags flags); 85 unsigned int index, enum gpiod_flags flags);
86struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev, 86struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
87 const char *con_id, 87 const char *con_id,
@@ -146,31 +146,31 @@ static inline int gpiod_count(struct device *dev, const char *con_id)
146 return 0; 146 return 0;
147} 147}
148 148
149static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, 149static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
150 const char *con_id, 150 const char *con_id,
151 enum gpiod_flags flags) 151 enum gpiod_flags flags)
152{ 152{
153 return ERR_PTR(-ENOSYS); 153 return ERR_PTR(-ENOSYS);
154} 154}
155static inline struct gpio_desc *__must_check 155static inline struct gpio_desc *__must_check
156__gpiod_get_index(struct device *dev, 156gpiod_get_index(struct device *dev,
157 const char *con_id, 157 const char *con_id,
158 unsigned int idx, 158 unsigned int idx,
159 enum gpiod_flags flags) 159 enum gpiod_flags flags)
160{ 160{
161 return ERR_PTR(-ENOSYS); 161 return ERR_PTR(-ENOSYS);
162} 162}
163 163
164static inline struct gpio_desc *__must_check 164static inline struct gpio_desc *__must_check
165__gpiod_get_optional(struct device *dev, const char *con_id, 165gpiod_get_optional(struct device *dev, const char *con_id,
166 enum gpiod_flags flags) 166 enum gpiod_flags flags)
167{ 167{
168 return ERR_PTR(-ENOSYS); 168 return ERR_PTR(-ENOSYS);
169} 169}
170 170
171static inline struct gpio_desc *__must_check 171static inline struct gpio_desc *__must_check
172__gpiod_get_index_optional(struct device *dev, const char *con_id, 172gpiod_get_index_optional(struct device *dev, const char *con_id,
173 unsigned int index, enum gpiod_flags flags) 173 unsigned int index, enum gpiod_flags flags)
174{ 174{
175 return ERR_PTR(-ENOSYS); 175 return ERR_PTR(-ENOSYS);
176} 176}
@@ -206,7 +206,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
206} 206}
207 207
208static inline struct gpio_desc *__must_check 208static inline struct gpio_desc *__must_check
209__devm_gpiod_get(struct device *dev, 209devm_gpiod_get(struct device *dev,
210 const char *con_id, 210 const char *con_id,
211 enum gpiod_flags flags) 211 enum gpiod_flags flags)
212{ 212{
@@ -214,7 +214,7 @@ __devm_gpiod_get(struct device *dev,
214} 214}
215static inline 215static inline
216struct gpio_desc *__must_check 216struct gpio_desc *__must_check
217__devm_gpiod_get_index(struct device *dev, 217devm_gpiod_get_index(struct device *dev,
218 const char *con_id, 218 const char *con_id,
219 unsigned int idx, 219 unsigned int idx,
220 enum gpiod_flags flags) 220 enum gpiod_flags flags)
@@ -223,14 +223,14 @@ __devm_gpiod_get_index(struct device *dev,
223} 223}
224 224
225static inline struct gpio_desc *__must_check 225static inline struct gpio_desc *__must_check
226__devm_gpiod_get_optional(struct device *dev, const char *con_id, 226devm_gpiod_get_optional(struct device *dev, const char *con_id,
227 enum gpiod_flags flags) 227 enum gpiod_flags flags)
228{ 228{
229 return ERR_PTR(-ENOSYS); 229 return ERR_PTR(-ENOSYS);
230} 230}
231 231
232static inline struct gpio_desc *__must_check 232static inline struct gpio_desc *__must_check
233__devm_gpiod_get_index_optional(struct device *dev, const char *con_id, 233devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
234 unsigned int index, enum gpiod_flags flags) 234 unsigned int index, enum gpiod_flags flags)
235{ 235{
236 return ERR_PTR(-ENOSYS); 236 return ERR_PTR(-ENOSYS);
@@ -424,42 +424,6 @@ static inline struct gpio_desc *devm_get_gpiod_from_child(
424 424
425#endif /* CONFIG_GPIOLIB */ 425#endif /* CONFIG_GPIOLIB */
426 426
427/*
428 * Vararg-hacks! This is done to transition the kernel to always pass
429 * the options flags argument to the below functions. During a transition
430 * phase these vararg macros make both old-and-newstyle code compile,
431 * but when all calls to the elder API are removed, these should go away
432 * and the __gpiod_get() etc functions above be renamed just gpiod_get()
433 * etc.
434 */
435#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
436#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
437#define __gpiod_get_index(dev, con_id, index, flags, ...) \
438 __gpiod_get_index(dev, con_id, index, flags)
439#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
440#define __gpiod_get_optional(dev, con_id, flags, ...) \
441 __gpiod_get_optional(dev, con_id, flags)
442#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
443#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
444 __gpiod_get_index_optional(dev, con_id, index, flags)
445#define gpiod_get_index_optional(varargs...) \
446 __gpiod_get_index_optional(varargs, GPIOD_ASIS)
447#define __devm_gpiod_get(dev, con_id, flags, ...) \
448 __devm_gpiod_get(dev, con_id, flags)
449#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
450#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
451 __devm_gpiod_get_index(dev, con_id, index, flags)
452#define devm_gpiod_get_index(varargs...) \
453 __devm_gpiod_get_index(varargs, GPIOD_ASIS)
454#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
455 __devm_gpiod_get_optional(dev, con_id, flags)
456#define devm_gpiod_get_optional(varargs...) \
457 __devm_gpiod_get_optional(varargs, GPIOD_ASIS)
458#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
459 __devm_gpiod_get_index_optional(dev, con_id, index, flags)
460#define devm_gpiod_get_index_optional(varargs...) \
461 __devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
462
463#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) 427#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
464 428
465int gpiod_export(struct gpio_desc *desc, bool direction_may_change); 429int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
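With the vararg transition macros gone, the gpiod_get*() and devm_gpiod_get*() calls are plain functions again and the flags argument is mandatory. A hedged consumer sketch; the "reset" con_id is a placeholder:

#include <linux/gpio/consumer.h>
#include <linux/err.h>

static int demo_probe_reset(struct device *dev)
{
        struct gpio_desc *reset;

        reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);    /* flags can no longer be omitted */
        if (IS_ERR(reset))
                return PTR_ERR(reset);

        gpiod_set_value(reset, 1);
        return 0;
}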
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c8393cd4d44f..1aed31c5ffba 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
6#include <linux/irq.h> 6#include <linux/irq.h>
7#include <linux/irqchip/chained_irq.h> 7#include <linux/irqchip/chained_irq.h>
8#include <linux/irqdomain.h> 8#include <linux/irqdomain.h>
9#include <linux/lockdep.h>
9#include <linux/pinctrl/pinctrl.h> 10#include <linux/pinctrl/pinctrl.h>
10 11
11struct device; 12struct device;
@@ -64,6 +65,17 @@ struct seq_file;
64 * registers. 65 * registers.
65 * @irq_not_threaded: flag must be set if @can_sleep is set but the 66 * @irq_not_threaded: flag must be set if @can_sleep is set but the
66 * IRQs don't need to be threaded 67 * IRQs don't need to be threaded
68 * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
69 * @irqdomain: Interrupt translation domain; responsible for mapping
70 * between GPIO hwirq number and linux irq number
71 * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
72 * @irq_handler: the irq handler to use (often a predefined irq core function)
73 * for GPIO IRQs, provided by GPIO driver
74 * @irq_default_type: default IRQ triggering type applied during GPIO driver
75 * initialization, provided by GPIO driver
76 * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
77 * provided by GPIO driver
78 * @lock_key: per GPIO IRQ chip lockdep class
67 * 79 *
68 * A gpio_chip can help platforms abstract various sources of GPIOs so 80 * A gpio_chip can help platforms abstract various sources of GPIOs so
69 * they can all be accessed through a common programing interface. 81 * they can all be accessed through a common programing interface.
@@ -126,6 +138,7 @@ struct gpio_chip {
126 irq_flow_handler_t irq_handler; 138 irq_flow_handler_t irq_handler;
127 unsigned int irq_default_type; 139 unsigned int irq_default_type;
128 int irq_parent; 140 int irq_parent;
141 struct lock_class_key *lock_key;
129#endif 142#endif
130 143
131#if defined(CONFIG_OF_GPIO) 144#if defined(CONFIG_OF_GPIO)
@@ -171,11 +184,25 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
171 int parent_irq, 184 int parent_irq,
172 irq_flow_handler_t parent_handler); 185 irq_flow_handler_t parent_handler);
173 186
174int gpiochip_irqchip_add(struct gpio_chip *gpiochip, 187int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
175 struct irq_chip *irqchip, 188 struct irq_chip *irqchip,
176 unsigned int first_irq, 189 unsigned int first_irq,
177 irq_flow_handler_t handler, 190 irq_flow_handler_t handler,
178 unsigned int type); 191 unsigned int type,
192 struct lock_class_key *lock_key);
193
194#ifdef CONFIG_LOCKDEP
195#define gpiochip_irqchip_add(...) \
196( \
197 ({ \
198 static struct lock_class_key _key; \
199 _gpiochip_irqchip_add(__VA_ARGS__, &_key); \
200 }) \
201)
202#else
203#define gpiochip_irqchip_add(...) \
204 _gpiochip_irqchip_add(__VA_ARGS__, NULL)
205#endif
179 206
180#endif /* CONFIG_GPIOLIB_IRQCHIP */ 207#endif /* CONFIG_GPIOLIB_IRQCHIP */
181 208
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index e2706140eaff..c0d712d22b07 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -57,5 +57,6 @@ struct gpiod_lookup_table {
57} 57}
58 58
59void gpiod_add_lookup_table(struct gpiod_lookup_table *table); 59void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
60void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
60 61
61#endif /* __LINUX_GPIO_MACHINE_H */ 62#endif /* __LINUX_GPIO_MACHINE_H */
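gpiod_remove_lookup_table() gives board code a way to undo gpiod_add_lookup_table(), for example on unload. A hedged sketch; the chip label, offset and con_id are invented:

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table demo_gpios = {
        .dev_id = "demo-device",
        .table = {
                GPIO_LOOKUP("gpio-bank0", 3, "reset", GPIO_ACTIVE_LOW),
                { },
        },
};

static void demo_board_teardown(void)
{
        gpiod_remove_lookup_table(&demo_gpios); /* pairs with gpiod_add_lookup_table() */
}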
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f10b20f05159..ecb080d6ff42 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
33extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 33extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
34 unsigned long addr, pgprot_t newprot, 34 unsigned long addr, pgprot_t newprot,
35 int prot_numa); 35 int prot_numa);
36int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
37 unsigned long pfn, bool write);
36 38
37enum transparent_hugepage_flag { 39enum transparent_hugepage_flag {
38 TRANSPARENT_HUGEPAGE_FLAG, 40 TRANSPARENT_HUGEPAGE_FLAG,
@@ -122,7 +124,7 @@ extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
122#endif 124#endif
123extern int hugepage_madvise(struct vm_area_struct *vma, 125extern int hugepage_madvise(struct vm_area_struct *vma,
124 unsigned long *vm_flags, int advice); 126 unsigned long *vm_flags, int advice);
125extern void __vma_adjust_trans_huge(struct vm_area_struct *vma, 127extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
126 unsigned long start, 128 unsigned long start,
127 unsigned long end, 129 unsigned long end,
128 long adjust_next); 130 long adjust_next);
@@ -138,15 +140,6 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
138 else 140 else
139 return 0; 141 return 0;
140} 142}
141static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
142 unsigned long start,
143 unsigned long end,
144 long adjust_next)
145{
146 if (!vma->anon_vma || vma->vm_ops)
147 return;
148 __vma_adjust_trans_huge(vma, start, end, adjust_next);
149}
150static inline int hpage_nr_pages(struct page *page) 143static inline int hpage_nr_pages(struct page *page)
151{ 144{
152 if (unlikely(PageTransHuge(page))) 145 if (unlikely(PageTransHuge(page)))
@@ -164,6 +157,13 @@ static inline bool is_huge_zero_page(struct page *page)
164 return ACCESS_ONCE(huge_zero_page) == page; 157 return ACCESS_ONCE(huge_zero_page) == page;
165} 158}
166 159
160static inline bool is_huge_zero_pmd(pmd_t pmd)
161{
162 return is_huge_zero_page(pmd_page(pmd));
163}
164
165struct page *get_huge_zero_page(void);
166
167#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 167#else /* CONFIG_TRANSPARENT_HUGEPAGE */
168#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) 168#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
169#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) 169#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d891f949466a..5e35379f58a5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,6 +35,9 @@ struct resv_map {
35 struct kref refs; 35 struct kref refs;
36 spinlock_t lock; 36 spinlock_t lock;
37 struct list_head regions; 37 struct list_head regions;
38 long adds_in_progress;
39 struct list_head region_cache;
40 long region_cache_count;
38}; 41};
39extern struct resv_map *resv_map_alloc(void); 42extern struct resv_map *resv_map_alloc(void);
40void resv_map_release(struct kref *ref); 43void resv_map_release(struct kref *ref);
@@ -80,11 +83,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80int hugetlb_reserve_pages(struct inode *inode, long from, long to, 83int hugetlb_reserve_pages(struct inode *inode, long from, long to,
81 struct vm_area_struct *vma, 84 struct vm_area_struct *vma,
82 vm_flags_t vm_flags); 85 vm_flags_t vm_flags);
83void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); 86long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
87 long freed);
84int dequeue_hwpoisoned_huge_page(struct page *page); 88int dequeue_hwpoisoned_huge_page(struct page *page);
85bool isolate_huge_page(struct page *page, struct list_head *list); 89bool isolate_huge_page(struct page *page, struct list_head *list);
86void putback_active_hugepage(struct page *page); 90void putback_active_hugepage(struct page *page);
87void free_huge_page(struct page *page); 91void free_huge_page(struct page *page);
92void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
93extern struct mutex *hugetlb_fault_mutex_table;
94u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
95 struct vm_area_struct *vma,
96 struct address_space *mapping,
97 pgoff_t idx, unsigned long address);
88 98
89#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 99#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
90pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); 100pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -320,9 +330,13 @@ struct huge_bootmem_page {
320#endif 330#endif
321}; 331};
322 332
333struct page *alloc_huge_page(struct vm_area_struct *vma,
334 unsigned long addr, int avoid_reserve);
323struct page *alloc_huge_page_node(struct hstate *h, int nid); 335struct page *alloc_huge_page_node(struct hstate *h, int nid);
324struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, 336struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
325 unsigned long addr, int avoid_reserve); 337 unsigned long addr, int avoid_reserve);
338int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
339 pgoff_t idx);
326 340
327/* arch callback */ 341/* arch callback */
328int __init alloc_bootmem_huge_page(struct hstate *h); 342int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -471,6 +485,7 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
471 485
472#else /* CONFIG_HUGETLB_PAGE */ 486#else /* CONFIG_HUGETLB_PAGE */
473struct hstate {}; 487struct hstate {};
488#define alloc_huge_page(v, a, r) NULL
474#define alloc_huge_page_node(h, nid) NULL 489#define alloc_huge_page_node(h, nid) NULL
475#define alloc_huge_page_noerr(v, a, r) NULL 490#define alloc_huge_page_noerr(v, a, r) NULL
476#define alloc_bootmem_huge_page(h) NULL 491#define alloc_bootmem_huge_page(h) NULL
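Exporting hugetlb_fault_mutex_table and hugetlb_fault_mutex_hash() lets callers outside the fault path (hole punching, preallocation and the like) serialize against page faults on the same index. A sketch of that pattern, with all arguments assumed to come from the caller's context:

#include <linux/hugetlb.h>
#include <linux/mutex.h>

static void demo_serialized_instantiate(struct hstate *h, struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        struct address_space *mapping,
                                        pgoff_t idx, unsigned long address)
{
        u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);

        mutex_lock(&hugetlb_fault_mutex_table[hash]);
        /* ... alloc_huge_page() / huge_add_to_page_cache() for idx ... */
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}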
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 30d3a1f79450..54733d5b503e 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -977,6 +977,11 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
977 const char *mod_name); 977 const char *mod_name);
978void vmbus_driver_unregister(struct hv_driver *hv_driver); 978void vmbus_driver_unregister(struct hv_driver *hv_driver);
979 979
980int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
981 resource_size_t min, resource_size_t max,
982 resource_size_t size, resource_size_t align,
983 bool fb_overlap_ok);
984
980/** 985/**
981 * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device 986 * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
982 * 987 *
@@ -1233,8 +1238,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
1233 1238
1234void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1239void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1235 1240
1236extern struct resource hyperv_mmio;
1237
1238/* 1241/*
1239 * Negotiated version with the Host. 1242 * Negotiated version with the Host.
1240 */ 1243 */
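vmbus_allocate_mmio() replaces direct use of the removed hyperv_mmio resource: drivers ask the VMBus core for a suitably sized and aligned MMIO window instead. A hedged call sketch; the size, alignment and overlap choice are illustrative:

#include <linux/hyperv.h>

static int demo_claim_mmio(struct hv_device *hdev, struct resource **res)
{
        return vmbus_allocate_mmio(res, hdev,
                                   0, -1,       /* search the whole address range */
                                   0x800000,    /* 8 MiB window */
                                   0x100000,    /* 1 MiB alignment */
                                   true);       /* framebuffer overlap allowed */
}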
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e83a738a3b87..768063baafbf 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -121,6 +121,9 @@ extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
121extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, 121extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
122 u8 command, u8 length, 122 u8 command, u8 length,
123 const u8 *values); 123 const u8 *values);
124extern s32
125i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
126 u8 command, u8 length, u8 *values);
124#endif /* I2C */ 127#endif /* I2C */
125 128
126/** 129/**
@@ -550,11 +553,12 @@ void i2c_lock_adapter(struct i2c_adapter *);
550void i2c_unlock_adapter(struct i2c_adapter *); 553void i2c_unlock_adapter(struct i2c_adapter *);
551 554
552/*flags for the client struct: */ 555/*flags for the client struct: */
553#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ 556#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
554#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ 557#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
555 /* Must equal I2C_M_TEN below */ 558 /* Must equal I2C_M_TEN below */
556#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ 559#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
557#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ 560#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
561#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
558 /* Must match I2C_M_STOP|IGNORE_NAK */ 562 /* Must match I2C_M_STOP|IGNORE_NAK */
559 563
560/* i2c adapter classes (bitmask) */ 564/* i2c adapter classes (bitmask) */
@@ -638,6 +642,8 @@ extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
638/* must call put_device() when done with returned i2c_adapter device */ 642/* must call put_device() when done with returned i2c_adapter device */
639extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node); 643extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
640 644
645/* must call i2c_put_adapter() when done with returned i2c_adapter device */
646struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
641#else 647#else
642 648
643static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) 649static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -649,6 +655,11 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
649{ 655{
650 return NULL; 656 return NULL;
651} 657}
658
659static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
660{
661 return NULL;
662}
652#endif /* CONFIG_OF */ 663#endif /* CONFIG_OF */
653 664
654#endif /* _LINUX_I2C_H */ 665#endif /* _LINUX_I2C_H */
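of_get_i2c_adapter_by_node() is the counterpart to of_find_i2c_adapter_by_node() that, per the comment above, must be balanced with i2c_put_adapter(), so it is assumed to pin the adapter (and its owning module) for the caller. A minimal sketch with the probing body elided:

#include <linux/i2c.h>
#include <linux/of.h>

static int demo_with_adapter(struct device_node *np)
{
        struct i2c_adapter *adap = of_get_i2c_adapter_by_node(np);

        if (!adap)
                return -EPROBE_DEFER;

        /* ... issue transfers on adap ... */

        i2c_put_adapter(adap);          /* drops the reference taken above */
        return 0;
}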
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b9c7897dc566..cfa906f28b7a 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2074,8 +2074,8 @@ enum ieee80211_tdls_actioncode {
2074#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) 2074#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
2075#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) 2075#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
2076 2076
2077#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
2077#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) 2078#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
2078#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
2079 2079
2080/* TDLS specific payload type in the LLC/SNAP header */ 2080/* TDLS specific payload type in the LLC/SNAP header */
2081#define WLAN_TDLS_SNAP_RFTYPE 0x2 2081#define WLAN_TDLS_SNAP_RFTYPE 0x2
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 193ad488d3e2..908429216d9f 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,6 +37,7 @@ static inline struct igmpv3_query *
37 return (struct igmpv3_query *)skb_transport_header(skb); 37 return (struct igmpv3_query *)skb_transport_header(skb);
38} 38}
39 39
40extern int sysctl_igmp_llm_reports;
40extern int sysctl_igmp_max_memberships; 41extern int sysctl_igmp_max_memberships;
41extern int sysctl_igmp_max_msf; 42extern int sysctl_igmp_max_msf;
42extern int sysctl_igmp_qrv; 43extern int sysctl_igmp_qrv;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 2c476acb87d9..3c17cd7fdf06 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -166,6 +166,7 @@ struct st_sensor_transfer_function {
166/** 166/**
167 * struct st_sensor_settings - ST specific sensor settings 167 * struct st_sensor_settings - ST specific sensor settings
168 * @wai: Contents of WhoAmI register. 168 * @wai: Contents of WhoAmI register.
169 * @wai_addr: The address of WhoAmI register.
169 * @sensors_supported: List of supported sensors by struct itself. 170 * @sensors_supported: List of supported sensors by struct itself.
170 * @ch: IIO channels for the sensor. 171 * @ch: IIO channels for the sensor.
171 * @odr: Output data rate register and ODR list available. 172 * @odr: Output data rate register and ODR list available.
@@ -179,6 +180,7 @@ struct st_sensor_transfer_function {
179 */ 180 */
180struct st_sensor_settings { 181struct st_sensor_settings {
181 u8 wai; 182 u8 wai;
183 u8 wai_addr;
182 char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; 184 char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
183 struct iio_chan_spec *ch; 185 struct iio_chan_spec *ch;
184 int num_ch; 186 int num_ch;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 26fb8f6342bb..fad58671c49e 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -100,7 +100,7 @@ void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff);
100 100
101/** 101/**
102 * iio_channel_cb_get_channels() - get access to the underlying channels. 102 * iio_channel_cb_get_channels() - get access to the underlying channels.
103 * @cb_buff: The callback buffer from whom we want the channel 103 * @cb_buffer: The callback buffer from whom we want the channel
104 * information. 104 * information.
105 * 105 *
106 * This function allows one to obtain information about the channels. 106 * This function allows one to obtain information about the channels.
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index f79148261d16..7bb7f673cb3f 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -645,6 +645,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
645#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL) 645#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
646 646
647/** 647/**
648 * IIO_RAD_TO_DEGREE() - Convert rad to degree
649 * @rad: A value in rad
650 *
651 * Returns the given value converted from rad to degree
652 */
653#define IIO_RAD_TO_DEGREE(rad) \
654 (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
655
656/**
648 * IIO_G_TO_M_S_2() - Convert g to meter / second**2 657 * IIO_G_TO_M_S_2() - Convert g to meter / second**2
649 * @g: A value in g 658 * @g: A value in g
650 * 659 *
@@ -652,4 +661,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
652 */ 661 */
653#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL) 662#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
654 663
664/**
665 * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
666 * @ms2: A value in meter / second**2
667 *
668 * Returns the given value converted from meter / second**2 to g
669 */
670#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
671
655#endif /* _INDUSTRIAL_IO_H_ */ 672#endif /* _INDUSTRIAL_IO_H_ */
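The new inverse helpers mirror IIO_DEGREE_TO_RAD() and IIO_G_TO_M_S_2() but add half the divisor first, so the integer division rounds to nearest instead of truncating. A tiny numeric sketch with values chosen so the results come out exact:

#include <linux/iio/iio.h>

static inline void demo_iio_unit_macros(void)
{
        u64 deg = IIO_RAD_TO_DEGREE(314159ULL); /* pi * 10^5 -> 18000000, i.e. 180 deg scaled */
        u64 g   = IIO_M_S_2_TO_G(980665ULL);    /* 9.80665 * 10^5 -> 100000, i.e. 1 g scaled */

        (void)deg;
        (void)g;
}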
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 8a1d18640ab9..9cd8f747212f 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -18,7 +18,8 @@ struct iio_chan_spec;
18 * struct iio_dev_attr - iio specific device attribute 18 * struct iio_dev_attr - iio specific device attribute
19 * @dev_attr: underlying device attribute 19 * @dev_attr: underlying device attribute
20 * @address: associated register address 20 * @address: associated register address
21 * @l: list head for maintaining list of dynamically created attrs. 21 * @l: list head for maintaining list of dynamically created attrs
22 * @c: specification for the underlying channel
22 */ 23 */
23struct iio_dev_attr { 24struct iio_dev_attr {
24 struct device_attribute dev_attr; 25 struct device_attribute dev_attr;
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index fa76c79a52a1..1c9e028e0d4a 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -18,6 +18,9 @@ struct iio_subirq {
18 bool enabled; 18 bool enabled;
19}; 19};
20 20
21struct iio_dev;
22struct iio_trigger;
23
21/** 24/**
22 * struct iio_trigger_ops - operations structure for an iio_trigger. 25 * struct iio_trigger_ops - operations structure for an iio_trigger.
23 * @owner: used to monitor usage count of the trigger. 26 * @owner: used to monitor usage count of the trigger.
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index c378ebec605e..f72f70d5a97b 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -7,8 +7,8 @@ struct iio_dev;
7struct iio_buffer_setup_ops; 7struct iio_buffer_setup_ops;
8 8
9int iio_triggered_buffer_setup(struct iio_dev *indio_dev, 9int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
10 irqreturn_t (*pollfunc_bh)(int irq, void *p), 10 irqreturn_t (*h)(int irq, void *p),
11 irqreturn_t (*pollfunc_th)(int irq, void *p), 11 irqreturn_t (*thread)(int irq, void *p),
12 const struct iio_buffer_setup_ops *setup_ops); 12 const struct iio_buffer_setup_ops *setup_ops);
13void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev); 13void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
14 14
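
The rename above brings the prototype in line with how the arguments are used: "h" runs in hard-IRQ context (typically iio_pollfunc_store_time() to grab a timestamp) and "thread" is the bottom half that reads and pushes the sample. A hedged sketch of the usual call pattern; my_trigger_handler() and my_setup_buffer() are hypothetical driver functions:

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

/* Hypothetical bottom half: passed as the "thread" argument. */
static irqreturn_t my_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;

	/* ... read the hardware and push the sample to the buffer ... */

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

static int my_setup_buffer(struct iio_dev *indio_dev)
{
	/*
	 * "h" (here iio_pollfunc_store_time) runs in hard-IRQ context to
	 * timestamp the sample; "thread" does the actual read.
	 */
	return iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
					  my_trigger_handler, NULL);
}
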
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e8493fee8160..d0b380ee7d67 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -32,6 +32,14 @@ extern struct fs_struct init_fs;
32#define INIT_CPUSET_SEQ(tsk) 32#define INIT_CPUSET_SEQ(tsk)
33#endif 33#endif
34 34
35#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
36#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
37 .lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock), \
38},
39#else
40#define INIT_PREV_CPUTIME(x)
41#endif
42
35#define INIT_SIGNALS(sig) { \ 43#define INIT_SIGNALS(sig) { \
36 .nr_threads = 1, \ 44 .nr_threads = 1, \
37 .thread_head = LIST_HEAD_INIT(init_task.thread_node), \ 45 .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -46,6 +54,7 @@ extern struct fs_struct init_fs;
46 .cputime_atomic = INIT_CPUTIME_ATOMIC, \ 54 .cputime_atomic = INIT_CPUTIME_ATOMIC, \
47 .running = 0, \ 55 .running = 0, \
48 }, \ 56 }, \
57 INIT_PREV_CPUTIME(sig) \
49 .cred_guard_mutex = \ 58 .cred_guard_mutex = \
50 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 59 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
51} 60}
@@ -246,6 +255,7 @@ extern struct task_group root_task_group;
246 INIT_TASK_RCU_TASKS(tsk) \ 255 INIT_TASK_RCU_TASKS(tsk) \
247 INIT_CPUSET_SEQ(tsk) \ 256 INIT_CPUSET_SEQ(tsk) \
248 INIT_RT_MUTEXES(tsk) \ 257 INIT_RT_MUTEXES(tsk) \
258 INIT_PREV_CPUTIME(tsk) \
249 INIT_VTIME(tsk) \ 259 INIT_VTIME(tsk) \
250 INIT_NUMA_BALANCING(tsk) \ 260 INIT_NUMA_BALANCING(tsk) \
251 INIT_KASAN(tsk) \ 261 INIT_KASAN(tsk) \
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index eecc9ea6cd58..c91e1376132b 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -9,15 +9,8 @@
9#ifndef _TOUCHSCREEN_H 9#ifndef _TOUCHSCREEN_H
10#define _TOUCHSCREEN_H 10#define _TOUCHSCREEN_H
11 11
12#include <linux/input.h> 12struct input_dev;
13 13
14#ifdef CONFIG_OF 14void touchscreen_parse_properties(struct input_dev *dev, bool multitouch);
15void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
16#else
17static inline void touchscreen_parse_of_params(struct input_dev *dev,
18 bool multitouch)
19{
20}
21#endif
22 15
23#endif 16#endif
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d9a366d24e3b..6240063bdcac 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -344,7 +344,7 @@ struct intel_iommu {
344 344
345#ifdef CONFIG_INTEL_IOMMU 345#ifdef CONFIG_INTEL_IOMMU
346 unsigned long *domain_ids; /* bitmap of domains */ 346 unsigned long *domain_ids; /* bitmap of domains */
347 struct dmar_domain **domains; /* ptr to domains */ 347 struct dmar_domain ***domains; /* ptr to domains */
348 spinlock_t lock; /* protect context, domain ids */ 348 spinlock_t lock; /* protect context, domain ids */
349 struct root_entry *root_entry; /* virtual address */ 349 struct root_entry *root_entry; /* virtual address */
350 350
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index c27dde7215b5..e399029b68c5 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -21,7 +21,7 @@
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/bug.h> 23#include <linux/bug.h>
24#include <asm/io.h> 24#include <linux/io.h>
25#include <asm/page.h> 25#include <asm/page.h>
26 26
27/* 27/*
diff --git a/include/linux/io.h b/include/linux/io.h
index fb5a99800e77..de64c1e53612 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -20,10 +20,13 @@
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/bug.h>
24#include <linux/err.h>
23#include <asm/io.h> 25#include <asm/io.h>
24#include <asm/page.h> 26#include <asm/page.h>
25 27
26struct device; 28struct device;
29struct resource;
27 30
28__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); 31__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
29void __iowrite64_copy(void __iomem *to, const void *from, size_t count); 32void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -80,6 +83,27 @@ int check_signature(const volatile void __iomem *io_addr,
80 const unsigned char *signature, int length); 83 const unsigned char *signature, int length);
81void devm_ioremap_release(struct device *dev, void *res); 84void devm_ioremap_release(struct device *dev, void *res);
82 85
86void *devm_memremap(struct device *dev, resource_size_t offset,
87 size_t size, unsigned long flags);
88void devm_memunmap(struct device *dev, void *addr);
89
90void *__devm_memremap_pages(struct device *dev, struct resource *res);
91
92#ifdef CONFIG_ZONE_DEVICE
93void *devm_memremap_pages(struct device *dev, struct resource *res);
94#else
95static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
96{
97 /*
98 * Fail attempts to call devm_memremap_pages() without
 99 * ZONE_DEVICE support enabled; this requires callers to fall
100 * back to plain devm_memremap() based on config
101 */
102 WARN_ON_ONCE(1);
103 return ERR_PTR(-ENXIO);
104}
105#endif
106
83/* 107/*
84 * Some systems do not have legacy ISA devices. 108 * Some systems do not have legacy ISA devices.
85 * /dev/port is not a valid interface on these systems. 109 * /dev/port is not a valid interface on these systems.
@@ -121,4 +145,13 @@ static inline int arch_phys_wc_index(int handle)
121#endif 145#endif
122#endif 146#endif
123 147
148enum {
149 /* See memremap() kernel-doc for usage description... */
150 MEMREMAP_WB = 1 << 0,
151 MEMREMAP_WT = 1 << 1,
152};
153
154void *memremap(resource_size_t offset, size_t size, unsigned long flags);
155void memunmap(void *addr);
156
124#endif /* _LINUX_IO_H */ 157#endif /* _LINUX_IO_H */
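
The stub above spells out the intended fallback: without ZONE_DEVICE, devm_memremap_pages() fails with -ENXIO and the caller is expected to drop back to a plain devm_memremap(). A hedged sketch of that pattern; my_map_region() is a hypothetical helper that assumes the caller owns a suitable struct resource:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* Hypothetical helper: map a memory resource, preferring ZONE_DEVICE pages. */
static void *my_map_region(struct device *dev, struct resource *res)
{
	void *addr;

	/* Try to get struct pages for the region (needs ZONE_DEVICE)... */
	addr = devm_memremap_pages(dev, res);
	if (!IS_ERR(addr))
		return addr;

	/* ...otherwise fall back to a plain write-back mapping. */
	return devm_memremap(dev, res->start, resource_size(res),
			     MEMREMAP_WB);
}
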
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 0b1e569f5ff5..f8cea14485dd 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -115,6 +115,11 @@ struct ipmi_smi_handlers {
115 implement it. */ 115 implement it. */
116 void (*set_need_watch)(void *send_info, bool enable); 116 void (*set_need_watch)(void *send_info, bool enable);
117 117
118 /*
119 * Called when flushing all pending messages.
120 */
121 void (*flush_messages)(void *send_info);
122
118 /* Called when the interface should go into "run to 123 /* Called when the interface should go into "run to
119 completion" mode. If this call sets the value to true, the 124 completion" mode. If this call sets the value to true, the
120 interface should make sure that all messages are flushed 125 interface should make sure that all messages are flushed
@@ -207,7 +212,7 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
207 upper layer until the start_processing() function in the handlers 212 upper layer until the start_processing() function in the handlers
208 is called, and the lower layer must get the interface from that 213 is called, and the lower layer must get the interface from that
209 call. */ 214 call. */
210int ipmi_register_smi(struct ipmi_smi_handlers *handlers, 215int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
211 void *send_info, 216 void *send_info,
212 struct ipmi_device_id *device_id, 217 struct ipmi_device_id *device_id,
213 struct device *dev, 218 struct device *dev,
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 82806c60aa42..f1f32af6d9b9 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -29,7 +29,9 @@ struct ipv6_devconf {
29 __s32 max_desync_factor; 29 __s32 max_desync_factor;
30 __s32 max_addresses; 30 __s32 max_addresses;
31 __s32 accept_ra_defrtr; 31 __s32 accept_ra_defrtr;
32 __s32 accept_ra_min_hop_limit;
32 __s32 accept_ra_pinfo; 33 __s32 accept_ra_pinfo;
34 __s32 ignore_routes_with_linkdown;
33#ifdef CONFIG_IPV6_ROUTER_PREF 35#ifdef CONFIG_IPV6_ROUTER_PREF
34 __s32 accept_ra_rtr_pref; 36 __s32 accept_ra_rtr_pref;
35 __s32 rtr_probe_interval; 37 __s32 rtr_probe_interval;
@@ -57,6 +59,7 @@ struct ipv6_devconf {
57 bool initialized; 59 bool initialized;
58 struct in6_addr secret; 60 struct in6_addr secret;
59 } stable_secret; 61 } stable_secret;
62 __s32 use_oif_addrs_only;
60 void *sysctl; 63 void *sysctl;
61}; 64};
62 65
@@ -94,7 +97,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
94struct inet6_skb_parm { 97struct inet6_skb_parm {
95 int iif; 98 int iif;
96 __be16 ra; 99 __be16 ra;
97 __u16 hop;
98 __u16 dst0; 100 __u16 dst0;
99 __u16 srcrt; 101 __u16 srcrt;
100 __u16 dst1; 102 __u16 dst1;
@@ -111,6 +113,7 @@ struct inet6_skb_parm {
111#define IP6SKB_REROUTED 4 113#define IP6SKB_REROUTED 4
112#define IP6SKB_ROUTERALERT 8 114#define IP6SKB_ROUTERALERT 8
113#define IP6SKB_FRAGMENTED 16 115#define IP6SKB_FRAGMENTED 16
116#define IP6SKB_HOPBYHOP 32
114}; 117};
115 118
116#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) 119#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 51744bcf74ee..6f8b34066442 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -324,8 +324,10 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
324 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips 324 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
325 * @irq_cpu_online: configure an interrupt source for a secondary CPU 325 * @irq_cpu_online: configure an interrupt source for a secondary CPU
326 * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU 326 * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
327 * @irq_suspend: function called from core code on suspend once per chip 327 * @irq_suspend: function called from core code on suspend once per
328 * @irq_resume: function called from core code on resume once per chip 328 * chip, when one or more interrupts are installed
329 * @irq_resume: function called from core code on resume once per chip,
 330 * when one or more interrupts are installed
329 * @irq_pm_shutdown: function called from core code on shutdown once per chip 331 * @irq_pm_shutdown: function called from core code on shutdown once per chip
330 * @irq_calc_mask: Optional function to set irq_data.mask for special cases 332 * @irq_calc_mask: Optional function to set irq_data.mask for special cases
331 * @irq_print_chip: optional to print special chip info in show_interrupts 333 * @irq_print_chip: optional to print special chip info in show_interrupts
@@ -488,8 +490,7 @@ extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
488#endif 490#endif
489 491
490/* Handling of unhandled and spurious interrupts: */ 492/* Handling of unhandled and spurious interrupts: */
491extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 493extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
492 irqreturn_t action_ret);
493 494
494 495
495/* Enable/disable irq debugging output: */ 496/* Enable/disable irq debugging output: */
@@ -640,7 +641,7 @@ static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
640 return d ? d->msi_desc : NULL; 641 return d ? d->msi_desc : NULL;
641} 642}
642 643
643static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) 644static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
644{ 645{
645 return d->msi_desc; 646 return d->msi_desc;
646} 647}
@@ -762,6 +763,12 @@ struct irq_chip_type {
762 * @reg_base: Register base address (virtual) 763 * @reg_base: Register base address (virtual)
763 * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) 764 * @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
764 * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) 765 * @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
766 * @suspend: Function called from core code on suspend once per
 767 * chip; can be useful instead of irq_chip::irq_suspend to
768 * handle chip details even when no interrupts are in use
769 * @resume: Function called from core code on resume once per chip;
 770 * can be useful instead of irq_chip::irq_resume to handle
771 * chip details even when no interrupts are in use
765 * @irq_base: Interrupt base nr for this chip 772 * @irq_base: Interrupt base nr for this chip
766 * @irq_cnt: Number of interrupts handled by this chip 773 * @irq_cnt: Number of interrupts handled by this chip
767 * @mask_cache: Cached mask register shared between all chip types 774 * @mask_cache: Cached mask register shared between all chip types
@@ -788,6 +795,8 @@ struct irq_chip_generic {
788 void __iomem *reg_base; 795 void __iomem *reg_base;
789 u32 (*reg_readl)(void __iomem *addr); 796 u32 (*reg_readl)(void __iomem *addr);
790 void (*reg_writel)(u32 val, void __iomem *addr); 797 void (*reg_writel)(u32 val, void __iomem *addr);
798 void (*suspend)(struct irq_chip_generic *gc);
799 void (*resume)(struct irq_chip_generic *gc);
791 unsigned int irq_base; 800 unsigned int irq_base;
792 unsigned int irq_cnt; 801 unsigned int irq_cnt;
793 u32 mask_cache; 802 u32 mask_cache;
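
The new suspend/resume hooks live on struct irq_chip_generic rather than on irq_chip, so (per the kernel-doc above) they run once per chip even when none of its interrupts are requested. A hedged sketch of wiring them up; my_chip_data, MY_WAKE_REG and the callbacks are hypothetical:

#include <linux/irq.h>

struct my_chip_data {			/* hypothetical driver state */
	u32 saved_wake;
};

#define MY_WAKE_REG	0x10		/* hypothetical register offset */

static void my_gc_suspend(struct irq_chip_generic *gc)
{
	struct my_chip_data *data = gc->private;

	/* Runs once per chip on suspend, even with no IRQs requested. */
	data->saved_wake = irq_reg_readl(gc, MY_WAKE_REG);
}

static void my_gc_resume(struct irq_chip_generic *gc)
{
	struct my_chip_data *data = gc->private;

	irq_reg_writel(gc, data->saved_wake, MY_WAKE_REG);
}

static void my_chip_hook_pm(struct irq_chip_generic *gc,
			    struct my_chip_data *data)
{
	gc->private = data;
	gc->suspend = my_gc_suspend;
	gc->resume  = my_gc_resume;
}
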
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index ffbc034c8810..9eeeb9589acf 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -104,6 +104,8 @@
104#define GICR_SYNCR 0x00C0 104#define GICR_SYNCR 0x00C0
105#define GICR_MOVLPIR 0x0100 105#define GICR_MOVLPIR 0x0100
106#define GICR_MOVALLR 0x0110 106#define GICR_MOVALLR 0x0110
107#define GICR_ISACTIVER GICD_ISACTIVER
108#define GICR_ICACTIVER GICD_ICACTIVER
107#define GICR_IDREGS GICD_IDREGS 109#define GICR_IDREGS GICD_IDREGS
108#define GICR_PIDR2 GICD_PIDR2 110#define GICR_PIDR2 GICD_PIDR2
109 111
@@ -268,9 +270,12 @@
268 270
269#define ICH_LR_EOI (1UL << 41) 271#define ICH_LR_EOI (1UL << 41)
270#define ICH_LR_GROUP (1UL << 60) 272#define ICH_LR_GROUP (1UL << 60)
273#define ICH_LR_HW (1UL << 61)
271#define ICH_LR_STATE (3UL << 62) 274#define ICH_LR_STATE (3UL << 62)
272#define ICH_LR_PENDING_BIT (1UL << 62) 275#define ICH_LR_PENDING_BIT (1UL << 62)
273#define ICH_LR_ACTIVE_BIT (1UL << 63) 276#define ICH_LR_ACTIVE_BIT (1UL << 63)
277#define ICH_LR_PHYS_ID_SHIFT 32
278#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT)
274 279
275#define ICH_MISR_EOI (1 << 0) 280#define ICH_MISR_EOI (1 << 0)
276#define ICH_MISR_U (1 << 1) 281#define ICH_MISR_U (1 << 1)
@@ -288,6 +293,7 @@
288#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) 293#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
289 294
290#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) 295#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
296#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
291#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) 297#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
292#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) 298#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
293#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) 299#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
@@ -360,6 +366,7 @@
360#ifndef __ASSEMBLY__ 366#ifndef __ASSEMBLY__
361 367
362#include <linux/stringify.h> 368#include <linux/stringify.h>
369#include <asm/msi.h>
363 370
364/* 371/*
365 * We need a value to serve as a irq-type for LPIs. Choose one that will 372 * We need a value to serve as a irq-type for LPIs. Choose one that will
@@ -384,6 +391,12 @@ static inline void gic_write_eoir(u64 irq)
384 isb(); 391 isb();
385} 392}
386 393
394static inline void gic_write_dir(u64 irq)
395{
396 asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
397 isb();
398}
399
387struct irq_domain; 400struct irq_domain;
388int its_cpu_init(void); 401int its_cpu_init(void);
389int its_init(struct device_node *node, struct rdists *rdists, 402int its_init(struct device_node *node, struct rdists *rdists,
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 9de976b4f9a7..b8901dfd9e95 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -20,9 +20,13 @@
20#define GIC_CPU_ALIAS_BINPOINT 0x1c 20#define GIC_CPU_ALIAS_BINPOINT 0x1c
21#define GIC_CPU_ACTIVEPRIO 0xd0 21#define GIC_CPU_ACTIVEPRIO 0xd0
22#define GIC_CPU_IDENT 0xfc 22#define GIC_CPU_IDENT 0xfc
23#define GIC_CPU_DEACTIVATE 0x1000
23 24
24#define GICC_ENABLE 0x1 25#define GICC_ENABLE 0x1
25#define GICC_INT_PRI_THRESHOLD 0xf0 26#define GICC_INT_PRI_THRESHOLD 0xf0
27
28#define GIC_CPU_CTRL_EOImodeNS (1 << 9)
29
26#define GICC_IAR_INT_ID_MASK 0x3ff 30#define GICC_IAR_INT_ID_MASK 0x3ff
27#define GICC_INT_SPURIOUS 1023 31#define GICC_INT_SPURIOUS 1023
28#define GICC_DIS_BYPASS_MASK 0x1e0 32#define GICC_DIS_BYPASS_MASK 0x1e0
@@ -71,11 +75,12 @@
71 75
72#define GICH_LR_VIRTUALID (0x3ff << 0) 76#define GICH_LR_VIRTUALID (0x3ff << 0)
73#define GICH_LR_PHYSID_CPUID_SHIFT (10) 77#define GICH_LR_PHYSID_CPUID_SHIFT (10)
74#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT) 78#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
75#define GICH_LR_STATE (3 << 28) 79#define GICH_LR_STATE (3 << 28)
76#define GICH_LR_PENDING_BIT (1 << 28) 80#define GICH_LR_PENDING_BIT (1 << 28)
77#define GICH_LR_ACTIVE_BIT (1 << 29) 81#define GICH_LR_ACTIVE_BIT (1 << 29)
78#define GICH_LR_EOI (1 << 19) 82#define GICH_LR_EOI (1 << 19)
83#define GICH_LR_HW (1 << 31)
79 84
80#define GICH_VMCR_CTRL_SHIFT 0 85#define GICH_VMCR_CTRL_SHIFT 0
81#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) 86#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
@@ -95,11 +100,10 @@
95 100
96struct device_node; 101struct device_node;
97 102
98void gic_set_irqchip_flags(unsigned long flags);
99void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, 103void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
100 u32 offset, struct device_node *); 104 u32 offset, struct device_node *);
101void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 105void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
102void gic_cpu_if_down(void); 106int gic_cpu_if_down(unsigned int gic_nr);
103 107
104static inline void gic_init(unsigned int nr, int start, 108static inline void gic_init(unsigned int nr, int start,
105 void __iomem *dist , void __iomem *cpu) 109 void __iomem *dist , void __iomem *cpu)
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 9b1ad3734911..4e6861605050 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -41,12 +41,20 @@
41 41
42/* Shared Global Counter */ 42/* Shared Global Counter */
43#define GIC_SH_COUNTER_31_00_OFS 0x0010 43#define GIC_SH_COUNTER_31_00_OFS 0x0010
44/* 64-bit counter register for CM3 */
45#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS
44#define GIC_SH_COUNTER_63_32_OFS 0x0014 46#define GIC_SH_COUNTER_63_32_OFS 0x0014
45#define GIC_SH_REVISIONID_OFS 0x0020 47#define GIC_SH_REVISIONID_OFS 0x0020
46 48
47/* Convert an interrupt number to a byte offset/bit for multi-word registers */ 49/* Convert an interrupt number to a byte offset/bit for multi-word registers */
48#define GIC_INTR_OFS(intr) (((intr) / 32) * 4) 50#define GIC_INTR_OFS(intr) ({ \
49#define GIC_INTR_BIT(intr) ((intr) % 32) 51 unsigned bits = mips_cm_is64 ? 64 : 32; \
52 unsigned reg_idx = (intr) / bits; \
53 unsigned reg_width = bits / 8; \
54 \
55 reg_idx * reg_width; \
56})
57#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))
50 58
51/* Polarity : Reset Value is always 0 */ 59/* Polarity : Reset Value is always 0 */
52#define GIC_SH_SET_POLARITY_OFS 0x0100 60#define GIC_SH_SET_POLARITY_OFS 0x0100
@@ -98,6 +106,8 @@
98#define GIC_VPE_WD_COUNT0_OFS 0x0094 106#define GIC_VPE_WD_COUNT0_OFS 0x0094
99#define GIC_VPE_WD_INITIAL0_OFS 0x0098 107#define GIC_VPE_WD_INITIAL0_OFS 0x0098
100#define GIC_VPE_COMPARE_LO_OFS 0x00a0 108#define GIC_VPE_COMPARE_LO_OFS 0x00a0
109/* 64-bit Compare register on CM3 */
110#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS
101#define GIC_VPE_COMPARE_HI_OFS 0x00a4 111#define GIC_VPE_COMPARE_HI_OFS 0x00a4
102 112
103#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100 113#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
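
GIC_INTR_OFS()/GIC_INTR_BIT() now scale with the register width: on CM3 the shared registers are 64 bits wide, so each register covers 64 interrupts and occupies 8 bytes. A standalone sketch of the arithmetic, with mips_cm_is64 modelled as a plain variable:

#include <stdio.h>

static int mips_cm_is64;		/* stand-in for the kernel's flag */

#define GIC_INTR_OFS(intr) ({			\
	unsigned bits = mips_cm_is64 ? 64 : 32;	\
	unsigned reg_idx = (intr) / bits;	\
	unsigned reg_width = bits / 8;		\
						\
	reg_idx * reg_width;			\
})
#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))

int main(void)
{
	mips_cm_is64 = 0;	/* 32-bit regs: intr 40 -> offset 4, bit 8 */
	printf("32-bit: ofs=%u bit=%d\n", GIC_INTR_OFS(40), GIC_INTR_BIT(40));

	mips_cm_is64 = 1;	/* 64-bit regs: intr 40 -> offset 0, bit 40 */
	printf("64-bit: ofs=%u bit=%d\n", GIC_INTR_OFS(40), GIC_INTR_BIT(40));
	return 0;
}
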
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index fcea4e48e21f..5acfa26602e1 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -166,12 +166,16 @@ static inline int handle_domain_irq(struct irq_domain *domain,
166#endif 166#endif
167 167
168/* Test to see if a driver has successfully requested an irq */ 168/* Test to see if a driver has successfully requested an irq */
169static inline int irq_has_action(unsigned int irq) 169static inline int irq_desc_has_action(struct irq_desc *desc)
170{ 170{
171 struct irq_desc *desc = irq_to_desc(irq);
172 return desc->action != NULL; 171 return desc->action != NULL;
173} 172}
174 173
174static inline int irq_has_action(unsigned int irq)
175{
176 return irq_desc_has_action(irq_to_desc(irq));
177}
178
175/* caller has locked the irq_desc and both params are valid */ 179/* caller has locked the irq_desc and both params are valid */
176static inline void __irq_set_handler_locked(unsigned int irq, 180static inline void __irq_set_handler_locked(unsigned int irq,
177 irq_flow_handler_t handler) 181 irq_flow_handler_t handler)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 744ac0ec98eb..d3ca79236fb0 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -45,6 +45,20 @@ struct irq_data;
45/* Number of irqs reserved for a legacy isa controller */ 45/* Number of irqs reserved for a legacy isa controller */
46#define NUM_ISA_INTERRUPTS 16 46#define NUM_ISA_INTERRUPTS 16
47 47
48/*
49 * Should several domains have the same device node, but serve
50 * different purposes (for example one domain is for PCI/MSI, and the
51 * other for wired IRQs), they can be distinguished using a
52 * bus-specific token. Most domains are expected to only carry
53 * DOMAIN_BUS_ANY.
54 */
55enum irq_domain_bus_token {
56 DOMAIN_BUS_ANY = 0,
57 DOMAIN_BUS_PCI_MSI,
58 DOMAIN_BUS_PLATFORM_MSI,
59 DOMAIN_BUS_NEXUS,
60};
61
48/** 62/**
49 * struct irq_domain_ops - Methods for irq_domain objects 63 * struct irq_domain_ops - Methods for irq_domain objects
50 * @match: Match an interrupt controller device node to a host, returns 64 * @match: Match an interrupt controller device node to a host, returns
@@ -61,7 +75,8 @@ struct irq_data;
61 * to setup the irq_desc when returning from map(). 75 * to setup the irq_desc when returning from map().
62 */ 76 */
63struct irq_domain_ops { 77struct irq_domain_ops {
64 int (*match)(struct irq_domain *d, struct device_node *node); 78 int (*match)(struct irq_domain *d, struct device_node *node,
79 enum irq_domain_bus_token bus_token);
65 int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); 80 int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
66 void (*unmap)(struct irq_domain *d, unsigned int virq); 81 void (*unmap)(struct irq_domain *d, unsigned int virq);
67 int (*xlate)(struct irq_domain *d, struct device_node *node, 82 int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -116,6 +131,7 @@ struct irq_domain {
116 131
117 /* Optional data */ 132 /* Optional data */
118 struct device_node *of_node; 133 struct device_node *of_node;
134 enum irq_domain_bus_token bus_token;
119 struct irq_domain_chip_generic *gc; 135 struct irq_domain_chip_generic *gc;
120#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 136#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
121 struct irq_domain *parent; 137 struct irq_domain *parent;
@@ -161,9 +177,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
161 irq_hw_number_t first_hwirq, 177 irq_hw_number_t first_hwirq,
162 const struct irq_domain_ops *ops, 178 const struct irq_domain_ops *ops,
163 void *host_data); 179 void *host_data);
164extern struct irq_domain *irq_find_host(struct device_node *node); 180extern struct irq_domain *irq_find_matching_host(struct device_node *node,
181 enum irq_domain_bus_token bus_token);
165extern void irq_set_default_host(struct irq_domain *host); 182extern void irq_set_default_host(struct irq_domain *host);
166 183
184static inline struct irq_domain *irq_find_host(struct device_node *node)
185{
186 return irq_find_matching_host(node, DOMAIN_BUS_ANY);
187}
188
167/** 189/**
168 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. 190 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
169 * @of_node: pointer to interrupt controller's device tree node. 191 * @of_node: pointer to interrupt controller's device tree node.
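
With the bus token added above, several domains can share one device node and still be told apart at lookup time. A hedged sketch; the ops tables and my_* helpers are hypothetical, and the direct bus_token assignment mirrors how irqchip drivers of this vintage mark their MSI domains:

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* Hypothetical: register a wired-IRQ domain and an MSI domain on one node. */
static int my_chip_init_domains(struct device_node *node,
				const struct irq_domain_ops *wired_ops,
				const struct irq_domain_ops *msi_ops)
{
	struct irq_domain *wired, *msi;

	wired = irq_domain_add_linear(node, 32, wired_ops, NULL);
	if (!wired)
		return -ENOMEM;
	/* Keeps the default token, so plain irq_find_host() still finds it. */

	msi = irq_domain_add_tree(node, msi_ops, NULL);
	if (!msi)
		return -ENOMEM;
	msi->bus_token = DOMAIN_BUS_PCI_MSI;	/* distinguishes it from "wired" */

	return 0;
}

/* A consumer that specifically wants the MSI domain: */
static struct irq_domain *my_find_msi_domain(struct device_node *node)
{
	return irq_find_matching_host(node, DOMAIN_BUS_PCI_MSI);
}
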
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
deleted file mode 100644
index d32615280be9..000000000000
--- a/include/linux/jbd.h
+++ /dev/null
@@ -1,1047 +0,0 @@
1/*
2 * linux/include/linux/jbd.h
3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>
5 *
6 * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
7 *
8 * This file is part of the Linux kernel and is made available under
9 * the terms of the GNU General Public License, version 2, or at your
10 * option, any later version, incorporated herein by reference.
11 *
12 * Definitions for transaction data structures for the buffer cache
13 * filesystem journaling support.
14 */
15
16#ifndef _LINUX_JBD_H
17#define _LINUX_JBD_H
18
19/* Allow this file to be included directly into e2fsprogs */
20#ifndef __KERNEL__
21#include "jfs_compat.h"
22#define JFS_DEBUG
23#define jfs_debug jbd_debug
24#else
25
26#include <linux/types.h>
27#include <linux/buffer_head.h>
28#include <linux/journal-head.h>
29#include <linux/stddef.h>
30#include <linux/mutex.h>
31#include <linux/timer.h>
32#include <linux/lockdep.h>
33#include <linux/slab.h>
34
35#define journal_oom_retry 1
36
37/*
38 * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
39 * certain classes of error which can occur due to failed IOs. Under
40 * normal use we want ext3 to continue after such errors, because
41 * hardware _can_ fail, but for debugging purposes when running tests on
42 * known-good hardware we may want to trap these errors.
43 */
44#undef JBD_PARANOID_IOFAIL
45
46/*
47 * The default maximum commit age, in seconds.
48 */
49#define JBD_DEFAULT_MAX_COMMIT_AGE 5
50
51#ifdef CONFIG_JBD_DEBUG
52/*
53 * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
54 * consistency checks. By default we don't do this unless
55 * CONFIG_JBD_DEBUG is on.
56 */
57#define JBD_EXPENSIVE_CHECKING
58extern u8 journal_enable_debug;
59
60void __jbd_debug(int level, const char *file, const char *func,
61 unsigned int line, const char *fmt, ...);
62
63#define jbd_debug(n, fmt, a...) \
64 __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
65#else
66#define jbd_debug(n, fmt, a...) /**/
67#endif
68
69static inline void *jbd_alloc(size_t size, gfp_t flags)
70{
71 return (void *)__get_free_pages(flags, get_order(size));
72}
73
74static inline void jbd_free(void *ptr, size_t size)
75{
76 free_pages((unsigned long)ptr, get_order(size));
77}
78
79#define JFS_MIN_JOURNAL_BLOCKS 1024
80
81
82/**
83 * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
84 *
85 * All filesystem modifications made by the process go
86 * through this handle. Recursive operations (such as quota operations)
87 * are gathered into a single update.
88 *
89 * The buffer credits field is used to account for journaled buffers
90 * being modified by the running process. To ensure that there is
91 * enough log space for all outstanding operations, we need to limit the
92 * number of outstanding buffers possible at any time. When the
93 * operation completes, any buffer credits not used are credited back to
94 * the transaction, so that at all times we know how many buffers the
95 * outstanding updates on a transaction might possibly touch.
96 *
97 * This is an opaque datatype.
98 **/
99typedef struct handle_s handle_t; /* Atomic operation type */
100
101
102/**
103 * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
104 *
105 * journal_t is linked to from the fs superblock structure.
106 *
107 * We use the journal_t to keep track of all outstanding transaction
108 * activity on the filesystem, and to manage the state of the log
109 * writing process.
110 *
111 * This is an opaque datatype.
112 **/
113typedef struct journal_s journal_t; /* Journal control structure */
114#endif
115
116/*
117 * Internal structures used by the logging mechanism:
118 */
119
120#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
121
122/*
123 * On-disk structures
124 */
125
126/*
127 * Descriptor block types:
128 */
129
130#define JFS_DESCRIPTOR_BLOCK 1
131#define JFS_COMMIT_BLOCK 2
132#define JFS_SUPERBLOCK_V1 3
133#define JFS_SUPERBLOCK_V2 4
134#define JFS_REVOKE_BLOCK 5
135
136/*
137 * Standard header for all descriptor blocks:
138 */
139typedef struct journal_header_s
140{
141 __be32 h_magic;
142 __be32 h_blocktype;
143 __be32 h_sequence;
144} journal_header_t;
145
146
147/*
148 * The block tag: used to describe a single buffer in the journal
149 */
150typedef struct journal_block_tag_s
151{
152 __be32 t_blocknr; /* The on-disk block number */
153 __be32 t_flags; /* See below */
154} journal_block_tag_t;
155
156/*
157 * The revoke descriptor: used on disk to describe a series of blocks to
158 * be revoked from the log
159 */
160typedef struct journal_revoke_header_s
161{
162 journal_header_t r_header;
163 __be32 r_count; /* Count of bytes used in the block */
164} journal_revoke_header_t;
165
166
167/* Definitions for the journal tag flags word: */
168#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
169#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
170#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
171#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
172
173
174/*
175 * The journal superblock. All fields are in big-endian byte order.
176 */
177typedef struct journal_superblock_s
178{
179/* 0x0000 */
180 journal_header_t s_header;
181
182/* 0x000C */
183 /* Static information describing the journal */
184 __be32 s_blocksize; /* journal device blocksize */
185 __be32 s_maxlen; /* total blocks in journal file */
186 __be32 s_first; /* first block of log information */
187
188/* 0x0018 */
189 /* Dynamic information describing the current state of the log */
190 __be32 s_sequence; /* first commit ID expected in log */
191 __be32 s_start; /* blocknr of start of log */
192
193/* 0x0020 */
194 /* Error value, as set by journal_abort(). */
195 __be32 s_errno;
196
197/* 0x0024 */
198 /* Remaining fields are only valid in a version-2 superblock */
199 __be32 s_feature_compat; /* compatible feature set */
200 __be32 s_feature_incompat; /* incompatible feature set */
201 __be32 s_feature_ro_compat; /* readonly-compatible feature set */
202/* 0x0030 */
203 __u8 s_uuid[16]; /* 128-bit uuid for journal */
204
205/* 0x0040 */
206 __be32 s_nr_users; /* Nr of filesystems sharing log */
207
208 __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/
209
210/* 0x0048 */
211 __be32 s_max_transaction; /* Limit of journal blocks per trans.*/
212 __be32 s_max_trans_data; /* Limit of data blocks per trans. */
213
214/* 0x0050 */
215 __u32 s_padding[44];
216
217/* 0x0100 */
218 __u8 s_users[16*48]; /* ids of all fs'es sharing the log */
219/* 0x0400 */
220} journal_superblock_t;
221
222#define JFS_HAS_COMPAT_FEATURE(j,mask) \
223 ((j)->j_format_version >= 2 && \
224 ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
225#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
226 ((j)->j_format_version >= 2 && \
227 ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
228#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
229 ((j)->j_format_version >= 2 && \
230 ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
231
232#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
233
234/* Features known to this kernel version: */
235#define JFS_KNOWN_COMPAT_FEATURES 0
236#define JFS_KNOWN_ROCOMPAT_FEATURES 0
237#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
238
239#ifdef __KERNEL__
240
241#include <linux/fs.h>
242#include <linux/sched.h>
243
244enum jbd_state_bits {
245 BH_JBD /* Has an attached ext3 journal_head */
246 = BH_PrivateStart,
247 BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
248 BH_Freed, /* Has been freed (truncated) */
249 BH_Revoked, /* Has been revoked from the log */
250 BH_RevokeValid, /* Revoked flag is valid */
251 BH_JBDDirty, /* Is dirty but journaled */
252 BH_State, /* Pins most journal_head state */
253 BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
254 BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
255 BH_JBDPrivateStart, /* First bit available for private use by FS */
256};
257
258BUFFER_FNS(JBD, jbd)
259BUFFER_FNS(JWrite, jwrite)
260BUFFER_FNS(JBDDirty, jbddirty)
261TAS_BUFFER_FNS(JBDDirty, jbddirty)
262BUFFER_FNS(Revoked, revoked)
263TAS_BUFFER_FNS(Revoked, revoked)
264BUFFER_FNS(RevokeValid, revokevalid)
265TAS_BUFFER_FNS(RevokeValid, revokevalid)
266BUFFER_FNS(Freed, freed)
267
268#include <linux/jbd_common.h>
269
270#define J_ASSERT(assert) BUG_ON(!(assert))
271
272#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
273#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
274
275#if defined(JBD_PARANOID_IOFAIL)
276#define J_EXPECT(expr, why...) J_ASSERT(expr)
277#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
278#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
279#else
280#define __journal_expect(expr, why...) \
281 ({ \
282 int val = (expr); \
283 if (!val) { \
284 printk(KERN_ERR \
285 "EXT3-fs unexpected failure: %s;\n",# expr); \
286 printk(KERN_ERR why "\n"); \
287 } \
288 val; \
289 })
290#define J_EXPECT(expr, why...) __journal_expect(expr, ## why)
291#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why)
292#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
293#endif
294
295struct jbd_revoke_table_s;
296
297/**
298 * struct handle_s - this is the concrete type associated with handle_t.
299 * @h_transaction: Which compound transaction is this update a part of?
300 * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
301 * @h_ref: Reference count on this handle
302 * @h_err: Field for caller's use to track errors through large fs operations
303 * @h_sync: flag for sync-on-close
304 * @h_jdata: flag to force data journaling
305 * @h_aborted: flag indicating fatal error on handle
306 * @h_lockdep_map: lockdep info for debugging lock problems
307 */
308struct handle_s
309{
310 /* Which compound transaction is this update a part of? */
311 transaction_t *h_transaction;
312
313 /* Number of remaining buffers we are allowed to dirty: */
314 int h_buffer_credits;
315
316 /* Reference count on this handle */
317 int h_ref;
318
319 /* Field for caller's use to track errors through large fs */
320 /* operations */
321 int h_err;
322
323 /* Flags [no locking] */
324 unsigned int h_sync: 1; /* sync-on-close */
325 unsigned int h_jdata: 1; /* force data journaling */
326 unsigned int h_aborted: 1; /* fatal error on handle */
327
328#ifdef CONFIG_DEBUG_LOCK_ALLOC
329 struct lockdep_map h_lockdep_map;
330#endif
331};
332
333
334/* The transaction_t type is the guts of the journaling mechanism. It
335 * tracks a compound transaction through its various states:
336 *
337 * RUNNING: accepting new updates
338 * LOCKED: Updates still running but we don't accept new ones
339 * RUNDOWN: Updates are tidying up but have finished requesting
340 * new buffers to modify (state not used for now)
341 * FLUSH: All updates complete, but we are still writing to disk
342 * COMMIT: All data on disk, writing commit record
343 * FINISHED: We still have to keep the transaction for checkpointing.
344 *
345 * The transaction keeps track of all of the buffers modified by a
346 * running transaction, and all of the buffers committed but not yet
347 * flushed to home for finished transactions.
348 */
349
350/*
351 * Lock ranking:
352 *
353 * j_list_lock
354 * ->jbd_lock_bh_journal_head() (This is "innermost")
355 *
356 * j_state_lock
357 * ->jbd_lock_bh_state()
358 *
359 * jbd_lock_bh_state()
360 * ->j_list_lock
361 *
362 * j_state_lock
363 * ->t_handle_lock
364 *
365 * j_state_lock
366 * ->j_list_lock (journal_unmap_buffer)
367 *
368 */
369
370struct transaction_s
371{
372 /* Pointer to the journal for this transaction. [no locking] */
373 journal_t *t_journal;
374
375 /* Sequence number for this transaction [no locking] */
376 tid_t t_tid;
377
378 /*
379 * Transaction's current state
380 * [no locking - only kjournald alters this]
381 * [j_list_lock] guards transition of a transaction into T_FINISHED
382 * state and subsequent call of __journal_drop_transaction()
383 * FIXME: needs barriers
384 * KLUDGE: [use j_state_lock]
385 */
386 enum {
387 T_RUNNING,
388 T_LOCKED,
389 T_FLUSH,
390 T_COMMIT,
391 T_COMMIT_RECORD,
392 T_FINISHED
393 } t_state;
394
395 /*
396 * Where in the log does this transaction's commit start? [no locking]
397 */
398 unsigned int t_log_start;
399
400 /* Number of buffers on the t_buffers list [j_list_lock] */
401 int t_nr_buffers;
402
403 /*
404 * Doubly-linked circular list of all buffers reserved but not yet
405 * modified by this transaction [j_list_lock]
406 */
407 struct journal_head *t_reserved_list;
408
409 /*
410 * Doubly-linked circular list of all buffers under writeout during
411 * commit [j_list_lock]
412 */
413 struct journal_head *t_locked_list;
414
415 /*
416 * Doubly-linked circular list of all metadata buffers owned by this
417 * transaction [j_list_lock]
418 */
419 struct journal_head *t_buffers;
420
421 /*
422 * Doubly-linked circular list of all data buffers still to be
423 * flushed before this transaction can be committed [j_list_lock]
424 */
425 struct journal_head *t_sync_datalist;
426
427 /*
428 * Doubly-linked circular list of all forget buffers (superseded
429 * buffers which we can un-checkpoint once this transaction commits)
430 * [j_list_lock]
431 */
432 struct journal_head *t_forget;
433
434 /*
435 * Doubly-linked circular list of all buffers still to be flushed before
436 * this transaction can be checkpointed. [j_list_lock]
437 */
438 struct journal_head *t_checkpoint_list;
439
440 /*
441 * Doubly-linked circular list of all buffers submitted for IO while
442 * checkpointing. [j_list_lock]
443 */
444 struct journal_head *t_checkpoint_io_list;
445
446 /*
447 * Doubly-linked circular list of temporary buffers currently undergoing
448 * IO in the log [j_list_lock]
449 */
450 struct journal_head *t_iobuf_list;
451
452 /*
453 * Doubly-linked circular list of metadata buffers being shadowed by log
454 * IO. The IO buffers on the iobuf list and the shadow buffers on this
455 * list match each other one for one at all times. [j_list_lock]
456 */
457 struct journal_head *t_shadow_list;
458
459 /*
460 * Doubly-linked circular list of control buffers being written to the
461 * log. [j_list_lock]
462 */
463 struct journal_head *t_log_list;
464
465 /*
466 * Protects info related to handles
467 */
468 spinlock_t t_handle_lock;
469
470 /*
471 * Number of outstanding updates running on this transaction
472 * [t_handle_lock]
473 */
474 int t_updates;
475
476 /*
477 * Number of buffers reserved for use by all handles in this transaction
478 * handle but not yet modified. [t_handle_lock]
479 */
480 int t_outstanding_credits;
481
482 /*
483 * Forward and backward links for the circular list of all transactions
484 * awaiting checkpoint. [j_list_lock]
485 */
486 transaction_t *t_cpnext, *t_cpprev;
487
488 /*
489 * When will the transaction expire (become due for commit), in jiffies?
490 * [no locking]
491 */
492 unsigned long t_expires;
493
494 /*
495 * When this transaction started, in nanoseconds [no locking]
496 */
497 ktime_t t_start_time;
498
499 /*
500 * How many handles used this transaction? [t_handle_lock]
501 */
502 int t_handle_count;
503};
504
505/**
506 * struct journal_s - this is the concrete type associated with journal_t.
507 * @j_flags: General journaling state flags
508 * @j_errno: Is there an outstanding uncleared error on the journal (from a
509 * prior abort)?
510 * @j_sb_buffer: First part of superblock buffer
511 * @j_superblock: Second part of superblock buffer
512 * @j_format_version: Version of the superblock format
513 * @j_state_lock: Protect the various scalars in the journal
514 * @j_barrier_count: Number of processes waiting to create a barrier lock
515 * @j_running_transaction: The current running transaction..
516 * @j_committing_transaction: the transaction we are pushing to disk
517 * @j_checkpoint_transactions: a linked circular list of all transactions
518 * waiting for checkpointing
519 * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
520 * to start committing, or for a barrier lock to be released
521 * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
522 * @j_wait_done_commit: Wait queue for waiting for commit to complete
523 * @j_wait_checkpoint: Wait queue to trigger checkpointing
524 * @j_wait_commit: Wait queue to trigger commit
525 * @j_wait_updates: Wait queue to wait for updates to complete
526 * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
527 * @j_head: Journal head - identifies the first unused block in the journal
528 * @j_tail: Journal tail - identifies the oldest still-used block in the
529 * journal.
530 * @j_free: Journal free - how many free blocks are there in the journal?
531 * @j_first: The block number of the first usable block
532 * @j_last: The block number one beyond the last usable block
533 * @j_dev: Device where we store the journal
534 * @j_blocksize: blocksize for the location where we store the journal.
535 * @j_blk_offset: starting block offset for into the device where we store the
536 * journal
537 * @j_fs_dev: Device which holds the client fs. For internal journal this will
538 * be equal to j_dev
539 * @j_maxlen: Total maximum capacity of the journal region on disk.
540 * @j_list_lock: Protects the buffer lists and internal buffer state.
541 * @j_inode: Optional inode where we store the journal. If present, all journal
542 * block numbers are mapped into this inode via bmap().
543 * @j_tail_sequence: Sequence number of the oldest transaction in the log
544 * @j_transaction_sequence: Sequence number of the next transaction to grant
545 * @j_commit_sequence: Sequence number of the most recently committed
546 * transaction
547 * @j_commit_request: Sequence number of the most recent transaction wanting
548 * commit
549 * @j_commit_waited: Sequence number of the most recent transaction someone
550 * is waiting for to commit.
551 * @j_uuid: Uuid of client object.
552 * @j_task: Pointer to the current commit thread for this journal
553 * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
554 * single compound commit transaction
555 * @j_commit_interval: What is the maximum transaction lifetime before we begin
556 * a commit?
557 * @j_commit_timer: The timer used to wakeup the commit thread
558 * @j_revoke_lock: Protect the revoke table
559 * @j_revoke: The revoke table - maintains the list of revoked blocks in the
560 * current transaction.
561 * @j_revoke_table: alternate revoke tables for j_revoke
562 * @j_wbuf: array of buffer_heads for journal_commit_transaction
563 * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
564 * number that will fit in j_blocksize
565 * @j_last_sync_writer: most recent pid which did a synchronous write
566 * @j_average_commit_time: the average amount of time in nanoseconds it
567 * takes to commit a transaction to the disk.
568 * @j_private: An opaque pointer to fs-private information.
569 */
570
571struct journal_s
572{
573 /* General journaling state flags [j_state_lock] */
574 unsigned long j_flags;
575
576 /*
577 * Is there an outstanding uncleared error on the journal (from a prior
578 * abort)? [j_state_lock]
579 */
580 int j_errno;
581
582 /* The superblock buffer */
583 struct buffer_head *j_sb_buffer;
584 journal_superblock_t *j_superblock;
585
586 /* Version of the superblock format */
587 int j_format_version;
588
589 /*
590 * Protect the various scalars in the journal
591 */
592 spinlock_t j_state_lock;
593
594 /*
595 * Number of processes waiting to create a barrier lock [j_state_lock]
596 */
597 int j_barrier_count;
598
599 /*
600 * Transactions: The current running transaction...
601 * [j_state_lock] [caller holding open handle]
602 */
603 transaction_t *j_running_transaction;
604
605 /*
606 * the transaction we are pushing to disk
607 * [j_state_lock] [caller holding open handle]
608 */
609 transaction_t *j_committing_transaction;
610
611 /*
612 * ... and a linked circular list of all transactions waiting for
613 * checkpointing. [j_list_lock]
614 */
615 transaction_t *j_checkpoint_transactions;
616
617 /*
618 * Wait queue for waiting for a locked transaction to start committing,
619 * or for a barrier lock to be released
620 */
621 wait_queue_head_t j_wait_transaction_locked;
622
623 /* Wait queue for waiting for checkpointing to complete */
624 wait_queue_head_t j_wait_logspace;
625
626 /* Wait queue for waiting for commit to complete */
627 wait_queue_head_t j_wait_done_commit;
628
629 /* Wait queue to trigger checkpointing */
630 wait_queue_head_t j_wait_checkpoint;
631
632 /* Wait queue to trigger commit */
633 wait_queue_head_t j_wait_commit;
634
635 /* Wait queue to wait for updates to complete */
636 wait_queue_head_t j_wait_updates;
637
638 /* Semaphore for locking against concurrent checkpoints */
639 struct mutex j_checkpoint_mutex;
640
641 /*
642 * Journal head: identifies the first unused block in the journal.
643 * [j_state_lock]
644 */
645 unsigned int j_head;
646
647 /*
648 * Journal tail: identifies the oldest still-used block in the journal.
649 * [j_state_lock]
650 */
651 unsigned int j_tail;
652
653 /*
654 * Journal free: how many free blocks are there in the journal?
655 * [j_state_lock]
656 */
657 unsigned int j_free;
658
659 /*
660 * Journal start and end: the block numbers of the first usable block
661 * and one beyond the last usable block in the journal. [j_state_lock]
662 */
663 unsigned int j_first;
664 unsigned int j_last;
665
666 /*
667 * Device, blocksize and starting block offset for the location where we
668 * store the journal.
669 */
670 struct block_device *j_dev;
671 int j_blocksize;
672 unsigned int j_blk_offset;
673
674 /*
675 * Device which holds the client fs. For internal journal this will be
676 * equal to j_dev.
677 */
678 struct block_device *j_fs_dev;
679
680 /* Total maximum capacity of the journal region on disk. */
681 unsigned int j_maxlen;
682
683 /*
684 * Protects the buffer lists and internal buffer state.
685 */
686 spinlock_t j_list_lock;
687
688 /* Optional inode where we store the journal. If present, all */
689 /* journal block numbers are mapped into this inode via */
690 /* bmap(). */
691 struct inode *j_inode;
692
693 /*
694 * Sequence number of the oldest transaction in the log [j_state_lock]
695 */
696 tid_t j_tail_sequence;
697
698 /*
699 * Sequence number of the next transaction to grant [j_state_lock]
700 */
701 tid_t j_transaction_sequence;
702
703 /*
704 * Sequence number of the most recently committed transaction
705 * [j_state_lock].
706 */
707 tid_t j_commit_sequence;
708
709 /*
710 * Sequence number of the most recent transaction wanting commit
711 * [j_state_lock]
712 */
713 tid_t j_commit_request;
714
715 /*
716 * Sequence number of the most recent transaction someone is waiting
717 * for to commit.
718 * [j_state_lock]
719 */
720 tid_t j_commit_waited;
721
722 /*
723 * Journal uuid: identifies the object (filesystem, LVM volume etc)
724 * backed by this journal. This will eventually be replaced by an array
725 * of uuids, allowing us to index multiple devices within a single
726 * journal and to perform atomic updates across them.
727 */
728 __u8 j_uuid[16];
729
730 /* Pointer to the current commit thread for this journal */
731 struct task_struct *j_task;
732
733 /*
734 * Maximum number of metadata buffers to allow in a single compound
735 * commit transaction
736 */
737 int j_max_transaction_buffers;
738
739 /*
740 * What is the maximum transaction lifetime before we begin a commit?
741 */
742 unsigned long j_commit_interval;
743
744 /* The timer used to wakeup the commit thread: */
745 struct timer_list j_commit_timer;
746
747 /*
748 * The revoke table: maintains the list of revoked blocks in the
749 * current transaction. [j_revoke_lock]
750 */
751 spinlock_t j_revoke_lock;
752 struct jbd_revoke_table_s *j_revoke;
753 struct jbd_revoke_table_s *j_revoke_table[2];
754
755 /*
756 * array of bhs for journal_commit_transaction
757 */
758 struct buffer_head **j_wbuf;
759 int j_wbufsize;
760
761 /*
762 * this is the pid of the last person to run a synchronous operation
763 * through the journal.
764 */
765 pid_t j_last_sync_writer;
766
767 /*
768 * the average amount of time in nanoseconds it takes to commit a
769 * transaction to the disk. [j_state_lock]
770 */
771 u64 j_average_commit_time;
772
773 /*
774 * An opaque pointer to fs-private information. ext3 puts its
775 * superblock pointer here
776 */
777 void *j_private;
778};
779
780/*
781 * Journal flag definitions
782 */
783#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
784#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
785#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
786#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
787#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
788#define JFS_BARRIER 0x020 /* Use IDE barriers */
789#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
790 * data write error in ordered
791 * mode */
792
793/*
794 * Function declarations for the journaling transaction and buffer
795 * management
796 */
797
798/* Filing buffers */
799extern void journal_unfile_buffer(journal_t *, struct journal_head *);
800extern void __journal_unfile_buffer(struct journal_head *);
801extern void __journal_refile_buffer(struct journal_head *);
802extern void journal_refile_buffer(journal_t *, struct journal_head *);
803extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
804extern void __journal_free_buffer(struct journal_head *bh);
805extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
806extern void __journal_clean_data_list(transaction_t *transaction);
807
808/* Log buffer allocation */
809extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
810int journal_next_log_block(journal_t *, unsigned int *);
811
812/* Commit management */
813extern void journal_commit_transaction(journal_t *);
814
815/* Checkpoint list management */
816int __journal_clean_checkpoint_list(journal_t *journal);
817int __journal_remove_checkpoint(struct journal_head *);
818void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
819
820/* Buffer IO */
821extern int
822journal_write_metadata_buffer(transaction_t *transaction,
823 struct journal_head *jh_in,
824 struct journal_head **jh_out,
825 unsigned int blocknr);
826
827/* Transaction locking */
828extern void __wait_on_journal (journal_t *);
829
830/*
831 * Journal locking.
832 *
833 * We need to lock the journal during transaction state changes so that nobody
834 * ever tries to take a handle on the running transaction while we are in the
835 * middle of moving it to the commit phase. j_state_lock does this.
836 *
837 * Note that the locking is completely interrupt unsafe. We never touch
838 * journal structures from interrupts.
839 */
840
841static inline handle_t *journal_current_handle(void)
842{
843 return current->journal_info;
844}
845
846/* The journaling code user interface:
847 *
848 * Create and destroy handles
849 * Register buffer modifications against the current transaction.
850 */
851
852extern handle_t *journal_start(journal_t *, int nblocks);
853extern int journal_restart (handle_t *, int nblocks);
854extern int journal_extend (handle_t *, int nblocks);
855extern int journal_get_write_access(handle_t *, struct buffer_head *);
856extern int journal_get_create_access (handle_t *, struct buffer_head *);
857extern int journal_get_undo_access(handle_t *, struct buffer_head *);
858extern int journal_dirty_data (handle_t *, struct buffer_head *);
859extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
860extern void journal_release_buffer (handle_t *, struct buffer_head *);
861extern int journal_forget (handle_t *, struct buffer_head *);
862extern void journal_sync_buffer (struct buffer_head *);
863extern void journal_invalidatepage(journal_t *,
864 struct page *, unsigned int, unsigned int);
865extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
866extern int journal_stop(handle_t *);
867extern int journal_flush (journal_t *);
868extern void journal_lock_updates (journal_t *);
869extern void journal_unlock_updates (journal_t *);
870
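/*
 * Hedged sketch (not part of the original header) of the classic update
 * sequence built from the interface declared above: start a handle, get
 * write access, mark the buffer dirty, stop. my_journalled_update() is a
 * hypothetical caller-side helper; IS_ERR()/PTR_ERR() come from linux/err.h.
 */
static int my_journalled_update(journal_t *journal, struct buffer_head *bh,
				int nblocks)
{
	handle_t *handle;
	int err;

	handle = journal_start(journal, nblocks);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify the buffer contents here ... */
		err = journal_dirty_metadata(handle, bh);
	}

	journal_stop(handle);
	return err;
}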
871extern journal_t * journal_init_dev(struct block_device *bdev,
872 struct block_device *fs_dev,
873 int start, int len, int bsize);
874extern journal_t * journal_init_inode (struct inode *);
875extern int journal_update_format (journal_t *);
876extern int journal_check_used_features
877 (journal_t *, unsigned long, unsigned long, unsigned long);
878extern int journal_check_available_features
879 (journal_t *, unsigned long, unsigned long, unsigned long);
880extern int journal_set_features
881 (journal_t *, unsigned long, unsigned long, unsigned long);
882extern int journal_create (journal_t *);
883extern int journal_load (journal_t *journal);
884extern int journal_destroy (journal_t *);
885extern int journal_recover (journal_t *journal);
886extern int journal_wipe (journal_t *, int);
887extern int journal_skip_recovery (journal_t *);
888extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
889 int);
890extern void journal_abort (journal_t *, int);
891extern int journal_errno (journal_t *);
892extern void journal_ack_err (journal_t *);
893extern int journal_clear_err (journal_t *);
894extern int journal_bmap(journal_t *, unsigned int, unsigned int *);
895extern int journal_force_commit(journal_t *);
896
897/*
898 * journal_head management
899 */
900struct journal_head *journal_add_journal_head(struct buffer_head *bh);
901struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
902void journal_put_journal_head(struct journal_head *jh);
903
904/*
905 * handle management
906 */
907extern struct kmem_cache *jbd_handle_cache;
908
909static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
910{
911 return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
912}
913
914static inline void jbd_free_handle(handle_t *handle)
915{
916 kmem_cache_free(jbd_handle_cache, handle);
917}
918
919/* Primary revoke support */
920#define JOURNAL_REVOKE_DEFAULT_HASH 256
921extern int journal_init_revoke(journal_t *, int);
922extern void journal_destroy_revoke_caches(void);
923extern int journal_init_revoke_caches(void);
924
925extern void journal_destroy_revoke(journal_t *);
926extern int journal_revoke (handle_t *,
927 unsigned int, struct buffer_head *);
928extern int journal_cancel_revoke(handle_t *, struct journal_head *);
929extern void journal_write_revoke_records(journal_t *,
930 transaction_t *, int);
931
932/* Recovery revoke support */
933extern int journal_set_revoke(journal_t *, unsigned int, tid_t);
934extern int journal_test_revoke(journal_t *, unsigned int, tid_t);
935extern void journal_clear_revoke(journal_t *);
936extern void journal_switch_revoke_table(journal_t *journal);
937extern void journal_clear_buffer_revoked_flags(journal_t *journal);
938
939/*
940 * The log thread user interface:
941 *
942 * Request space in the current transaction, and force transaction commit
943 * transitions on demand.
944 */
945
946int __log_space_left(journal_t *); /* Called with journal locked */
947int log_start_commit(journal_t *journal, tid_t tid);
948int __log_start_commit(journal_t *journal, tid_t tid);
949int journal_start_commit(journal_t *journal, tid_t *tid);
950int journal_force_commit_nested(journal_t *journal);
951int log_wait_commit(journal_t *journal, tid_t tid);
952int log_do_checkpoint(journal_t *journal);
953int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
954
955void __log_wait_for_space(journal_t *journal);
956extern void __journal_drop_transaction(journal_t *, transaction_t *);
957extern int cleanup_journal_tail(journal_t *);
958
959/*
960 * is_journal_abort
961 *
962 * Simple test wrapper function to test the JFS_ABORT state flag. This
963 * bit, when set, indicates that we have had a fatal error somewhere,
964 * either inside the journaling layer or indicated to us by the client
965 * (e.g. ext3), and that we should not commit any further
966 * transactions.
967 */
968
969static inline int is_journal_aborted(journal_t *journal)
970{
971 return journal->j_flags & JFS_ABORT;
972}
973
974static inline int is_handle_aborted(handle_t *handle)
975{
976 if (handle->h_aborted)
977 return 1;
978 return is_journal_aborted(handle->h_transaction->t_journal);
979}
980
981static inline void journal_abort_handle(handle_t *handle)
982{
983 handle->h_aborted = 1;
984}
985
986#endif /* __KERNEL__ */
987
988/* Comparison functions for transaction IDs: perform comparisons using
989 * modulo arithmetic so that they work over sequence number wraps. */
990
991static inline int tid_gt(tid_t x, tid_t y)
992{
993 int difference = (x - y);
994 return (difference > 0);
995}
996
997static inline int tid_geq(tid_t x, tid_t y)
998{
999 int difference = (x - y);
1000 return (difference >= 0);
1001}
1002
1003extern int journal_blocks_per_page(struct inode *inode);
1004
1005/*
1006 * Return the minimum number of blocks which must be free in the journal
1007 * before a new transaction may be started. Must be called under j_state_lock.
1008 */
1009static inline int jbd_space_needed(journal_t *journal)
1010{
1011 int nblocks = journal->j_max_transaction_buffers;
1012 if (journal->j_committing_transaction)
1013 nblocks += journal->j_committing_transaction->
1014 t_outstanding_credits;
1015 return nblocks;
1016}
1017
1018/*
1019 * Definitions which augment the buffer_head layer
1020 */
1021
1022/* journaling buffer types */
1023#define BJ_None 0 /* Not journaled */
1024#define BJ_SyncData 1 /* Normal data: flush before commit */
1025#define BJ_Metadata 2 /* Normal journaled metadata */
1026#define BJ_Forget 3 /* Buffer superseded by this transaction */
1027#define BJ_IO 4 /* Buffer is for temporary IO use */
1028#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
1029#define BJ_LogCtl 6 /* Buffer contains log descriptors */
1030#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
1031#define BJ_Locked 8 /* Locked for I/O during commit */
1032#define BJ_Types 9
1033
1034extern int jbd_blocks_per_page(struct inode *inode);
1035
1036#ifdef __KERNEL__
1037
1038#define buffer_trace_init(bh) do {} while (0)
1039#define print_buffer_fields(bh) do {} while (0)
1040#define print_buffer_trace(bh) do {} while (0)
1041#define BUFFER_TRACE(bh, info) do {} while (0)
1042#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
1043#define JBUFFER_TRACE(jh, info) do {} while (0)
1044
1045#endif /* __KERNEL__ */
1046
1047#endif /* _LINUX_JBD_H */
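The tid_gt()/tid_geq() helpers above compare transaction IDs through a signed difference so the comparison keeps working after the 32-bit sequence number wraps. A minimal userspace sketch of the same idiom (not taken from this header; the main() harness is purely illustrative):

#include <stdio.h>

typedef unsigned int tid_t;

static int tid_gt(tid_t x, tid_t y)
{
        int difference = (x - y);       /* wraps modulo 2^32, then sign-tested */
        return (difference > 0);
}

int main(void)
{
        tid_t old = 0xfffffff0u;        /* just before the 32-bit wrap */
        tid_t new = 0x00000010u;        /* just after the wrap */

        printf("naive new > old : %d\n", new > old);            /* 0 */
        printf("tid_gt(new, old): %d\n", tid_gt(new, old));     /* 1 */
        return 0;
}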
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index edb640ae9a94..df07e78487d5 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -29,6 +29,7 @@
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/timer.h> 30#include <linux/timer.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/bit_spinlock.h>
32#include <crypto/hash.h> 33#include <crypto/hash.h>
33#endif 34#endif
34 35
@@ -336,7 +337,45 @@ BUFFER_FNS(Freed, freed)
336BUFFER_FNS(Shadow, shadow) 337BUFFER_FNS(Shadow, shadow)
337BUFFER_FNS(Verified, verified) 338BUFFER_FNS(Verified, verified)
338 339
339#include <linux/jbd_common.h> 340static inline struct buffer_head *jh2bh(struct journal_head *jh)
341{
342 return jh->b_bh;
343}
344
345static inline struct journal_head *bh2jh(struct buffer_head *bh)
346{
347 return bh->b_private;
348}
349
350static inline void jbd_lock_bh_state(struct buffer_head *bh)
351{
352 bit_spin_lock(BH_State, &bh->b_state);
353}
354
355static inline int jbd_trylock_bh_state(struct buffer_head *bh)
356{
357 return bit_spin_trylock(BH_State, &bh->b_state);
358}
359
360static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
361{
362 return bit_spin_is_locked(BH_State, &bh->b_state);
363}
364
365static inline void jbd_unlock_bh_state(struct buffer_head *bh)
366{
367 bit_spin_unlock(BH_State, &bh->b_state);
368}
369
370static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
371{
372 bit_spin_lock(BH_JournalHead, &bh->b_state);
373}
374
375static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
376{
377 bit_spin_unlock(BH_JournalHead, &bh->b_state);
378}
340 379
341#define J_ASSERT(assert) BUG_ON(!(assert)) 380#define J_ASSERT(assert) BUG_ON(!(assert))
342 381
@@ -1042,8 +1081,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
1042extern void jbd2_journal_commit_transaction(journal_t *); 1081extern void jbd2_journal_commit_transaction(journal_t *);
1043 1082
1044/* Checkpoint list management */ 1083/* Checkpoint list management */
1045void __jbd2_journal_clean_checkpoint_list(journal_t *journal); 1084void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
1046int __jbd2_journal_remove_checkpoint(struct journal_head *); 1085int __jbd2_journal_remove_checkpoint(struct journal_head *);
1086void jbd2_journal_destroy_checkpoint(journal_t *journal);
1047void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); 1087void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
1048 1088
1049 1089
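The BH_State/BH_JournalHead bit-spinlock helpers that this hunk moves into jbd2.h are taken before touching the journal_head attached to a buffer_head; the journal_head's own lifetime is still managed separately (journal_grab_journal_head() and the BH_JournalHead lock). A hedged sketch of the usual calling pattern, where inspect_jh_example() is a hypothetical caller and not something this patch adds:

static void inspect_jh_example(struct buffer_head *bh)
{
        struct journal_head *jh;

        jbd_lock_bh_state(bh);          /* bit_spin_lock(BH_State, &bh->b_state) */
        jh = bh2jh(bh);                 /* bh->b_private */
        if (jh) {
                /* jh->b_transaction, jh->b_jlist, ... are read under BH_State */
        }
        jbd_unlock_bh_state(bh);
}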
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
deleted file mode 100644
index 3dc53432355f..000000000000
--- a/include/linux/jbd_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
1#ifndef _LINUX_JBD_STATE_H
2#define _LINUX_JBD_STATE_H
3
4#include <linux/bit_spinlock.h>
5
6static inline struct buffer_head *jh2bh(struct journal_head *jh)
7{
8 return jh->b_bh;
9}
10
11static inline struct journal_head *bh2jh(struct buffer_head *bh)
12{
13 return bh->b_private;
14}
15
16static inline void jbd_lock_bh_state(struct buffer_head *bh)
17{
18 bit_spin_lock(BH_State, &bh->b_state);
19}
20
21static inline int jbd_trylock_bh_state(struct buffer_head *bh)
22{
23 return bit_spin_trylock(BH_State, &bh->b_state);
24}
25
26static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
27{
28 return bit_spin_is_locked(BH_State, &bh->b_state);
29}
30
31static inline void jbd_unlock_bh_state(struct buffer_head *bh)
32{
33 bit_spin_unlock(BH_State, &bh->b_state);
34}
35
36static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
37{
38 bit_spin_lock(BH_JournalHead, &bh->b_state);
39}
40
41static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
42{
43 bit_spin_unlock(BH_JournalHead, &bh->b_state);
44}
45
46#endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 535fd3bb1ba8..5fdc55312334 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -351,7 +351,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
351 * directly here and from __msecs_to_jiffies() in the case where 351 * directly here and from __msecs_to_jiffies() in the case where
352 * constant folding is not possible. 352 * constant folding is not possible.
353 */ 353 */
354static inline unsigned long msecs_to_jiffies(const unsigned int m) 354static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
355{ 355{
356 if (__builtin_constant_p(m)) { 356 if (__builtin_constant_p(m)) {
357 if ((int)m < 0) 357 if ((int)m < 0)
@@ -363,18 +363,11 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m)
363} 363}
364 364
365extern unsigned long __usecs_to_jiffies(const unsigned int u); 365extern unsigned long __usecs_to_jiffies(const unsigned int u);
366#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) 366#if !(USEC_PER_SEC % HZ)
367static inline unsigned long _usecs_to_jiffies(const unsigned int u) 367static inline unsigned long _usecs_to_jiffies(const unsigned int u)
368{ 368{
369 return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); 369 return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
370} 370}
371#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
372static inline unsigned long _usecs_to_jiffies(const unsigned int u)
373{
374 return u * (HZ / USEC_PER_SEC);
375}
376static inline unsigned long _usecs_to_jiffies(const unsigned int u)
377{
378#else 371#else
379static inline unsigned long _usecs_to_jiffies(const unsigned int u) 372static inline unsigned long _usecs_to_jiffies(const unsigned int u)
380{ 373{
@@ -405,7 +398,7 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
405 * directly here and from __msecs_to_jiffies() in the case where 398 * directly here and from __msecs_to_jiffies() in the case where
406 * constant folding is not possible. 399 * constant folding is not possible.
407 */ 400 */
408static inline unsigned long usecs_to_jiffies(const unsigned int u) 401static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
409{ 402{
410 if (__builtin_constant_p(u)) { 403 if (__builtin_constant_p(u)) {
411 if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) 404 if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
@@ -416,9 +409,25 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u)
416 } 409 }
417} 410}
418 411
419extern unsigned long timespec_to_jiffies(const struct timespec *value); 412extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
420extern void jiffies_to_timespec(const unsigned long jiffies, 413extern void jiffies_to_timespec64(const unsigned long jiffies,
421 struct timespec *value); 414 struct timespec64 *value);
415static inline unsigned long timespec_to_jiffies(const struct timespec *value)
416{
417 struct timespec64 ts = timespec_to_timespec64(*value);
418
419 return timespec64_to_jiffies(&ts);
420}
421
422static inline void jiffies_to_timespec(const unsigned long jiffies,
423 struct timespec *value)
424{
425 struct timespec64 ts;
426
427 jiffies_to_timespec64(jiffies, &ts);
428 *value = timespec64_to_timespec(ts);
429}
430
422extern unsigned long timeval_to_jiffies(const struct timeval *value); 431extern unsigned long timeval_to_jiffies(const struct timeval *value);
423extern void jiffies_to_timeval(const unsigned long jiffies, 432extern void jiffies_to_timeval(const unsigned long jiffies,
424 struct timeval *value); 433 struct timeval *value);
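The surviving _usecs_to_jiffies() branch rounds up to the next tick: (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ). A small userspace check of that arithmetic, assuming HZ = 250 purely for illustration:

#include <stdio.h>

#define HZ              250
#define USEC_PER_SEC    1000000L

static unsigned long _usecs_to_jiffies(unsigned int u)
{
        return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
}

int main(void)
{
        /* one tick is 4000 us at HZ = 250; anything in (0, 4000] is one jiffy */
        printf("%lu\n", _usecs_to_jiffies(1));          /* 1 */
        printf("%lu\n", _usecs_to_jiffies(4000));       /* 1 */
        printf("%lu\n", _usecs_to_jiffies(4001));       /* 2 */
        return 0;
}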
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f4de473f226b..7f653e8f6690 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -7,17 +7,52 @@
7 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com> 7 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
9 * 9 *
10 * DEPRECATED API:
11 *
12 * The use of 'struct static_key' directly is now DEPRECATED. In addition,
13 * static_key_{true,false}() is also DEPRECATED. I.e. DO NOT use the following:
14 *
15 * struct static_key false = STATIC_KEY_INIT_FALSE;
16 * struct static_key true = STATIC_KEY_INIT_TRUE;
17 * static_key_true()
18 * static_key_false()
19 *
20 * The updated API replacements are:
21 *
22 * DEFINE_STATIC_KEY_TRUE(key);
23 * DEFINE_STATIC_KEY_FALSE(key);
24 * static_branch_likely()
25 * static_branch_unlikely()
26 *
10 * Jump labels provide an interface to generate dynamic branches using 27 * Jump labels provide an interface to generate dynamic branches using
11 * self-modifying code. Assuming toolchain and architecture support, the result 28 * self-modifying code. Assuming toolchain and architecture support, if we
12 * of a "if (static_key_false(&key))" statement is an unconditional branch (which 29 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
13 * defaults to false - and the true block is placed out of line). 30 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
31 * (which defaults to false - and the true block is placed out of line).
32 * Similarly, we can define an initially true key via
33 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
34 * "if (static_branch_unlikely(&key))", in which case we will generate an
35 * unconditional branch to the out-of-line true branch. Keys that are
36 * initially true or false can be used in both static_branch_unlikely()
37 * and static_branch_likely() statements.
38 *
39 * At runtime we can change the branch target by setting the key
40 * to true via a call to static_branch_enable(), or false using
41 * static_branch_disable(). If the direction of the branch is switched by
42 * these calls then we run-time modify the branch target via a
43 * no-op -> jump or jump -> no-op conversion. For example, for an
44 * initially false key that is used in an "if (static_branch_unlikely(&key))"
45 * statement, setting the key to true requires us to patch in a jump
46 * to the out-of-line of true branch.
14 * 47 *
15 * However at runtime we can change the branch target using 48 * In addition to static_branch_{enable,disable}, we can also reference count
16 * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key 49 * the key or branch direction via static_branch_{inc,dec}. Thus,
17 * object, and for as long as there are references all branches referring to 50 * static_branch_inc() can be thought of as a 'make more true' and
18 * that particular key will point to the (out of line) true block. 51 * static_branch_dec() as a 'make more false'. The inc()/dec()
52 * interface is meant to be used exclusively from the inc()/dec() for a given
53 * key.
19 * 54 *
20 * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions 55 * Since this relies on modifying code, the branch modifying functions
21 * must be considered absolute slow paths (machine wide synchronization etc.). 56 * must be considered absolute slow paths (machine wide synchronization etc.).
22 * OTOH, since the affected branches are unconditional, their runtime overhead 57 * OTOH, since the affected branches are unconditional, their runtime overhead
23 * will be absolutely minimal, esp. in the default (off) case where the total 58 * will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +64,10 @@
29 * cause significant performance degradation. Struct static_key_deferred and 64 * cause significant performance degradation. Struct static_key_deferred and
30 * static_key_slow_dec_deferred() provide for this. 65 * static_key_slow_dec_deferred() provide for this.
31 * 66 *
32 * Lacking toolchain and or architecture support, jump labels fall back to a simple 67 * Lacking toolchain and or architecture support, static keys fall back to a
33 * conditional branch. 68 * simple conditional branch.
34 *
35 * struct static_key my_key = STATIC_KEY_INIT_TRUE;
36 *
37 * if (static_key_true(&my_key)) {
38 * }
39 * 69 *
40 * will result in the true case being in-line and starts the key with a single 70 * Additional babbling in: Documentation/static-keys.txt
41 * reference. Mixing static_key_true() and static_key_false() on the same key is not
42 * allowed.
43 *
44 * Not initializing the key (static data is initialized to 0s anyway) is the
45 * same as using STATIC_KEY_INIT_FALSE.
46 */ 71 */
47 72
48#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) 73#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -86,8 +111,8 @@ struct static_key {
86#ifndef __ASSEMBLY__ 111#ifndef __ASSEMBLY__
87 112
88enum jump_label_type { 113enum jump_label_type {
89 JUMP_LABEL_DISABLE = 0, 114 JUMP_LABEL_NOP = 0,
90 JUMP_LABEL_ENABLE, 115 JUMP_LABEL_JMP,
91}; 116};
92 117
93struct module; 118struct module;
@@ -101,33 +126,18 @@ static inline int static_key_count(struct static_key *key)
101 126
102#ifdef HAVE_JUMP_LABEL 127#ifdef HAVE_JUMP_LABEL
103 128
104#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL 129#define JUMP_TYPE_FALSE 0UL
105#define JUMP_LABEL_TYPE_TRUE_BRANCH 1UL 130#define JUMP_TYPE_TRUE 1UL
106#define JUMP_LABEL_TYPE_MASK 1UL 131#define JUMP_TYPE_MASK 1UL
107
108static
109inline struct jump_entry *jump_label_get_entries(struct static_key *key)
110{
111 return (struct jump_entry *)((unsigned long)key->entries
112 & ~JUMP_LABEL_TYPE_MASK);
113}
114
115static inline bool jump_label_get_branch_default(struct static_key *key)
116{
117 if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
118 JUMP_LABEL_TYPE_TRUE_BRANCH)
119 return true;
120 return false;
121}
122 132
123static __always_inline bool static_key_false(struct static_key *key) 133static __always_inline bool static_key_false(struct static_key *key)
124{ 134{
125 return arch_static_branch(key); 135 return arch_static_branch(key, false);
126} 136}
127 137
128static __always_inline bool static_key_true(struct static_key *key) 138static __always_inline bool static_key_true(struct static_key *key)
129{ 139{
130 return !static_key_false(key); 140 return !arch_static_branch(key, true);
131} 141}
132 142
133extern struct jump_entry __start___jump_table[]; 143extern struct jump_entry __start___jump_table[];
@@ -145,12 +155,12 @@ extern void static_key_slow_inc(struct static_key *key);
145extern void static_key_slow_dec(struct static_key *key); 155extern void static_key_slow_dec(struct static_key *key);
146extern void jump_label_apply_nops(struct module *mod); 156extern void jump_label_apply_nops(struct module *mod);
147 157
148#define STATIC_KEY_INIT_TRUE ((struct static_key) \ 158#define STATIC_KEY_INIT_TRUE \
149 { .enabled = ATOMIC_INIT(1), \ 159 { .enabled = ATOMIC_INIT(1), \
150 .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH }) 160 .entries = (void *)JUMP_TYPE_TRUE }
151#define STATIC_KEY_INIT_FALSE ((struct static_key) \ 161#define STATIC_KEY_INIT_FALSE \
152 { .enabled = ATOMIC_INIT(0), \ 162 { .enabled = ATOMIC_INIT(0), \
153 .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH }) 163 .entries = (void *)JUMP_TYPE_FALSE }
154 164
155#else /* !HAVE_JUMP_LABEL */ 165#else /* !HAVE_JUMP_LABEL */
156 166
@@ -198,10 +208,8 @@ static inline int jump_label_apply_nops(struct module *mod)
198 return 0; 208 return 0;
199} 209}
200 210
201#define STATIC_KEY_INIT_TRUE ((struct static_key) \ 211#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
202 { .enabled = ATOMIC_INIT(1) }) 212#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
203#define STATIC_KEY_INIT_FALSE ((struct static_key) \
204 { .enabled = ATOMIC_INIT(0) })
205 213
206#endif /* HAVE_JUMP_LABEL */ 214#endif /* HAVE_JUMP_LABEL */
207 215
@@ -213,6 +221,157 @@ static inline bool static_key_enabled(struct static_key *key)
213 return static_key_count(key) > 0; 221 return static_key_count(key) > 0;
214} 222}
215 223
224static inline void static_key_enable(struct static_key *key)
225{
226 int count = static_key_count(key);
227
228 WARN_ON_ONCE(count < 0 || count > 1);
229
230 if (!count)
231 static_key_slow_inc(key);
232}
233
234static inline void static_key_disable(struct static_key *key)
235{
236 int count = static_key_count(key);
237
238 WARN_ON_ONCE(count < 0 || count > 1);
239
240 if (count)
241 static_key_slow_dec(key);
242}
243
244/* -------------------------------------------------------------------------- */
245
246/*
247 * Two type wrappers around static_key, such that we can use compile time
248 * type differentiation to emit the right code.
249 *
250 * All the below code is macros in order to play type games.
251 */
252
253struct static_key_true {
254 struct static_key key;
255};
256
257struct static_key_false {
258 struct static_key key;
259};
260
261#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, }
262#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
263
264#define DEFINE_STATIC_KEY_TRUE(name) \
265 struct static_key_true name = STATIC_KEY_TRUE_INIT
266
267#define DEFINE_STATIC_KEY_FALSE(name) \
268 struct static_key_false name = STATIC_KEY_FALSE_INIT
269
270#ifdef HAVE_JUMP_LABEL
271
272/*
273 * Combine the right initial value (type) with the right branch order
274 * to generate the desired result.
275 *
276 *
277 * type\branch| likely (1) | unlikely (0)
278 * -----------+-----------------------+------------------
279 * | |
280 * true (1) | ... | ...
281 * | NOP | JMP L
282 * | <br-stmts> | 1: ...
283 * | L: ... |
284 * | |
285 * | | L: <br-stmts>
286 * | | jmp 1b
287 * | |
288 * -----------+-----------------------+------------------
289 * | |
290 * false (0) | ... | ...
291 * | JMP L | NOP
292 * | <br-stmts> | 1: ...
293 * | L: ... |
294 * | |
295 * | | L: <br-stmts>
296 * | | jmp 1b
297 * | |
298 * -----------+-----------------------+------------------
299 *
300 * The initial value is encoded in the LSB of static_key::entries,
301 * type: 0 = false, 1 = true.
302 *
303 * The branch type is encoded in the LSB of jump_entry::key,
304 * branch: 0 = unlikely, 1 = likely.
305 *
306 * This gives the following logic table:
307 *
308 * enabled type branch instruction
309 * -----------------------------+-----------
310 * 0 0 0 | NOP
311 * 0 0 1 | JMP
312 * 0 1 0 | NOP
313 * 0 1 1 | JMP
314 *
315 * 1 0 0 | JMP
316 * 1 0 1 | NOP
317 * 1 1 0 | JMP
318 * 1 1 1 | NOP
319 *
320 * Which gives the following functions:
321 *
322 * dynamic: instruction = enabled ^ branch
323 * static: instruction = type ^ branch
324 *
325 * See jump_label_type() / jump_label_init_type().
326 */
327
328extern bool ____wrong_branch_error(void);
329
330#define static_branch_likely(x) \
331({ \
332 bool branch; \
333 if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
334 branch = !arch_static_branch(&(x)->key, true); \
335 else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
336 branch = !arch_static_branch_jump(&(x)->key, true); \
337 else \
338 branch = ____wrong_branch_error(); \
339 branch; \
340})
341
342#define static_branch_unlikely(x) \
343({ \
344 bool branch; \
345 if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
346 branch = arch_static_branch_jump(&(x)->key, false); \
347 else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
348 branch = arch_static_branch(&(x)->key, false); \
349 else \
350 branch = ____wrong_branch_error(); \
351 branch; \
352})
353
354#else /* !HAVE_JUMP_LABEL */
355
356#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
357#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
358
359#endif /* HAVE_JUMP_LABEL */
360
361/*
362 * Advanced usage; refcount, branch is enabled when: count != 0
363 */
364
365#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
366#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
367
368/*
369 * Normal usage; boolean enable/disable.
370 */
371
372#define static_branch_enable(x) static_key_enable(&(x)->key)
373#define static_branch_disable(x) static_key_disable(&(x)->key)
374
216#endif /* _LINUX_JUMP_LABEL_H */ 375#endif /* _LINUX_JUMP_LABEL_H */
217 376
218#endif /* __ASSEMBLY__ */ 377#endif /* __ASSEMBLY__ */
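A usage sketch of the static-branch API documented in the header comment above; the key name sample_feature_key and both functions are hypothetical, chosen only to show the shape of the calls:

DEFINE_STATIC_KEY_FALSE(sample_feature_key);

void sample_fast_path(void)
{
        if (static_branch_unlikely(&sample_feature_key)) {
                /* true block: placed out of line, reached only after the
                 * key has been switched on */
        }
        /* default path: a NOP sits where the branch would otherwise be */
}

void sample_feature_setup(bool want_it)
{
        if (want_it)
                static_branch_enable(&sample_feature_key);      /* patch NOP -> JMP */
        else
                static_branch_disable(&sample_feature_key);
}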
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5486d777b706..4b9f85c963d0 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -10,11 +10,19 @@ struct vm_struct;
10#ifdef CONFIG_KASAN 10#ifdef CONFIG_KASAN
11 11
12#define KASAN_SHADOW_SCALE_SHIFT 3 12#define KASAN_SHADOW_SCALE_SHIFT 3
13#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
14 13
15#include <asm/kasan.h> 14#include <asm/kasan.h>
15#include <asm/pgtable.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17 17
18extern unsigned char kasan_zero_page[PAGE_SIZE];
19extern pte_t kasan_zero_pte[PTRS_PER_PTE];
20extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
21extern pud_t kasan_zero_pud[PTRS_PER_PUD];
22
23void kasan_populate_zero_shadow(const void *shadow_start,
24 const void *shadow_end);
25
18static inline void *kasan_mem_to_shadow(const void *addr) 26static inline void *kasan_mem_to_shadow(const void *addr)
19{ 27{
20 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT) 28 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 123be25ea15a..5d4e9c4b821d 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -266,6 +266,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
266} 266}
267 267
268int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen); 268int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
269size_t kernfs_path_len(struct kernfs_node *kn);
269char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, 270char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
270 size_t buflen); 271 size_t buflen);
271void pr_cont_kernfs_name(struct kernfs_node *kn); 272void pr_cont_kernfs_name(struct kernfs_node *kn);
@@ -332,6 +333,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
332static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) 333static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
333{ return -ENOSYS; } 334{ return -ENOSYS; }
334 335
336static inline size_t kernfs_path_len(struct kernfs_node *kn)
337{ return 0; }
338
335static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, 339static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
336 size_t buflen) 340 size_t buflen)
337{ return NULL; } 341{ return NULL; }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e804306ef5e8..d140b1e9faa7 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -16,7 +16,7 @@
16 16
17#include <uapi/linux/kexec.h> 17#include <uapi/linux/kexec.h>
18 18
19#ifdef CONFIG_KEXEC 19#ifdef CONFIG_KEXEC_CORE
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/linkage.h> 21#include <linux/linkage.h>
22#include <linux/compat.h> 22#include <linux/compat.h>
@@ -318,12 +318,24 @@ int crash_shrink_memory(unsigned long new_size);
318size_t crash_get_memory_size(void); 318size_t crash_get_memory_size(void);
319void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); 319void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
320 320
321#else /* !CONFIG_KEXEC */ 321int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
322 unsigned long buf_len);
323void * __weak arch_kexec_kernel_image_load(struct kimage *image);
324int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
325int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
326 unsigned long buf_len);
327int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
328 Elf_Shdr *sechdrs, unsigned int relsec);
329int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
330 unsigned int relsec);
331
332#else /* !CONFIG_KEXEC_CORE */
322struct pt_regs; 333struct pt_regs;
323struct task_struct; 334struct task_struct;
324static inline void crash_kexec(struct pt_regs *regs) { } 335static inline void crash_kexec(struct pt_regs *regs) { }
325static inline int kexec_should_crash(struct task_struct *p) { return 0; } 336static inline int kexec_should_crash(struct task_struct *p) { return 0; }
326#endif /* CONFIG_KEXEC */ 337#define kexec_in_progress false
338#endif /* CONFIG_KEXEC_CORE */
327 339
328#endif /* !defined(__ASSEBMLY__) */ 340#endif /* !defined(__ASSEBMLY__) */
329 341
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 61e5b723ae73..953f283f8451 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i);
63extern void klist_iter_init_node(struct klist *k, struct klist_iter *i, 63extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
64 struct klist_node *n); 64 struct klist_node *n);
65extern void klist_iter_exit(struct klist_iter *i); 65extern void klist_iter_exit(struct klist_iter *i);
66extern struct klist_node *klist_prev(struct klist_iter *i);
66extern struct klist_node *klist_next(struct klist_iter *i); 67extern struct klist_node *klist_next(struct klist_iter *i);
67 68
68#endif 69#endif
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 0555cc66a15b..fcfd2bf14d3f 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -85,8 +85,6 @@ enum umh_disable_depth {
85 UMH_DISABLED, 85 UMH_DISABLED,
86}; 86};
87 87
88extern void usermodehelper_init(void);
89
90extern int __usermodehelper_disable(enum umh_disable_depth depth); 88extern int __usermodehelper_disable(enum umh_disable_depth depth);
91extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth); 89extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
92 90
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1ab54754a86d..8f6849084248 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -267,6 +267,8 @@ extern void show_registers(struct pt_regs *regs);
267extern void kprobes_inc_nmissed_count(struct kprobe *p); 267extern void kprobes_inc_nmissed_count(struct kprobe *p);
268extern bool arch_within_kprobe_blacklist(unsigned long addr); 268extern bool arch_within_kprobe_blacklist(unsigned long addr);
269 269
270extern bool within_kprobe_blacklist(unsigned long addr);
271
270struct kprobe_insn_cache { 272struct kprobe_insn_cache {
271 struct mutex mutex; 273 struct mutex mutex;
272 void *(*alloc)(void); /* allocate insn page */ 274 void *(*alloc)(void); /* allocate insn page */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 13d55206ccf6..e691b6a23f72 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -11,7 +11,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
11 const char namefmt[], ...); 11 const char namefmt[], ...);
12 12
13#define kthread_create(threadfn, data, namefmt, arg...) \ 13#define kthread_create(threadfn, data, namefmt, arg...) \
14 kthread_create_on_node(threadfn, data, -1, namefmt, ##arg) 14 kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
15 15
16 16
17struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), 17struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
@@ -38,6 +38,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
38}) 38})
39 39
40void kthread_bind(struct task_struct *k, unsigned int cpu); 40void kthread_bind(struct task_struct *k, unsigned int cpu);
41void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
41int kthread_stop(struct task_struct *k); 42int kthread_stop(struct task_struct *k);
42bool kthread_should_stop(void); 43bool kthread_should_stop(void);
43bool kthread_should_park(void); 44bool kthread_should_park(void);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 05e99b8ef465..1bef9e21e725 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -139,6 +139,7 @@ static inline bool is_error_page(struct page *page)
139#define KVM_REQ_DISABLE_IBS 24 139#define KVM_REQ_DISABLE_IBS 24
140#define KVM_REQ_APIC_PAGE_RELOAD 25 140#define KVM_REQ_APIC_PAGE_RELOAD 25
141#define KVM_REQ_SMI 26 141#define KVM_REQ_SMI 26
142#define KVM_REQ_HV_CRASH 27
142 143
143#define KVM_USERSPACE_IRQ_SOURCE_ID 0 144#define KVM_USERSPACE_IRQ_SOURCE_ID 0
144#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 145#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -241,6 +242,7 @@ struct kvm_vcpu {
241 int sigset_active; 242 int sigset_active;
242 sigset_t sigset; 243 sigset_t sigset;
243 struct kvm_vcpu_stat stat; 244 struct kvm_vcpu_stat stat;
245 unsigned int halt_poll_ns;
244 246
245#ifdef CONFIG_HAS_IOMEM 247#ifdef CONFIG_HAS_IOMEM
246 int mmio_needed; 248 int mmio_needed;
@@ -363,9 +365,6 @@ struct kvm {
363 struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM]; 365 struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
364 struct srcu_struct srcu; 366 struct srcu_struct srcu;
365 struct srcu_struct irq_srcu; 367 struct srcu_struct irq_srcu;
366#ifdef CONFIG_KVM_APIC_ARCHITECTURE
367 u32 bsp_vcpu_id;
368#endif
369 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; 368 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
370 atomic_t online_vcpus; 369 atomic_t online_vcpus;
371 int last_boosted_vcpu; 370 int last_boosted_vcpu;
@@ -424,8 +423,15 @@ struct kvm {
424#define vcpu_unimpl(vcpu, fmt, ...) \ 423#define vcpu_unimpl(vcpu, fmt, ...) \
425 kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) 424 kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
426 425
426#define vcpu_debug(vcpu, fmt, ...) \
427 kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
428
427static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) 429static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
428{ 430{
431 /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
432 * the caller has read kvm->online_vcpus before (as is the case
433 * for kvm_for_each_vcpu, for example).
434 */
429 smp_rmb(); 435 smp_rmb();
430 return kvm->vcpus[i]; 436 return kvm->vcpus[i];
431} 437}
@@ -1055,22 +1061,9 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
1055#endif /* CONFIG_HAVE_KVM_EVENTFD */ 1061#endif /* CONFIG_HAVE_KVM_EVENTFD */
1056 1062
1057#ifdef CONFIG_KVM_APIC_ARCHITECTURE 1063#ifdef CONFIG_KVM_APIC_ARCHITECTURE
1058static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
1059{
1060 return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
1061}
1062
1063static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
1064{
1065 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
1066}
1067
1068bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu); 1064bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
1069
1070#else 1065#else
1071
1072static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } 1066static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
1073
1074#endif 1067#endif
1075 1068
1076static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) 1069static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
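The new comment in kvm_get_vcpu() describes the reader side of an ordering pair: the caller loads online_vcpus first, and the smp_rmb() then orders that load against the subsequent reads of kvm->vcpus[i] written before the producer's smp_wmb(). A rough sketch of such a reader (the function name is made up; kvm_for_each_vcpu() is the in-tree equivalent):

static void print_online_vcpus_example(struct kvm *kvm)
{
        int i, n = atomic_read(&kvm->online_vcpus);

        for (i = 0; i < n; i++) {
                struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);   /* smp_rmb() inside */

                if (vcpu)
                        vcpu_debug(vcpu, "online\n");
        }
}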
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 75e3af01ee32..3f021dc5da8c 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -31,6 +31,9 @@ enum {
31 ND_CMD_ARS_STATUS_MAX = SZ_4K, 31 ND_CMD_ARS_STATUS_MAX = SZ_4K,
32 ND_MAX_MAPPINGS = 32, 32 ND_MAX_MAPPINGS = 32,
33 33
34 /* region flag indicating to direct-map persistent memory by default */
35 ND_REGION_PAGEMAP = 0,
36
34 /* mark newly adjusted resources as requiring a label update */ 37 /* mark newly adjusted resources as requiring a label update */
35 DPA_RESOURCE_ADJUSTED = 1 << 0, 38 DPA_RESOURCE_ADJUSTED = 1 << 0,
36}; 39};
@@ -91,6 +94,7 @@ struct nd_region_desc {
91 void *provider_data; 94 void *provider_data;
92 int num_lanes; 95 int num_lanes;
93 int numa_node; 96 int numa_node;
97 unsigned long flags;
94}; 98};
95 99
96struct nvdimm_bus; 100struct nvdimm_bus;
diff --git a/include/linux/list.h b/include/linux/list.h
index feb773c76ee0..3e3e64a61002 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -672,6 +672,11 @@ static inline void hlist_add_fake(struct hlist_node *n)
672 n->pprev = &n->next; 672 n->pprev = &n->next;
673} 673}
674 674
675static inline bool hlist_fake(struct hlist_node *h)
676{
677 return h->pprev == &h->next;
678}
679
675/* 680/*
676 * Move a list from one list head to another. Fixup the pprev 681 * Move a list from one list head to another. Fixup the pprev
677 * reference of the first entry if it exists. 682 * reference of the first entry if it exists.
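hlist_add_fake() points a node's pprev back at its own next field so the node looks hashed without being on any list; the new hlist_fake() helper simply detects that state. A tiny illustrative helper, not part of the patch:

static bool node_is_fake_example(struct hlist_node *n)
{
        INIT_HLIST_NODE(n);
        hlist_add_fake(n);              /* n->pprev = &n->next */

        return hlist_fake(n);           /* true: pprev points back into n */
}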
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fbf10a0bc095..fd4ca0b4fe0f 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -55,8 +55,8 @@
55 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 55 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
56 */ 56 */
57 57
58#include <linux/atomic.h>
58#include <linux/kernel.h> 59#include <linux/kernel.h>
59#include <asm/cmpxchg.h>
60 60
61struct llist_head { 61struct llist_head {
62 struct llist_node *first; 62 struct llist_node *first;
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 1cc89e9df480..ffb9c9da4f39 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -40,6 +40,11 @@ struct lsm_network_audit {
40 } fam; 40 } fam;
41}; 41};
42 42
43struct lsm_ioctlop_audit {
44 struct path path;
45 u16 cmd;
46};
47
43/* Auxiliary data to use in generating the audit record. */ 48/* Auxiliary data to use in generating the audit record. */
44struct common_audit_data { 49struct common_audit_data {
45 char type; 50 char type;
@@ -53,6 +58,7 @@ struct common_audit_data {
53#define LSM_AUDIT_DATA_KMOD 8 58#define LSM_AUDIT_DATA_KMOD 8
54#define LSM_AUDIT_DATA_INODE 9 59#define LSM_AUDIT_DATA_INODE 9
55#define LSM_AUDIT_DATA_DENTRY 10 60#define LSM_AUDIT_DATA_DENTRY 10
61#define LSM_AUDIT_DATA_IOCTL_OP 11
56 union { 62 union {
57 struct path path; 63 struct path path;
58 struct dentry *dentry; 64 struct dentry *dentry;
@@ -68,6 +74,7 @@ struct common_audit_data {
68 } key_struct; 74 } key_struct;
69#endif 75#endif
70 char *kmod_name; 76 char *kmod_name;
77 struct lsm_ioctlop_audit *op;
71 } u; 78 } u;
72 /* this union contains LSM specific data */ 79 /* this union contains LSM specific data */
73 union { 80 union {
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 9429f054c323..ec3a6bab29de 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1881,8 +1881,10 @@ static inline void security_delete_hooks(struct security_hook_list *hooks,
1881 1881
1882extern int __init security_module_enable(const char *module); 1882extern int __init security_module_enable(const char *module);
1883extern void __init capability_add_hooks(void); 1883extern void __init capability_add_hooks(void);
1884#ifdef CONFIG_SECURITY_YAMA_STACKED 1884#ifdef CONFIG_SECURITY_YAMA
1885void __init yama_add_hooks(void); 1885extern void __init yama_add_hooks(void);
1886#else
1887static inline void __init yama_add_hooks(void) { }
1886#endif 1888#endif
1887 1889
1888#endif /* ! __LINUX_LSM_HOOKS_H */ 1890#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 68c42454439b..74deadb42d76 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -9,7 +9,7 @@
9 9
10#include <linux/of.h> 10#include <linux/of.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/timer.h> 12#include <linux/hrtimer.h>
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/completion.h> 14#include <linux/completion.h>
15 15
@@ -67,7 +67,8 @@ struct mbox_chan_ops {
67 * @txpoll_period: If 'txdone_poll' is in effect, the API polls for 67 * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
68 * last TX's status after these many millisecs 68 * last TX's status after these many millisecs
69 * @of_xlate: Controller driver specific mapping of channel via DT 69 * @of_xlate: Controller driver specific mapping of channel via DT
70 * @poll: API private. Used to poll for TXDONE on all channels. 70 * @poll_hrt: API private. hrtimer used to poll for TXDONE on all
71 * channels.
71 * @node: API private. To hook into list of controllers. 72 * @node: API private. To hook into list of controllers.
72 */ 73 */
73struct mbox_controller { 74struct mbox_controller {
@@ -81,7 +82,7 @@ struct mbox_controller {
81 struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox, 82 struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
82 const struct of_phandle_args *sp); 83 const struct of_phandle_args *sp);
83 /* Internal to API */ 84 /* Internal to API */
84 struct timer_list poll; 85 struct hrtimer poll_hrt;
85 struct list_head node; 86 struct list_head node;
86}; 87};
87 88
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index a16b1f9c1aca..0962b2ca628a 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -6,6 +6,7 @@
6#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
7 7
8struct mei_cl_device; 8struct mei_cl_device;
9struct mei_device;
9 10
10typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, 11typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
11 u32 events, void *context); 12 u32 events, void *context);
@@ -17,6 +18,8 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
17 * Drivers for MEI devices will get an mei_cl_device pointer 18 * Drivers for MEI devices will get an mei_cl_device pointer
18 * when being probed and shall use it for doing ME bus I/O. 19 * when being probed and shall use it for doing ME bus I/O.
19 * 20 *
21 * @bus_list: device on the bus list
22 * @bus: parent mei device
20 * @dev: linux driver model device pointer 23 * @dev: linux driver model device pointer
21 * @me_cl: me client 24 * @me_cl: me client
22 * @cl: mei client 25 * @cl: mei client
@@ -25,10 +28,16 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
25 * @event_cb: Drivers register this callback to get asynchronous ME 28 * @event_cb: Drivers register this callback to get asynchronous ME
26 * events (e.g. Rx buffer pending) notifications. 29 * events (e.g. Rx buffer pending) notifications.
27 * @event_context: event callback run context 30 * @event_context: event callback run context
31 * @events_mask: Events bit mask requested by driver.
28 * @events: Events bitmask sent to the driver. 32 * @events: Events bitmask sent to the driver.
33 *
34 * @do_match: whether device can be matched with a driver
35 * @is_added: device is already scanned
29 * @priv_data: client private data 36 * @priv_data: client private data
30 */ 37 */
31struct mei_cl_device { 38struct mei_cl_device {
39 struct list_head bus_list;
40 struct mei_device *bus;
32 struct device dev; 41 struct device dev;
33 42
34 struct mei_me_client *me_cl; 43 struct mei_me_client *me_cl;
@@ -38,8 +47,12 @@ struct mei_cl_device {
38 struct work_struct event_work; 47 struct work_struct event_work;
39 mei_cl_event_cb_t event_cb; 48 mei_cl_event_cb_t event_cb;
40 void *event_context; 49 void *event_context;
50 unsigned long events_mask;
41 unsigned long events; 51 unsigned long events;
42 52
53 unsigned int do_match:1;
54 unsigned int is_added:1;
55
43 void *priv_data; 56 void *priv_data;
44}; 57};
45 58
@@ -65,10 +78,12 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
65ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); 78ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
66 79
67int mei_cl_register_event_cb(struct mei_cl_device *device, 80int mei_cl_register_event_cb(struct mei_cl_device *device,
81 unsigned long event_mask,
68 mei_cl_event_cb_t read_cb, void *context); 82 mei_cl_event_cb_t read_cb, void *context);
69 83
70#define MEI_CL_EVENT_RX 0 84#define MEI_CL_EVENT_RX 0
71#define MEI_CL_EVENT_TX 1 85#define MEI_CL_EVENT_TX 1
86#define MEI_CL_EVENT_NOTIF 2
72 87
73void *mei_cl_get_drvdata(const struct mei_cl_device *device); 88void *mei_cl_get_drvdata(const struct mei_cl_device *device);
74void mei_cl_set_drvdata(struct mei_cl_device *device, void *data); 89void mei_cl_set_drvdata(struct mei_cl_device *device, void *data);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index cc4b01972060..c518eb589260 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -77,6 +77,8 @@ int memblock_remove(phys_addr_t base, phys_addr_t size);
77int memblock_free(phys_addr_t base, phys_addr_t size); 77int memblock_free(phys_addr_t base, phys_addr_t size);
78int memblock_reserve(phys_addr_t base, phys_addr_t size); 78int memblock_reserve(phys_addr_t base, phys_addr_t size);
79void memblock_trim_memory(phys_addr_t align); 79void memblock_trim_memory(phys_addr_t align);
80bool memblock_overlaps_region(struct memblock_type *type,
81 phys_addr_t base, phys_addr_t size);
80int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); 82int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
81int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); 83int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
82int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); 84int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
@@ -323,7 +325,7 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit);
323int memblock_is_memory(phys_addr_t addr); 325int memblock_is_memory(phys_addr_t addr);
324int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); 326int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
325int memblock_is_reserved(phys_addr_t addr); 327int memblock_is_reserved(phys_addr_t addr);
326int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); 328bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
327 329
328extern void __memblock_dump_all(void); 330extern void __memblock_dump_all(void);
329 331
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 73b02b0a8f60..ad800e62cb7a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -23,6 +23,11 @@
23#include <linux/vm_event_item.h> 23#include <linux/vm_event_item.h>
24#include <linux/hardirq.h> 24#include <linux/hardirq.h>
25#include <linux/jump_label.h> 25#include <linux/jump_label.h>
26#include <linux/page_counter.h>
27#include <linux/vmpressure.h>
28#include <linux/eventfd.h>
29#include <linux/mmzone.h>
30#include <linux/writeback.h>
26 31
27struct mem_cgroup; 32struct mem_cgroup;
28struct page; 33struct page;
@@ -67,12 +72,221 @@ enum mem_cgroup_events_index {
67 MEMCG_NR_EVENTS, 72 MEMCG_NR_EVENTS,
68}; 73};
69 74
75/*
76 * Per memcg event counter is incremented at every pagein/pageout. With THP,
77 * it will be incremented by the number of pages. This counter is used to
78 * trigger some periodic events. This is straightforward and better
79 * than using jiffies etc. to handle periodic memcg events.
80 */
81enum mem_cgroup_events_target {
82 MEM_CGROUP_TARGET_THRESH,
83 MEM_CGROUP_TARGET_SOFTLIMIT,
84 MEM_CGROUP_TARGET_NUMAINFO,
85 MEM_CGROUP_NTARGETS,
86};
87
88/*
89 * Bits in struct cg_proto.flags
90 */
91enum cg_proto_flags {
92 /* Currently active and new sockets should be assigned to cgroups */
93 MEMCG_SOCK_ACTIVE,
94 /* It was ever activated; we must disarm static keys on destruction */
95 MEMCG_SOCK_ACTIVATED,
96};
97
98struct cg_proto {
99 struct page_counter memory_allocated; /* Current allocated memory. */
100 struct percpu_counter sockets_allocated; /* Current number of sockets. */
101 int memory_pressure;
102 long sysctl_mem[3];
103 unsigned long flags;
104 /*
105 * memcg field is used to find which memcg we belong to directly.
106 * Each memcg struct can hold more than one cg_proto, so container_of
107 * won't really cut it.
108 *
109 * The elegant solution would be having an inverse function to
110 * proto_cgroup in struct proto, but that means polluting the structure
111 * for everybody, instead of just for memcg users.
112 */
113 struct mem_cgroup *memcg;
114};
115
70#ifdef CONFIG_MEMCG 116#ifdef CONFIG_MEMCG
117struct mem_cgroup_stat_cpu {
118 long count[MEM_CGROUP_STAT_NSTATS];
119 unsigned long events[MEMCG_NR_EVENTS];
120 unsigned long nr_page_events;
121 unsigned long targets[MEM_CGROUP_NTARGETS];
122};
123
124struct mem_cgroup_reclaim_iter {
125 struct mem_cgroup *position;
126 /* scan generation, increased every round-trip */
127 unsigned int generation;
128};
129
130/*
131 * per-zone information in memory controller.
132 */
133struct mem_cgroup_per_zone {
134 struct lruvec lruvec;
135 unsigned long lru_size[NR_LRU_LISTS];
136
137 struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
138
139 struct rb_node tree_node; /* RB tree node */
140 unsigned long usage_in_excess;/* Set to the value by which */
141 /* the soft limit is exceeded*/
142 bool on_tree;
143 struct mem_cgroup *memcg; /* Back pointer, we cannot */
144 /* use container_of */
145};
146
147struct mem_cgroup_per_node {
148 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
149};
150
151struct mem_cgroup_threshold {
152 struct eventfd_ctx *eventfd;
153 unsigned long threshold;
154};
155
156/* For threshold */
157struct mem_cgroup_threshold_ary {
158 /* An array index points to threshold just below or equal to usage. */
159 int current_threshold;
160 /* Size of entries[] */
161 unsigned int size;
162 /* Array of thresholds */
163 struct mem_cgroup_threshold entries[0];
164};
165
166struct mem_cgroup_thresholds {
167 /* Primary thresholds array */
168 struct mem_cgroup_threshold_ary *primary;
169 /*
170 * Spare threshold array.
171 * This is needed to make mem_cgroup_unregister_event() "never fail".
172 * It must be able to store at least primary->size - 1 entries.
173 */
174 struct mem_cgroup_threshold_ary *spare;
175};
176
177/*
178 * The memory controller data structure. The memory controller controls both
179 * page cache and RSS per cgroup. We would eventually like to provide
180 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
181 * to help the administrator determine what knobs to tune.
182 */
183struct mem_cgroup {
184 struct cgroup_subsys_state css;
185
186 /* Accounted resources */
187 struct page_counter memory;
188 struct page_counter memsw;
189 struct page_counter kmem;
190
191 /* Normal memory consumption range */
192 unsigned long low;
193 unsigned long high;
194
195 unsigned long soft_limit;
196
197 /* vmpressure notifications */
198 struct vmpressure vmpressure;
199
200 /* css_online() has been completed */
201 int initialized;
202
203 /*
204 * Should the accounting and control be hierarchical, per subtree?
205 */
206 bool use_hierarchy;
207
208 /* protected by memcg_oom_lock */
209 bool oom_lock;
210 int under_oom;
211
212 int swappiness;
213 /* OOM-Killer disable */
214 int oom_kill_disable;
215
216 /* protect arrays of thresholds */
217 struct mutex thresholds_lock;
218
219 /* thresholds for memory usage. RCU-protected */
220 struct mem_cgroup_thresholds thresholds;
221
222 /* thresholds for mem+swap usage. RCU-protected */
223 struct mem_cgroup_thresholds memsw_thresholds;
224
225 /* For oom notifier event fd */
226 struct list_head oom_notify;
227
228 /*
229 * Should we move charges of a task when a task is moved into this
230 * mem_cgroup ? And what type of charges should we move ?
231 */
232 unsigned long move_charge_at_immigrate;
233 /*
234 * set > 0 if pages under this cgroup are moving to other cgroup.
235 */
236 atomic_t moving_account;
237 /* taken only while moving_account > 0 */
238 spinlock_t move_lock;
239 struct task_struct *move_lock_task;
240 unsigned long move_lock_flags;
241 /*
242 * percpu counter.
243 */
244 struct mem_cgroup_stat_cpu __percpu *stat;
245 spinlock_t pcp_counter_lock;
246
247#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
248 struct cg_proto tcp_mem;
249#endif
250#if defined(CONFIG_MEMCG_KMEM)
251 /* Index in the kmem_cache->memcg_params.memcg_caches array */
252 int kmemcg_id;
253 bool kmem_acct_activated;
254 bool kmem_acct_active;
255#endif
256
257 int last_scanned_node;
258#if MAX_NUMNODES > 1
259 nodemask_t scan_nodes;
260 atomic_t numainfo_events;
261 atomic_t numainfo_updating;
262#endif
263
264#ifdef CONFIG_CGROUP_WRITEBACK
265 struct list_head cgwb_list;
266 struct wb_domain cgwb_domain;
267#endif
268
269 /* List of events which userspace want to receive */
270 struct list_head event_list;
271 spinlock_t event_list_lock;
272
273 struct mem_cgroup_per_node *nodeinfo[0];
274 /* WARNING: nodeinfo must be the last member here */
275};
71extern struct cgroup_subsys_state *mem_cgroup_root_css; 276extern struct cgroup_subsys_state *mem_cgroup_root_css;
72 277
73void mem_cgroup_events(struct mem_cgroup *memcg, 278/**
279 * mem_cgroup_events - count memory events against a cgroup
280 * @memcg: the memory cgroup
281 * @idx: the event index
282 * @nr: the number of events to account for
283 */
284static inline void mem_cgroup_events(struct mem_cgroup *memcg,
74 enum mem_cgroup_events_index idx, 285 enum mem_cgroup_events_index idx,
75 unsigned int nr); 286 unsigned int nr)
287{
288 this_cpu_add(memcg->stat->events[idx], nr);
289}
76 290
77bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); 291bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
78 292
@@ -90,15 +304,29 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
90struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); 304struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
91struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); 305struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
92 306
93bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
94 struct mem_cgroup *root);
95bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); 307bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
308struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
309struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
96 310
97extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); 311static inline
98extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); 312struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
313 return css ? container_of(css, struct mem_cgroup, css) : NULL;
314}
99 315
100extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); 316struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
101extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); 317 struct mem_cgroup *,
318 struct mem_cgroup_reclaim_cookie *);
319void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
320
321static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
322 struct mem_cgroup *root)
323{
324 if (root == memcg)
325 return true;
326 if (!root->use_hierarchy)
327 return false;
328 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
329}
102 330
103static inline bool mm_match_cgroup(struct mm_struct *mm, 331static inline bool mm_match_cgroup(struct mm_struct *mm,
104 struct mem_cgroup *memcg) 332 struct mem_cgroup *memcg)
@@ -114,24 +342,68 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
114 return match; 342 return match;
115} 343}
116 344
117extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); 345struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
118extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); 346ino_t page_cgroup_ino(struct page *page);
119 347
120struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, 348static inline bool mem_cgroup_disabled(void)
121 struct mem_cgroup *, 349{
122 struct mem_cgroup_reclaim_cookie *); 350 if (memory_cgrp_subsys.disabled)
123void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); 351 return true;
352 return false;
353}
124 354
125/* 355/*
126 * For memory reclaim. 356 * For memory reclaim.
127 */ 357 */
128int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
129bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
130int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); 358int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
131unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list); 359
132void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int); 360void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
133extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, 361 int nr_pages);
134 struct task_struct *p); 362
363static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
364{
365 struct mem_cgroup_per_zone *mz;
366 struct mem_cgroup *memcg;
367
368 if (mem_cgroup_disabled())
369 return true;
370
371 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
372 memcg = mz->memcg;
373
374 return !!(memcg->css.flags & CSS_ONLINE);
375}
376
377static inline
378unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
379{
380 struct mem_cgroup_per_zone *mz;
381
382 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
383 return mz->lru_size[lru];
384}
385
386static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
387{
388 unsigned long inactive_ratio;
389 unsigned long inactive;
390 unsigned long active;
391 unsigned long gb;
392
393 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
394 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
395
396 gb = (inactive + active) >> (30 - PAGE_SHIFT);
397 if (gb)
398 inactive_ratio = int_sqrt(10 * gb);
399 else
400 inactive_ratio = 1;
401
402 return inactive * inactive_ratio < active;
403}
404
405void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
406 struct task_struct *p);
135 407
136static inline void mem_cgroup_oom_enable(void) 408static inline void mem_cgroup_oom_enable(void)
137{ 409{
@@ -156,18 +428,26 @@ bool mem_cgroup_oom_synchronize(bool wait);
156extern int do_swap_account; 428extern int do_swap_account;
157#endif 429#endif
158 430
159static inline bool mem_cgroup_disabled(void)
160{
161 if (memory_cgrp_subsys.disabled)
162 return true;
163 return false;
164}
165
166struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page); 431struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
167void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
168 enum mem_cgroup_stat_index idx, int val);
169void mem_cgroup_end_page_stat(struct mem_cgroup *memcg); 432void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
170 433
434/**
435 * mem_cgroup_update_page_stat - update page state statistics
436 * @memcg: memcg to account against
437 * @idx: page state item to account
438 * @val: number of pages (positive or negative)
439 *
440 * See mem_cgroup_begin_page_stat() for locking requirements.
441 */
442static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
443 enum mem_cgroup_stat_index idx, int val)
444{
445 VM_BUG_ON(!rcu_read_lock_held());
446
447 if (memcg)
448 this_cpu_add(memcg->stat->count[idx], val);
449}
450
171static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, 451static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
172 enum mem_cgroup_stat_index idx) 452 enum mem_cgroup_stat_index idx)
173{ 453{
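
A brief, illustrative usage sketch (not part of the patch) of the page-stat helpers whose kernel-doc appears above: callers are expected to bracket updates with the begin/end pair so that the RCU assertion in mem_cgroup_update_page_stat() holds. The stat index used here is only an example.

    /* Sketch: mem_cgroup_begin_page_stat() provides the RCU/locking context
     * that mem_cgroup_update_page_stat() now asserts with
     * VM_BUG_ON(!rcu_read_lock_held()).  MEM_CGROUP_STAT_FILE_MAPPED is
     * used purely as an example index. */
    struct mem_cgroup *memcg;

    memcg = mem_cgroup_begin_page_stat(page);
    mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
    mem_cgroup_end_page_stat(memcg);
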
@@ -184,13 +464,31 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
184 gfp_t gfp_mask, 464 gfp_t gfp_mask,
185 unsigned long *total_scanned); 465 unsigned long *total_scanned);
186 466
187void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
188static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, 467static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
189 enum vm_event_item idx) 468 enum vm_event_item idx)
190{ 469{
470 struct mem_cgroup *memcg;
471
191 if (mem_cgroup_disabled()) 472 if (mem_cgroup_disabled())
192 return; 473 return;
193 __mem_cgroup_count_vm_event(mm, idx); 474
475 rcu_read_lock();
476 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
477 if (unlikely(!memcg))
478 goto out;
479
480 switch (idx) {
481 case PGFAULT:
482 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
483 break;
484 case PGMAJFAULT:
485 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
486 break;
487 default:
488 BUG();
489 }
490out:
491 rcu_read_unlock();
194} 492}
195#ifdef CONFIG_TRANSPARENT_HUGEPAGE 493#ifdef CONFIG_TRANSPARENT_HUGEPAGE
196void mem_cgroup_split_huge_fixup(struct page *head); 494void mem_cgroup_split_huge_fixup(struct page *head);
@@ -199,8 +497,6 @@ void mem_cgroup_split_huge_fixup(struct page *head);
199#else /* CONFIG_MEMCG */ 497#else /* CONFIG_MEMCG */
200struct mem_cgroup; 498struct mem_cgroup;
201 499
202#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
203
204static inline void mem_cgroup_events(struct mem_cgroup *memcg, 500static inline void mem_cgroup_events(struct mem_cgroup *memcg,
205 enum mem_cgroup_events_index idx, 501 enum mem_cgroup_events_index idx,
206 unsigned int nr) 502 unsigned int nr)
@@ -258,11 +554,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
258 return &zone->lruvec; 554 return &zone->lruvec;
259} 555}
260 556
261static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
262{
263 return NULL;
264}
265
266static inline bool mm_match_cgroup(struct mm_struct *mm, 557static inline bool mm_match_cgroup(struct mm_struct *mm,
267 struct mem_cgroup *memcg) 558 struct mem_cgroup *memcg)
268{ 559{
@@ -275,12 +566,6 @@ static inline bool task_in_mem_cgroup(struct task_struct *task,
275 return true; 566 return true;
276} 567}
277 568
278static inline struct cgroup_subsys_state
279 *mem_cgroup_css(struct mem_cgroup *memcg)
280{
281 return NULL;
282}
283
284static inline struct mem_cgroup * 569static inline struct mem_cgroup *
285mem_cgroup_iter(struct mem_cgroup *root, 570mem_cgroup_iter(struct mem_cgroup *root,
286 struct mem_cgroup *prev, 571 struct mem_cgroup *prev,
@@ -428,8 +713,8 @@ static inline void sock_release_memcg(struct sock *sk)
428extern struct static_key memcg_kmem_enabled_key; 713extern struct static_key memcg_kmem_enabled_key;
429 714
430extern int memcg_nr_cache_ids; 715extern int memcg_nr_cache_ids;
431extern void memcg_get_cache_ids(void); 716void memcg_get_cache_ids(void);
432extern void memcg_put_cache_ids(void); 717void memcg_put_cache_ids(void);
433 718
434/* 719/*
435 * Helper macro to loop through all memcg-specific caches. Callers must still 720 * Helper macro to loop through all memcg-specific caches. Callers must still
@@ -444,7 +729,10 @@ static inline bool memcg_kmem_enabled(void)
444 return static_key_false(&memcg_kmem_enabled_key); 729 return static_key_false(&memcg_kmem_enabled_key);
445} 730}
446 731
447bool memcg_kmem_is_active(struct mem_cgroup *memcg); 732static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
733{
734 return memcg->kmem_acct_active;
735}
448 736
449/* 737/*
450 * In general, we'll do everything in our power to not incur any overhead 738
@@ -463,7 +751,15 @@ void __memcg_kmem_commit_charge(struct page *page,
463 struct mem_cgroup *memcg, int order); 751 struct mem_cgroup *memcg, int order);
464void __memcg_kmem_uncharge_pages(struct page *page, int order); 752void __memcg_kmem_uncharge_pages(struct page *page, int order);
465 753
466int memcg_cache_id(struct mem_cgroup *memcg); 754/*
755 * helper for accessing a memcg's index. It will be used as an index in the
756 * child cache array in kmem_cache, and also to derive its name. This function
757 * will return -1 when this is not a kmem-limited memcg.
758 */
759static inline int memcg_cache_id(struct mem_cgroup *memcg)
760{
761 return memcg ? memcg->kmemcg_id : -1;
762}
467 763
468struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); 764struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
469void __memcg_kmem_put_cache(struct kmem_cache *cachep); 765void __memcg_kmem_put_cache(struct kmem_cache *cachep);
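
Before moving on to the next file, a short worked example of the anon-balance heuristic that this patch moves inline in mem_cgroup_inactive_anon_is_low() above. It is derived purely from the code shown; no new behaviour is implied.

    /*
     * Worked example: with 2 GiB inactive + 2 GiB active anon pages,
     * gb = (inactive + active) >> (30 - PAGE_SHIFT) = 4, so
     * inactive_ratio = int_sqrt(10 * 4) = 6.  The inactive list is then
     * reported "low" once inactive * 6 < active, i.e. once it holds less
     * than roughly 1/7 of the anonymous pages.
     */
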
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 6ffa0ac7f7d6..8f60e899b33c 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -266,8 +266,9 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
266extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, 266extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
267 void *arg, int (*func)(struct memory_block *, void *)); 267 void *arg, int (*func)(struct memory_block *, void *));
268extern int add_memory(int nid, u64 start, u64 size); 268extern int add_memory(int nid, u64 start, u64 size);
269extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default); 269extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
270extern int arch_add_memory(int nid, u64 start, u64 size); 270 bool for_device);
271extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
271extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); 272extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
272extern bool is_memblock_offlined(struct memory_block *mem); 273extern bool is_memblock_offlined(struct memory_block *mem);
273extern void remove_memory(int nid, u64 start, u64 size); 274extern void remove_memory(int nid, u64 start, u64 size);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index 97cb283cc8e1..8fcad63fab55 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -60,60 +60,60 @@ enum {
60/* page 0 basic: slave address 0x60 */ 60/* page 0 basic: slave address 0x60 */
61 61
62#define PM800_STATUS_1 (0x01) 62#define PM800_STATUS_1 (0x01)
63#define PM800_ONKEY_STS1 (1 << 0) 63#define PM800_ONKEY_STS1 BIT(0)
64#define PM800_EXTON_STS1 (1 << 1) 64#define PM800_EXTON_STS1 BIT(1)
65#define PM800_CHG_STS1 (1 << 2) 65#define PM800_CHG_STS1 BIT(2)
66#define PM800_BAT_STS1 (1 << 3) 66#define PM800_BAT_STS1 BIT(3)
67#define PM800_VBUS_STS1 (1 << 4) 67#define PM800_VBUS_STS1 BIT(4)
68#define PM800_LDO_PGOOD_STS1 (1 << 5) 68#define PM800_LDO_PGOOD_STS1 BIT(5)
69#define PM800_BUCK_PGOOD_STS1 (1 << 6) 69#define PM800_BUCK_PGOOD_STS1 BIT(6)
70 70
71#define PM800_STATUS_2 (0x02) 71#define PM800_STATUS_2 (0x02)
72#define PM800_RTC_ALARM_STS2 (1 << 0) 72#define PM800_RTC_ALARM_STS2 BIT(0)
73 73
74/* Wakeup Registers */ 74/* Wakeup Registers */
75#define PM800_WAKEUP1 (0x0D) 75#define PM800_WAKEUP1 (0x0D)
76 76
77#define PM800_WAKEUP2 (0x0E) 77#define PM800_WAKEUP2 (0x0E)
78#define PM800_WAKEUP2_INV_INT (1 << 0) 78#define PM800_WAKEUP2_INV_INT BIT(0)
79#define PM800_WAKEUP2_INT_CLEAR (1 << 1) 79#define PM800_WAKEUP2_INT_CLEAR BIT(1)
80#define PM800_WAKEUP2_INT_MASK (1 << 2) 80#define PM800_WAKEUP2_INT_MASK BIT(2)
81 81
82#define PM800_POWER_UP_LOG (0x10) 82#define PM800_POWER_UP_LOG (0x10)
83 83
84/* Reference and low power registers */ 84/* Reference and low power registers */
85#define PM800_LOW_POWER1 (0x20) 85#define PM800_LOW_POWER1 (0x20)
86#define PM800_LOW_POWER2 (0x21) 86#define PM800_LOW_POWER2 (0x21)
87#define PM800_LOW_POWER_CONFIG3 (0x22) 87#define PM800_LOW_POWER_CONFIG3 (0x22)
88#define PM800_LOW_POWER_CONFIG4 (0x23) 88#define PM800_LOW_POWER_CONFIG4 (0x23)
89 89
90/* GPIO register */ 90/* GPIO register */
91#define PM800_GPIO_0_1_CNTRL (0x30) 91#define PM800_GPIO_0_1_CNTRL (0x30)
92#define PM800_GPIO0_VAL (1 << 0) 92#define PM800_GPIO0_VAL BIT(0)
93#define PM800_GPIO0_GPIO_MODE(x) (x << 1) 93#define PM800_GPIO0_GPIO_MODE(x) (x << 1)
94#define PM800_GPIO1_VAL (1 << 4) 94#define PM800_GPIO1_VAL BIT(4)
95#define PM800_GPIO1_GPIO_MODE(x) (x << 5) 95#define PM800_GPIO1_GPIO_MODE(x) (x << 5)
96 96
97#define PM800_GPIO_2_3_CNTRL (0x31) 97#define PM800_GPIO_2_3_CNTRL (0x31)
98#define PM800_GPIO2_VAL (1 << 0) 98#define PM800_GPIO2_VAL BIT(0)
99#define PM800_GPIO2_GPIO_MODE(x) (x << 1) 99#define PM800_GPIO2_GPIO_MODE(x) (x << 1)
100#define PM800_GPIO3_VAL (1 << 4) 100#define PM800_GPIO3_VAL BIT(4)
101#define PM800_GPIO3_GPIO_MODE(x) (x << 5) 101#define PM800_GPIO3_GPIO_MODE(x) (x << 5)
102#define PM800_GPIO3_MODE_MASK 0x1F 102#define PM800_GPIO3_MODE_MASK 0x1F
103#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6) 103#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6)
104 104
105#define PM800_GPIO_4_CNTRL (0x32) 105#define PM800_GPIO_4_CNTRL (0x32)
106#define PM800_GPIO4_VAL (1 << 0) 106#define PM800_GPIO4_VAL BIT(0)
107#define PM800_GPIO4_GPIO_MODE(x) (x << 1) 107#define PM800_GPIO4_GPIO_MODE(x) (x << 1)
108 108
109#define PM800_HEADSET_CNTRL (0x38) 109#define PM800_HEADSET_CNTRL (0x38)
110#define PM800_HEADSET_DET_EN (1 << 7) 110#define PM800_HEADSET_DET_EN BIT(7)
111#define PM800_HSDET_SLP (1 << 1) 111#define PM800_HSDET_SLP BIT(1)
112/* PWM register */ 112/* PWM register */
113#define PM800_PWM1 (0x40) 113#define PM800_PWM1 (0x40)
114#define PM800_PWM2 (0x41) 114#define PM800_PWM2 (0x41)
115#define PM800_PWM3 (0x42) 115#define PM800_PWM3 (0x42)
116#define PM800_PWM4 (0x43) 116#define PM800_PWM4 (0x43)
117 117
118/* RTC Registers */ 118/* RTC Registers */
119#define PM800_RTC_CONTROL (0xD0) 119#define PM800_RTC_CONTROL (0xD0)
@@ -123,55 +123,55 @@ enum {
123#define PM800_RTC_MISC4 (0xE4) 123#define PM800_RTC_MISC4 (0xE4)
124#define PM800_RTC_MISC5 (0xE7) 124#define PM800_RTC_MISC5 (0xE7)
125/* bit definitions of RTC Register 1 (0xD0) */ 125/* bit definitions of RTC Register 1 (0xD0) */
126#define PM800_ALARM1_EN (1 << 0) 126#define PM800_ALARM1_EN BIT(0)
127#define PM800_ALARM_WAKEUP (1 << 4) 127#define PM800_ALARM_WAKEUP BIT(4)
128#define PM800_ALARM (1 << 5) 128#define PM800_ALARM BIT(5)
129#define PM800_RTC1_USE_XO (1 << 7) 129#define PM800_RTC1_USE_XO BIT(7)
130 130
131/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */ 131/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
132 132
133/* buck registers */ 133/* buck registers */
134#define PM800_SLEEP_BUCK1 (0x30) 134#define PM800_SLEEP_BUCK1 (0x30)
135 135
136/* BUCK Sleep Mode Register 1: BUCK[1..4] */ 136/* BUCK Sleep Mode Register 1: BUCK[1..4] */
137#define PM800_BUCK_SLP1 (0x5A) 137#define PM800_BUCK_SLP1 (0x5A)
138#define PM800_BUCK1_SLP1_SHIFT 0 138#define PM800_BUCK1_SLP1_SHIFT 0
139#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT) 139#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
140 140
141/* page 2 GPADC: slave address 0x02 */ 141/* page 2 GPADC: slave address 0x02 */
142#define PM800_GPADC_MEAS_EN1 (0x01) 142#define PM800_GPADC_MEAS_EN1 (0x01)
143#define PM800_MEAS_EN1_VBAT (1 << 2) 143#define PM800_MEAS_EN1_VBAT BIT(2)
144#define PM800_GPADC_MEAS_EN2 (0x02) 144#define PM800_GPADC_MEAS_EN2 (0x02)
145#define PM800_MEAS_EN2_RFTMP (1 << 0) 145#define PM800_MEAS_EN2_RFTMP BIT(0)
146#define PM800_MEAS_GP0_EN (1 << 2) 146#define PM800_MEAS_GP0_EN BIT(2)
147#define PM800_MEAS_GP1_EN (1 << 3) 147#define PM800_MEAS_GP1_EN BIT(3)
148#define PM800_MEAS_GP2_EN (1 << 4) 148#define PM800_MEAS_GP2_EN BIT(4)
149#define PM800_MEAS_GP3_EN (1 << 5) 149#define PM800_MEAS_GP3_EN BIT(5)
150#define PM800_MEAS_GP4_EN (1 << 6) 150#define PM800_MEAS_GP4_EN BIT(6)
151 151
152#define PM800_GPADC_MISC_CONFIG1 (0x05) 152#define PM800_GPADC_MISC_CONFIG1 (0x05)
153#define PM800_GPADC_MISC_CONFIG2 (0x06) 153#define PM800_GPADC_MISC_CONFIG2 (0x06)
154#define PM800_GPADC_MISC_GPFSM_EN (1 << 0) 154#define PM800_GPADC_MISC_GPFSM_EN BIT(0)
155#define PM800_GPADC_SLOW_MODE(x) (x << 3) 155#define PM800_GPADC_SLOW_MODE(x) (x << 3)
156 156
157#define PM800_GPADC_MISC_CONFIG3 (0x09) 157#define PM800_GPADC_MISC_CONFIG3 (0x09)
158#define PM800_GPADC_MISC_CONFIG4 (0x0A) 158#define PM800_GPADC_MISC_CONFIG4 (0x0A)
159 159
160#define PM800_GPADC_PREBIAS1 (0x0F) 160#define PM800_GPADC_PREBIAS1 (0x0F)
161#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0) 161#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0)
162#define PM800_GPADC_PREBIAS2 (0x10) 162#define PM800_GPADC_PREBIAS2 (0x10)
163 163
164#define PM800_GP_BIAS_ENA1 (0x14) 164#define PM800_GP_BIAS_ENA1 (0x14)
165#define PM800_GPADC_GP_BIAS_EN0 (1 << 0) 165#define PM800_GPADC_GP_BIAS_EN0 BIT(0)
166#define PM800_GPADC_GP_BIAS_EN1 (1 << 1) 166#define PM800_GPADC_GP_BIAS_EN1 BIT(1)
167#define PM800_GPADC_GP_BIAS_EN2 (1 << 2) 167#define PM800_GPADC_GP_BIAS_EN2 BIT(2)
168#define PM800_GPADC_GP_BIAS_EN3 (1 << 3) 168#define PM800_GPADC_GP_BIAS_EN3 BIT(3)
169 169
170#define PM800_GP_BIAS_OUT1 (0x15) 170#define PM800_GP_BIAS_OUT1 (0x15)
171#define PM800_BIAS_OUT_GP0 (1 << 0) 171#define PM800_BIAS_OUT_GP0 BIT(0)
172#define PM800_BIAS_OUT_GP1 (1 << 1) 172#define PM800_BIAS_OUT_GP1 BIT(1)
173#define PM800_BIAS_OUT_GP2 (1 << 2) 173#define PM800_BIAS_OUT_GP2 BIT(2)
174#define PM800_BIAS_OUT_GP3 (1 << 3) 174#define PM800_BIAS_OUT_GP3 BIT(3)
175 175
176#define PM800_GPADC0_LOW_TH 0x20 176#define PM800_GPADC0_LOW_TH 0x20
177#define PM800_GPADC1_LOW_TH 0x21 177#define PM800_GPADC1_LOW_TH 0x21
@@ -222,37 +222,37 @@ enum {
222 222
223#define PM805_INT_STATUS1 (0x03) 223#define PM805_INT_STATUS1 (0x03)
224 224
225#define PM805_INT1_HP1_SHRT (1 << 0) 225#define PM805_INT1_HP1_SHRT BIT(0)
226#define PM805_INT1_HP2_SHRT (1 << 1) 226#define PM805_INT1_HP2_SHRT BIT(1)
227#define PM805_INT1_MIC_CONFLICT (1 << 2) 227#define PM805_INT1_MIC_CONFLICT BIT(2)
228#define PM805_INT1_CLIP_FAULT (1 << 3) 228#define PM805_INT1_CLIP_FAULT BIT(3)
229#define PM805_INT1_LDO_OFF (1 << 4) 229#define PM805_INT1_LDO_OFF BIT(4)
230#define PM805_INT1_SRC_DPLL_LOCK (1 << 5) 230#define PM805_INT1_SRC_DPLL_LOCK BIT(5)
231 231
232#define PM805_INT_STATUS2 (0x04) 232#define PM805_INT_STATUS2 (0x04)
233 233
234#define PM805_INT2_MIC_DET (1 << 0) 234#define PM805_INT2_MIC_DET BIT(0)
235#define PM805_INT2_SHRT_BTN_DET (1 << 1) 235#define PM805_INT2_SHRT_BTN_DET BIT(1)
236#define PM805_INT2_VOLM_BTN_DET (1 << 2) 236#define PM805_INT2_VOLM_BTN_DET BIT(2)
237#define PM805_INT2_VOLP_BTN_DET (1 << 3) 237#define PM805_INT2_VOLP_BTN_DET BIT(3)
238#define PM805_INT2_RAW_PLL_FAULT (1 << 4) 238#define PM805_INT2_RAW_PLL_FAULT BIT(4)
239#define PM805_INT2_FINE_PLL_FAULT (1 << 5) 239#define PM805_INT2_FINE_PLL_FAULT BIT(5)
240 240
241#define PM805_INT_MASK1 (0x05) 241#define PM805_INT_MASK1 (0x05)
242#define PM805_INT_MASK2 (0x06) 242#define PM805_INT_MASK2 (0x06)
243#define PM805_SHRT_BTN_DET (1 << 1) 243#define PM805_SHRT_BTN_DET BIT(1)
244 244
245/* number of status and int reg in a row */ 245/* number of status and int reg in a row */
246#define PM805_INT_REG_NUM (2) 246#define PM805_INT_REG_NUM (2)
247 247
248#define PM805_MIC_DET1 (0x07) 248#define PM805_MIC_DET1 (0x07)
249#define PM805_MIC_DET_EN_MIC_DET (1 << 0) 249#define PM805_MIC_DET_EN_MIC_DET BIT(0)
250#define PM805_MIC_DET2 (0x08) 250#define PM805_MIC_DET2 (0x08)
251#define PM805_MIC_DET_STATUS1 (0x09) 251#define PM805_MIC_DET_STATUS1 (0x09)
252 252
253#define PM805_MIC_DET_STATUS3 (0x0A) 253#define PM805_MIC_DET_STATUS3 (0x0A)
254#define PM805_AUTO_SEQ_STATUS1 (0x0B) 254#define PM805_AUTO_SEQ_STATUS1 (0x0B)
255#define PM805_AUTO_SEQ_STATUS2 (0x0C) 255#define PM805_AUTO_SEQ_STATUS2 (0x0C)
256 256
257#define PM805_ADC_SETTING1 (0x10) 257#define PM805_ADC_SETTING1 (0x10)
258#define PM805_ADC_SETTING2 (0x11) 258#define PM805_ADC_SETTING2 (0x11)
@@ -261,7 +261,7 @@ enum {
261#define PM805_ADC_GAIN2 (0x13) 261#define PM805_ADC_GAIN2 (0x13)
262#define PM805_DMIC_SETTING (0x15) 262#define PM805_DMIC_SETTING (0x15)
263#define PM805_DWS_SETTING (0x16) 263#define PM805_DWS_SETTING (0x16)
264#define PM805_MIC_CONFLICT_STS (0x17) 264#define PM805_MIC_CONFLICT_STS (0x17)
265 265
266#define PM805_PDM_SETTING1 (0x20) 266#define PM805_PDM_SETTING1 (0x20)
267#define PM805_PDM_SETTING2 (0x21) 267#define PM805_PDM_SETTING2 (0x21)
@@ -270,11 +270,11 @@ enum {
270#define PM805_PDM_CONTROL2 (0x24) 270#define PM805_PDM_CONTROL2 (0x24)
271#define PM805_PDM_CONTROL3 (0x25) 271#define PM805_PDM_CONTROL3 (0x25)
272 272
273#define PM805_HEADPHONE_SETTING (0x26) 273#define PM805_HEADPHONE_SETTING (0x26)
274#define PM805_HEADPHONE_GAIN_A2A (0x27) 274#define PM805_HEADPHONE_GAIN_A2A (0x27)
275#define PM805_HEADPHONE_SHORT_STATE (0x28) 275#define PM805_HEADPHONE_SHORT_STATE (0x28)
276#define PM805_EARPHONE_SETTING (0x29) 276#define PM805_EARPHONE_SETTING (0x29)
277#define PM805_AUTO_SEQ_SETTING (0x2A) 277#define PM805_AUTO_SEQ_SETTING (0x2A)
278 278
279struct pm80x_rtc_pdata { 279struct pm80x_rtc_pdata {
280 int vrtc; 280 int vrtc;
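
The 88pm80x.h hunks above replace open-coded (1 << n) masks with the kernel's BIT() helper. For reference, BIT() is defined at this point (in include/linux/bitops.h) essentially as shown below; the only semantic difference from the open-coded form is the unsigned long type, which is harmless for these 8-bit register masks.

    #define BIT(nr) (1UL << (nr))

    /* e.g. PM800_ONKEY_STS1 now expands to (1UL << 0), equivalent in value
     * to the old (1 << 0) but promoted to unsigned long. */
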
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 2f434f4f79a1..79e607e2f081 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -25,6 +25,8 @@ enum arizona_type {
25 WM5110 = 2, 25 WM5110 = 2,
26 WM8997 = 3, 26 WM8997 = 3,
27 WM8280 = 4, 27 WM8280 = 4,
28 WM8998 = 5,
29 WM1814 = 6,
28}; 30};
29 31
30#define ARIZONA_IRQ_GP1 0 32#define ARIZONA_IRQ_GP1 0
@@ -165,6 +167,7 @@ static inline int wm5102_patch(struct arizona *arizona)
165 167
166int wm5110_patch(struct arizona *arizona); 168int wm5110_patch(struct arizona *arizona);
167int wm8997_patch(struct arizona *arizona); 169int wm8997_patch(struct arizona *arizona);
170int wm8998_patch(struct arizona *arizona);
168 171
169extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop, 172extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
170 bool mandatory); 173 bool mandatory);
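
To show how the new device IDs and wm8998_patch() prototype above would typically be consumed, here is a hypothetical dispatch sketch. It is not taken from the patch; it assumes struct arizona exposes a "type" field carrying an enum arizona_type value, and it treats WM1814 as sharing the WM8998 register patch purely for illustration.

    int ret = 0;

    switch (arizona->type) {
    case WM8998:
    case WM1814:        /* assumed here to use the same patch as WM8998 */
        ret = wm8998_patch(arizona);
        break;
    default:
        break;
    }
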
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 43db4faad143..1dc385850ba2 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -101,7 +101,7 @@ struct arizona_pdata {
101 * useful for systems where an I2S bus with multiple data 101 * useful for systems where an I2S bus with multiple data
102 * lines is mastered. 102 * lines is mastered.
103 */ 103 */
104 int max_channels_clocked[ARIZONA_MAX_AIF]; 104 unsigned int max_channels_clocked[ARIZONA_MAX_AIF];
105 105
106 /** GPIO5 is used for jack detection */ 106 /** GPIO5 is used for jack detection */
107 bool jd_gpio5; 107 bool jd_gpio5;
@@ -125,22 +125,22 @@ struct arizona_pdata {
125 unsigned int hpdet_channel; 125 unsigned int hpdet_channel;
126 126
127 /** Extra debounce timeout used during initial mic detection (ms) */ 127 /** Extra debounce timeout used during initial mic detection (ms) */
128 int micd_detect_debounce; 128 unsigned int micd_detect_debounce;
129 129
130 /** GPIO for mic detection polarity */ 130 /** GPIO for mic detection polarity */
131 int micd_pol_gpio; 131 int micd_pol_gpio;
132 132
133 /** Mic detect ramp rate */ 133 /** Mic detect ramp rate */
134 int micd_bias_start_time; 134 unsigned int micd_bias_start_time;
135 135
136 /** Mic detect sample rate */ 136 /** Mic detect sample rate */
137 int micd_rate; 137 unsigned int micd_rate;
138 138
139 /** Mic detect debounce level */ 139 /** Mic detect debounce level */
140 int micd_dbtime; 140 unsigned int micd_dbtime;
141 141
142 /** Mic detect timeout (ms) */ 142 /** Mic detect timeout (ms) */
143 int micd_timeout; 143 unsigned int micd_timeout;
144 144
145 /** Force MICBIAS on for mic detect */ 145 /** Force MICBIAS on for mic detect */
146 bool micd_force_micbias; 146 bool micd_force_micbias;
@@ -162,6 +162,8 @@ struct arizona_pdata {
162 /** 162 /**
163 * Mode of input structures 163 * Mode of input structures
164 * One of the ARIZONA_INMODE_xxx values 164 * One of the ARIZONA_INMODE_xxx values
165 * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4
166 * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B
165 */ 167 */
166 int inmode[ARIZONA_MAX_INPUT]; 168 int inmode[ARIZONA_MAX_INPUT];
167 169
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 3499d36e6067..fdd70b3c7418 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -39,6 +39,7 @@
39#define ARIZONA_PWM_DRIVE_3 0x32 39#define ARIZONA_PWM_DRIVE_3 0x32
40#define ARIZONA_WAKE_CONTROL 0x40 40#define ARIZONA_WAKE_CONTROL 0x40
41#define ARIZONA_SEQUENCE_CONTROL 0x41 41#define ARIZONA_SEQUENCE_CONTROL 0x41
42#define ARIZONA_SPARE_TRIGGERS 0x42
42#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61 43#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
43#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62 44#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
44#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63 45#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
@@ -139,6 +140,7 @@
139#define ARIZONA_MIC_DETECT_LEVEL_2 0x2A7 140#define ARIZONA_MIC_DETECT_LEVEL_2 0x2A7
140#define ARIZONA_MIC_DETECT_LEVEL_3 0x2A8 141#define ARIZONA_MIC_DETECT_LEVEL_3 0x2A8
141#define ARIZONA_MIC_DETECT_LEVEL_4 0x2A9 142#define ARIZONA_MIC_DETECT_LEVEL_4 0x2A9
143#define ARIZONA_MIC_DETECT_4 0x2AB
142#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3 144#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3
143#define ARIZONA_ISOLATION_CONTROL 0x2CB 145#define ARIZONA_ISOLATION_CONTROL 0x2CB
144#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3 146#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3
@@ -225,14 +227,18 @@
225#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E 227#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E
226#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F 228#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F
227#define ARIZONA_DRE_ENABLE 0x440 229#define ARIZONA_DRE_ENABLE 0x440
230#define ARIZONA_DRE_CONTROL_1 0x441
228#define ARIZONA_DRE_CONTROL_2 0x442 231#define ARIZONA_DRE_CONTROL_2 0x442
229#define ARIZONA_DRE_CONTROL_3 0x443 232#define ARIZONA_DRE_CONTROL_3 0x443
233#define ARIZONA_EDRE_ENABLE 0x448
230#define ARIZONA_DAC_AEC_CONTROL_1 0x450 234#define ARIZONA_DAC_AEC_CONTROL_1 0x450
235#define ARIZONA_DAC_AEC_CONTROL_2 0x451
231#define ARIZONA_NOISE_GATE_CONTROL 0x458 236#define ARIZONA_NOISE_GATE_CONTROL 0x458
232#define ARIZONA_PDM_SPK1_CTRL_1 0x490 237#define ARIZONA_PDM_SPK1_CTRL_1 0x490
233#define ARIZONA_PDM_SPK1_CTRL_2 0x491 238#define ARIZONA_PDM_SPK1_CTRL_2 0x491
234#define ARIZONA_PDM_SPK2_CTRL_1 0x492 239#define ARIZONA_PDM_SPK2_CTRL_1 0x492
235#define ARIZONA_PDM_SPK2_CTRL_2 0x493 240#define ARIZONA_PDM_SPK2_CTRL_2 0x493
241#define ARIZONA_HP_TEST_CTRL_13 0x49A
236#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 242#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
237#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 243#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
238#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 244#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
@@ -310,6 +316,10 @@
310#define ARIZONA_AIF3_TX_ENABLES 0x599 316#define ARIZONA_AIF3_TX_ENABLES 0x599
311#define ARIZONA_AIF3_RX_ENABLES 0x59A 317#define ARIZONA_AIF3_RX_ENABLES 0x59A
312#define ARIZONA_AIF3_FORCE_WRITE 0x59B 318#define ARIZONA_AIF3_FORCE_WRITE 0x59B
319#define ARIZONA_SPD1_TX_CONTROL 0x5C2
320#define ARIZONA_SPD1_TX_CHANNEL_STATUS_1 0x5C3
321#define ARIZONA_SPD1_TX_CHANNEL_STATUS_2 0x5C4
322#define ARIZONA_SPD1_TX_CHANNEL_STATUS_3 0x5C5
313#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3 323#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
314#define ARIZONA_SLIMBUS_RATES_1 0x5E5 324#define ARIZONA_SLIMBUS_RATES_1 0x5E5
315#define ARIZONA_SLIMBUS_RATES_2 0x5E6 325#define ARIZONA_SLIMBUS_RATES_2 0x5E6
@@ -643,6 +653,10 @@
643#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD 653#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
644#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE 654#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
645#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF 655#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
656#define ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE 0x800
657#define ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME 0x801
658#define ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE 0x808
659#define ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME 0x809
646#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880 660#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880
647#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881 661#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881
648#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882 662#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882
@@ -868,6 +882,7 @@
868#define ARIZONA_GPIO5_CTRL 0xC04 882#define ARIZONA_GPIO5_CTRL 0xC04
869#define ARIZONA_IRQ_CTRL_1 0xC0F 883#define ARIZONA_IRQ_CTRL_1 0xC0F
870#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10 884#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10
885#define ARIZONA_GP_SWITCH_1 0xC18
871#define ARIZONA_MISC_PAD_CTRL_1 0xC20 886#define ARIZONA_MISC_PAD_CTRL_1 0xC20
872#define ARIZONA_MISC_PAD_CTRL_2 0xC21 887#define ARIZONA_MISC_PAD_CTRL_2 0xC21
873#define ARIZONA_MISC_PAD_CTRL_3 0xC22 888#define ARIZONA_MISC_PAD_CTRL_3 0xC22
@@ -1169,6 +1184,13 @@
1169#define ARIZONA_DSP4_SCRATCH_1 0x1441 1184#define ARIZONA_DSP4_SCRATCH_1 0x1441
1170#define ARIZONA_DSP4_SCRATCH_2 0x1442 1185#define ARIZONA_DSP4_SCRATCH_2 0x1442
1171#define ARIZONA_DSP4_SCRATCH_3 0x1443 1186#define ARIZONA_DSP4_SCRATCH_3 0x1443
1187#define ARIZONA_FRF_COEFF_1 0x1700
1188#define ARIZONA_FRF_COEFF_2 0x1701
1189#define ARIZONA_FRF_COEFF_3 0x1702
1190#define ARIZONA_FRF_COEFF_4 0x1703
1191#define ARIZONA_V2_DAC_COMP_1 0x1704
1192#define ARIZONA_V2_DAC_COMP_2 0x1705
1193
1172 1194
1173/* 1195/*
1174 * Field Definitions. 1196 * Field Definitions.
@@ -1431,6 +1453,42 @@
1431#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */ 1453#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */
1432 1454
1433/* 1455/*
1456 * R66 (0x42) - Spare Triggers
1457 */
1458#define ARIZONA_WS_TRG8 0x0080 /* WS_TRG8 */
1459#define ARIZONA_WS_TRG8_MASK 0x0080 /* WS_TRG8 */
1460#define ARIZONA_WS_TRG8_SHIFT 7 /* WS_TRG8 */
1461#define ARIZONA_WS_TRG8_WIDTH 1 /* WS_TRG8 */
1462#define ARIZONA_WS_TRG7 0x0040 /* WS_TRG7 */
1463#define ARIZONA_WS_TRG7_MASK 0x0040 /* WS_TRG7 */
1464#define ARIZONA_WS_TRG7_SHIFT 6 /* WS_TRG7 */
1465#define ARIZONA_WS_TRG7_WIDTH 1 /* WS_TRG7 */
1466#define ARIZONA_WS_TRG6 0x0020 /* WS_TRG6 */
1467#define ARIZONA_WS_TRG6_MASK 0x0020 /* WS_TRG6 */
1468#define ARIZONA_WS_TRG6_SHIFT 5 /* WS_TRG6 */
1469#define ARIZONA_WS_TRG6_WIDTH 1 /* WS_TRG6 */
1470#define ARIZONA_WS_TRG5 0x0010 /* WS_TRG5 */
1471#define ARIZONA_WS_TRG5_MASK 0x0010 /* WS_TRG5 */
1472#define ARIZONA_WS_TRG5_SHIFT 4 /* WS_TRG5 */
1473#define ARIZONA_WS_TRG5_WIDTH 1 /* WS_TRG5 */
1474#define ARIZONA_WS_TRG4 0x0008 /* WS_TRG4 */
1475#define ARIZONA_WS_TRG4_MASK 0x0008 /* WS_TRG4 */
1476#define ARIZONA_WS_TRG4_SHIFT 3 /* WS_TRG4 */
1477#define ARIZONA_WS_TRG4_WIDTH 1 /* WS_TRG4 */
1478#define ARIZONA_WS_TRG3 0x0004 /* WS_TRG3 */
1479#define ARIZONA_WS_TRG3_MASK 0x0004 /* WS_TRG3 */
1480#define ARIZONA_WS_TRG3_SHIFT 2 /* WS_TRG3 */
1481#define ARIZONA_WS_TRG3_WIDTH 1 /* WS_TRG3 */
1482#define ARIZONA_WS_TRG2 0x0002 /* WS_TRG2 */
1483#define ARIZONA_WS_TRG2_MASK 0x0002 /* WS_TRG2 */
1484#define ARIZONA_WS_TRG2_SHIFT 1 /* WS_TRG2 */
1485#define ARIZONA_WS_TRG2_WIDTH 1 /* WS_TRG2 */
1486#define ARIZONA_WS_TRG1 0x0001 /* WS_TRG1 */
1487#define ARIZONA_WS_TRG1_MASK 0x0001 /* WS_TRG1 */
1488#define ARIZONA_WS_TRG1_SHIFT 0 /* WS_TRG1 */
1489#define ARIZONA_WS_TRG1_WIDTH 1 /* WS_TRG1 */
1490
1491/*
1434 * R97 (0x61) - Sample Rate Sequence Select 1 1492 * R97 (0x61) - Sample Rate Sequence Select 1
1435 */ 1493 */
1436#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */ 1494#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
@@ -2325,6 +2383,9 @@
2325#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */ 2383#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */
2326#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */ 2384#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */
2327#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */ 2385#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */
2386#define WM8998_HP_RATE_MASK 0x0006 /* HP_RATE - [2:1] */
2387#define WM8998_HP_RATE_SHIFT 1 /* HP_RATE - [2:1] */
2388#define WM8998_HP_RATE_WIDTH 2 /* HP_RATE - [2:1] */
2328#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */ 2389#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */
2329#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */ 2390#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */
2330#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */ 2391#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */
@@ -2413,6 +2474,16 @@
2413#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */ 2474#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */
2414 2475
2415/* 2476/*
2477 * R683 (0x2AB) - Mic Detect 4
2478 */
2479#define ARIZONA_MICDET_ADCVAL_DIFF_MASK 0xFF00 /* MICDET_ADCVAL_DIFF - [15:8] */
2480#define ARIZONA_MICDET_ADCVAL_DIFF_SHIFT 8 /* MICDET_ADCVAL_DIFF - [15:8] */
2481#define ARIZONA_MICDET_ADCVAL_DIFF_WIDTH 8 /* MICDET_ADCVAL_DIFF - [15:8] */
2482#define ARIZONA_MICDET_ADCVAL_MASK 0x007F /* MICDET_ADCVAL - [6:0] */
2483#define ARIZONA_MICDET_ADCVAL_SHIFT 0 /* MICDET_ADCVAL - [6:0] */
2484#define ARIZONA_MICDET_ADCVAL_WIDTH 7 /* MICDET_ADCVAL - [6:0] */
2485
2486/*
2416 * R707 (0x2C3) - Mic noise mix control 1 2487 * R707 (0x2C3) - Mic noise mix control 1
2417 */ 2488 */
2418#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */ 2489#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */
@@ -2528,6 +2599,12 @@
2528/* 2599/*
2529 * R785 (0x311) - ADC Digital Volume 1L 2600 * R785 (0x311) - ADC Digital Volume 1L
2530 */ 2601 */
2602#define ARIZONA_IN1L_SRC_MASK 0x4000 /* IN1L_SRC - [14] */
2603#define ARIZONA_IN1L_SRC_SHIFT 14 /* IN1L_SRC - [14] */
2604#define ARIZONA_IN1L_SRC_WIDTH 1 /* IN1L_SRC - [14] */
2605#define ARIZONA_IN1L_SRC_SE_MASK 0x2000 /* IN1L_SRC - [13] */
2606#define ARIZONA_IN1L_SRC_SE_SHIFT 13 /* IN1L_SRC - [13] */
2607#define ARIZONA_IN1L_SRC_SE_WIDTH 1 /* IN1L_SRC - [13] */
2531#define ARIZONA_IN_VU 0x0200 /* IN_VU */ 2608#define ARIZONA_IN_VU 0x0200 /* IN_VU */
2532#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ 2609#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
2533#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ 2610#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2560,6 +2637,12 @@
2560/* 2637/*
2561 * R789 (0x315) - ADC Digital Volume 1R 2638 * R789 (0x315) - ADC Digital Volume 1R
2562 */ 2639 */
2640#define ARIZONA_IN1R_SRC_MASK 0x4000 /* IN1R_SRC - [14] */
2641#define ARIZONA_IN1R_SRC_SHIFT 14 /* IN1R_SRC - [14] */
2642#define ARIZONA_IN1R_SRC_WIDTH 1 /* IN1R_SRC - [14] */
2643#define ARIZONA_IN1R_SRC_SE_MASK 0x2000 /* IN1R_SRC - [13] */
2644#define ARIZONA_IN1R_SRC_SE_SHIFT 13 /* IN1R_SRC - [13] */
2645#define ARIZONA_IN1R_SRC_SE_WIDTH 1 /* IN1R_SRC - [13] */
2563#define ARIZONA_IN_VU 0x0200 /* IN_VU */ 2646#define ARIZONA_IN_VU 0x0200 /* IN_VU */
2564#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ 2647#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
2565#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ 2648#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2604,6 +2687,12 @@
2604/* 2687/*
2605 * R793 (0x319) - ADC Digital Volume 2L 2688 * R793 (0x319) - ADC Digital Volume 2L
2606 */ 2689 */
2690#define ARIZONA_IN2L_SRC_MASK 0x4000 /* IN2L_SRC - [14] */
2691#define ARIZONA_IN2L_SRC_SHIFT 14 /* IN2L_SRC - [14] */
2692#define ARIZONA_IN2L_SRC_WIDTH 1 /* IN2L_SRC - [14] */
2693#define ARIZONA_IN2L_SRC_SE_MASK 0x2000 /* IN2L_SRC - [13] */
2694#define ARIZONA_IN2L_SRC_SE_SHIFT 13 /* IN2L_SRC - [13] */
2695#define ARIZONA_IN2L_SRC_SE_WIDTH 1 /* IN2L_SRC - [13] */
2607#define ARIZONA_IN_VU 0x0200 /* IN_VU */ 2696#define ARIZONA_IN_VU 0x0200 /* IN_VU */
2608#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ 2697#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
2609#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ 2698#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -3412,11 +3501,45 @@
3412#define ARIZONA_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */ 3501#define ARIZONA_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
3413 3502
3414/* 3503/*
3504 * R1088 (0x440) - DRE Enable (WM8998)
3505 */
3506#define WM8998_DRE3L_ENA 0x0020 /* DRE3L_ENA */
3507#define WM8998_DRE3L_ENA_MASK 0x0020 /* DRE3L_ENA */
3508#define WM8998_DRE3L_ENA_SHIFT 5 /* DRE3L_ENA */
3509#define WM8998_DRE3L_ENA_WIDTH 1 /* DRE3L_ENA */
3510#define WM8998_DRE2L_ENA 0x0008 /* DRE2L_ENA */
3511#define WM8998_DRE2L_ENA_MASK 0x0008 /* DRE2L_ENA */
3512#define WM8998_DRE2L_ENA_SHIFT 3 /* DRE2L_ENA */
3513#define WM8998_DRE2L_ENA_WIDTH 1 /* DRE2L_ENA */
3514#define WM8998_DRE2R_ENA 0x0004 /* DRE2R_ENA */
3515#define WM8998_DRE2R_ENA_MASK 0x0004 /* DRE2R_ENA */
3516#define WM8998_DRE2R_ENA_SHIFT 2 /* DRE2R_ENA */
3517#define WM8998_DRE2R_ENA_WIDTH 1 /* DRE2R_ENA */
3518#define WM8998_DRE1L_ENA 0x0002 /* DRE1L_ENA */
3519#define WM8998_DRE1L_ENA_MASK 0x0002 /* DRE1L_ENA */
3520#define WM8998_DRE1L_ENA_SHIFT 1 /* DRE1L_ENA */
3521#define WM8998_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
3522#define WM8998_DRE1R_ENA 0x0001 /* DRE1R_ENA */
3523#define WM8998_DRE1R_ENA_MASK 0x0001 /* DRE1R_ENA */
3524#define WM8998_DRE1R_ENA_SHIFT 0 /* DRE1R_ENA */
3525#define WM8998_DRE1R_ENA_WIDTH 1 /* DRE1R_ENA */
3526
3527/*
3528 * R1089 (0x441) - DRE Control 1
3529 */
3530#define ARIZONA_DRE_ENV_TC_FAST_MASK 0x0F00 /* DRE_ENV_TC_FAST - [11:8] */
3531#define ARIZONA_DRE_ENV_TC_FAST_SHIFT 8 /* DRE_ENV_TC_FAST - [11:8] */
3532#define ARIZONA_DRE_ENV_TC_FAST_WIDTH 4 /* DRE_ENV_TC_FAST - [11:8] */
3533
3534/*
3415 * R1090 (0x442) - DRE Control 2 3535 * R1090 (0x442) - DRE Control 2
3416 */ 3536 */
3417#define ARIZONA_DRE_T_LOW_MASK 0x3F00 /* DRE_T_LOW - [13:8] */ 3537#define ARIZONA_DRE_T_LOW_MASK 0x3F00 /* DRE_T_LOW - [13:8] */
3418#define ARIZONA_DRE_T_LOW_SHIFT 8 /* DRE_T_LOW - [13:8] */ 3538#define ARIZONA_DRE_T_LOW_SHIFT 8 /* DRE_T_LOW - [13:8] */
3419#define ARIZONA_DRE_T_LOW_WIDTH 6 /* DRE_T_LOW - [13:8] */ 3539#define ARIZONA_DRE_T_LOW_WIDTH 6 /* DRE_T_LOW - [13:8] */
3540#define ARIZONA_DRE_ALOG_VOL_DELAY_MASK 0x000F /* DRE_ALOG_VOL_DELAY - [3:0] */
3541#define ARIZONA_DRE_ALOG_VOL_DELAY_SHIFT 0 /* DRE_ALOG_VOL_DELAY - [3:0] */
3542#define ARIZONA_DRE_ALOG_VOL_DELAY_WIDTH 4 /* DRE_ALOG_VOL_DELAY - [3:0] */
3420 3543
3421/* 3544/*
3422 * R1091 (0x443) - DRE Control 3 3545 * R1091 (0x443) - DRE Control 3
@@ -3428,6 +3551,49 @@
3428#define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT 0 /* LOW_LEVEL_ABS - [3:0] */ 3551#define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT 0 /* LOW_LEVEL_ABS - [3:0] */
3429#define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH 4 /* LOW_LEVEL_ABS - [3:0] */ 3552#define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH 4 /* LOW_LEVEL_ABS - [3:0] */
3430 3553
3554/* R1096 (0x448) - EDRE Enable
3555 */
3556#define ARIZONA_EDRE_OUT4L_THR2_ENA 0x0200 /* EDRE_OUT4L_THR2_ENA */
3557#define ARIZONA_EDRE_OUT4L_THR2_ENA_MASK 0x0200 /* EDRE_OUT4L_THR2_ENA */
3558#define ARIZONA_EDRE_OUT4L_THR2_ENA_SHIFT 9 /* EDRE_OUT4L_THR2_ENA */
3559#define ARIZONA_EDRE_OUT4L_THR2_ENA_WIDTH 1 /* EDRE_OUT4L_THR2_ENA */
3560#define ARIZONA_EDRE_OUT4R_THR2_ENA 0x0100 /* EDRE_OUT4R_THR2_ENA */
3561#define ARIZONA_EDRE_OUT4R_THR2_ENA_MASK 0x0100 /* EDRE_OUT4R_THR2_ENA */
3562#define ARIZONA_EDRE_OUT4R_THR2_ENA_SHIFT 8 /* EDRE_OUT4R_THR2_ENA */
3563#define ARIZONA_EDRE_OUT4R_THR2_ENA_WIDTH 1 /* EDRE_OUT4R_THR2_ENA */
3564#define ARIZONA_EDRE_OUT4L_THR1_ENA 0x0080 /* EDRE_OUT4L_THR1_ENA */
3565#define ARIZONA_EDRE_OUT4L_THR1_ENA_MASK 0x0080 /* EDRE_OUT4L_THR1_ENA */
3566#define ARIZONA_EDRE_OUT4L_THR1_ENA_SHIFT 7 /* EDRE_OUT4L_THR1_ENA */
3567#define ARIZONA_EDRE_OUT4L_THR1_ENA_WIDTH 1 /* EDRE_OUT4L_THR1_ENA */
3568#define ARIZONA_EDRE_OUT4R_THR1_ENA 0x0040 /* EDRE_OUT4R_THR1_ENA */
3569#define ARIZONA_EDRE_OUT4R_THR1_ENA_MASK 0x0040 /* EDRE_OUT4R_THR1_ENA */
3570#define ARIZONA_EDRE_OUT4R_THR1_ENA_SHIFT 6 /* EDRE_OUT4R_THR1_ENA */
3571#define ARIZONA_EDRE_OUT4R_THR1_ENA_WIDTH 1 /* EDRE_OUT4R_THR1_ENA */
3572#define ARIZONA_EDRE_OUT3L_THR1_ENA 0x0020 /* EDRE_OUT3L_THR1_ENA */
3573#define ARIZONA_EDRE_OUT3L_THR1_ENA_MASK 0x0020 /* EDRE_OUT3L_THR1_ENA */
3574#define ARIZONA_EDRE_OUT3L_THR1_ENA_SHIFT 5 /* EDRE_OUT3L_THR1_ENA */
3575#define ARIZONA_EDRE_OUT3L_THR1_ENA_WIDTH 1 /* EDRE_OUT3L_THR1_ENA */
3576#define ARIZONA_EDRE_OUT3R_THR1_ENA 0x0010 /* EDRE_OUT3R_THR1_ENA */
3577#define ARIZONA_EDRE_OUT3R_THR1_ENA_MASK 0x0010 /* EDRE_OUT3R_THR1_ENA */
3578#define ARIZONA_EDRE_OUT3R_THR1_ENA_SHIFT 4 /* EDRE_OUT3R_THR1_ENA */
3579#define ARIZONA_EDRE_OUT3R_THR1_ENA_WIDTH 1 /* EDRE_OUT3R_THR1_ENA */
3580#define ARIZONA_EDRE_OUT2L_THR1_ENA 0x0008 /* EDRE_OUT2L_THR1_ENA */
3581#define ARIZONA_EDRE_OUT2L_THR1_ENA_MASK 0x0008 /* EDRE_OUT2L_THR1_ENA */
3582#define ARIZONA_EDRE_OUT2L_THR1_ENA_SHIFT 3 /* EDRE_OUT2L_THR1_ENA */
3583#define ARIZONA_EDRE_OUT2L_THR1_ENA_WIDTH 1 /* EDRE_OUT2L_THR1_ENA */
3584#define ARIZONA_EDRE_OUT2R_THR1_ENA 0x0004 /* EDRE_OUT2R_THR1_ENA */
3585#define ARIZONA_EDRE_OUT2R_THR1_ENA_MASK 0x0004 /* EDRE_OUT2R_THR1_ENA */
3586#define ARIZONA_EDRE_OUT2R_THR1_ENA_SHIFT 2 /* EDRE_OUT2R_THR1_ENA */
3587#define ARIZONA_EDRE_OUT2R_THR1_ENA_WIDTH 1 /* EDRE_OUT2R_THR1_ENA */
3588#define ARIZONA_EDRE_OUT1L_THR1_ENA 0x0002 /* EDRE_OUT1L_THR1_ENA */
3589#define ARIZONA_EDRE_OUT1L_THR1_ENA_MASK 0x0002 /* EDRE_OUT1L_THR1_ENA */
3590#define ARIZONA_EDRE_OUT1L_THR1_ENA_SHIFT 1 /* EDRE_OUT1L_THR1_ENA */
3591#define ARIZONA_EDRE_OUT1L_THR1_ENA_WIDTH 1 /* EDRE_OUT1L_THR1_ENA */
3592#define ARIZONA_EDRE_OUT1R_THR1_ENA 0x0001 /* EDRE_OUT1R_THR1_ENA */
3593#define ARIZONA_EDRE_OUT1R_THR1_ENA_MASK 0x0001 /* EDRE_OUT1R_THR1_ENA */
3594#define ARIZONA_EDRE_OUT1R_THR1_ENA_SHIFT 0 /* EDRE_OUT1R_THR1_ENA */
3595#define ARIZONA_EDRE_OUT1R_THR1_ENA_WIDTH 1 /* EDRE_OUT1R_THR1_ENA */
3596
3431/* 3597/*
3432 * R1104 (0x450) - DAC AEC Control 1 3598 * R1104 (0x450) - DAC AEC Control 1
3433 */ 3599 */
@@ -4308,6 +4474,86 @@
4308#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */ 4474#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */
4309 4475
4310/* 4476/*
4477 * R1474 (0x5C2) - SPD1 TX Control
4478 */
4479#define ARIZONA_SPD1_VAL2 0x2000 /* SPD1_VAL2 */
4480#define ARIZONA_SPD1_VAL2_MASK 0x2000 /* SPD1_VAL2 */
4481#define ARIZONA_SPD1_VAL2_SHIFT 13 /* SPD1_VAL2 */
4482#define ARIZONA_SPD1_VAL2_WIDTH 1 /* SPD1_VAL2 */
4483#define ARIZONA_SPD1_VAL1 0x1000 /* SPD1_VAL1 */
4484#define ARIZONA_SPD1_VAL1_MASK 0x1000 /* SPD1_VAL1 */
4485#define ARIZONA_SPD1_VAL1_SHIFT 12 /* SPD1_VAL1 */
4486#define ARIZONA_SPD1_VAL1_WIDTH 1 /* SPD1_VAL1 */
4487#define ARIZONA_SPD1_RATE_MASK 0x00F0 /* SPD1_RATE */
4488#define ARIZONA_SPD1_RATE_SHIFT 4 /* SPD1_RATE */
4489#define ARIZONA_SPD1_RATE_WIDTH 4 /* SPD1_RATE */
4490#define ARIZONA_SPD1_ENA 0x0001 /* SPD1_ENA */
4491#define ARIZONA_SPD1_ENA_MASK 0x0001 /* SPD1_ENA */
4492#define ARIZONA_SPD1_ENA_SHIFT 0 /* SPD1_ENA */
4493#define ARIZONA_SPD1_ENA_WIDTH 1 /* SPD1_ENA */
4494
4495/*
4496 * R1475 (0x5C3) - SPD1 TX Channel Status 1
4497 */
4498#define ARIZONA_SPD1_CATCODE_MASK 0xFF00 /* SPD1_CATCODE */
4499#define ARIZONA_SPD1_CATCODE_SHIFT 8 /* SPD1_CATCODE */
4500#define ARIZONA_SPD1_CATCODE_WIDTH 8 /* SPD1_CATCODE */
4501#define ARIZONA_SPD1_CHSTMODE_MASK 0x00C0 /* SPD1_CHSTMODE */
4502#define ARIZONA_SPD1_CHSTMODE_SHIFT 6 /* SPD1_CHSTMODE */
4503#define ARIZONA_SPD1_CHSTMODE_WIDTH 2 /* SPD1_CHSTMODE */
4504#define ARIZONA_SPD1_PREEMPH_MASK 0x0038 /* SPD1_PREEMPH */
4505#define ARIZONA_SPD1_PREEMPH_SHIFT 3 /* SPD1_PREEMPH */
4506#define ARIZONA_SPD1_PREEMPH_WIDTH 3 /* SPD1_PREEMPH */
4507#define ARIZONA_SPD1_NOCOPY 0x0004 /* SPD1_NOCOPY */
4508#define ARIZONA_SPD1_NOCOPY_MASK 0x0004 /* SPD1_NOCOPY */
4509#define ARIZONA_SPD1_NOCOPY_SHIFT 2 /* SPD1_NOCOPY */
4510#define ARIZONA_SPD1_NOCOPY_WIDTH 1 /* SPD1_NOCOPY */
4511#define ARIZONA_SPD1_NOAUDIO 0x0002 /* SPD1_NOAUDIO */
4512#define ARIZONA_SPD1_NOAUDIO_MASK 0x0002 /* SPD1_NOAUDIO */
4513#define ARIZONA_SPD1_NOAUDIO_SHIFT 1 /* SPD1_NOAUDIO */
4514#define ARIZONA_SPD1_NOAUDIO_WIDTH 1 /* SPD1_NOAUDIO */
4515#define ARIZONA_SPD1_PRO 0x0001 /* SPD1_PRO */
4516#define ARIZONA_SPD1_PRO_MASK 0x0001 /* SPD1_PRO */
4517#define ARIZONA_SPD1_PRO_SHIFT 0 /* SPD1_PRO */
4518#define ARIZONA_SPD1_PRO_WIDTH 1 /* SPD1_PRO */
4519
4520/*
4521 * R1476 (0x5C4) - SPD1 TX Channel Status 2
4522 */
4523#define ARIZONA_SPD1_FREQ_MASK 0xF000 /* SPD1_FREQ */
4524#define ARIZONA_SPD1_FREQ_SHIFT 12 /* SPD1_FREQ */
4525#define ARIZONA_SPD1_FREQ_WIDTH 4 /* SPD1_FREQ */
4526#define ARIZONA_SPD1_CHNUM2_MASK 0x0F00 /* SPD1_CHNUM2 */
4527#define ARIZONA_SPD1_CHNUM2_SHIFT 8 /* SPD1_CHNUM2 */
4528#define ARIZONA_SPD1_CHNUM2_WIDTH 4 /* SPD1_CHNUM2 */
4529#define ARIZONA_SPD1_CHNUM1_MASK 0x00F0 /* SPD1_CHNUM1 */
4530#define ARIZONA_SPD1_CHNUM1_SHIFT 4 /* SPD1_CHNUM1 */
4531#define ARIZONA_SPD1_CHNUM1_WIDTH 4 /* SPD1_CHNUM1 */
4532#define ARIZONA_SPD1_SRCNUM_MASK 0x000F /* SPD1_SRCNUM */
4533#define ARIZONA_SPD1_SRCNUM_SHIFT 0 /* SPD1_SRCNUM */
4534#define ARIZONA_SPD1_SRCNUM_WIDTH 4 /* SPD1_SRCNUM */
4535
4536/*
4537 * R1475 (0x5C5) - SPD1 TX Channel Status 3
4538 */
4539#define ARIZONA_SPD1_ORGSAMP_MASK 0x0F00 /* SPD1_ORGSAMP */
4540#define ARIZONA_SPD1_ORGSAMP_SHIFT 8 /* SPD1_ORGSAMP */
4541#define ARIZONA_SPD1_ORGSAMP_WIDTH 4 /* SPD1_ORGSAMP */
4542#define ARIZONA_SPD1_TXWL_MASK 0x00E0 /* SPD1_TXWL */
4543#define ARIZONA_SPD1_TXWL_SHIFT 5 /* SPD1_TXWL */
4544#define ARIZONA_SPD1_TXWL_WIDTH 3 /* SPD1_TXWL */
4545#define ARIZONA_SPD1_MAXWL 0x0010 /* SPD1_MAXWL */
4546#define ARIZONA_SPD1_MAXWL_MASK 0x0010 /* SPD1_MAXWL */
4547#define ARIZONA_SPD1_MAXWL_SHIFT 4 /* SPD1_MAXWL */
4548#define ARIZONA_SPD1_MAXWL_WIDTH 1 /* SPD1_MAXWL */
4549#define ARIZONA_SPD1_CS31_30_MASK 0x000C /* SPD1_CS31_30 */
4550#define ARIZONA_SPD1_CS31_30_SHIFT 2 /* SPD1_CS31_30 */
4551#define ARIZONA_SPD1_CS31_30_WIDTH 2 /* SPD1_CS31_30 */
4552#define ARIZONA_SPD1_CLKACU_MASK 0x0003 /* SPD1_CLKACU */
4553#define ARIZONA_SPD1_CLKACU_SHIFT 0 /* SPD1_CLKACU */
4554#define ARIZONA_SPD1_CLKACU_WIDTH 2 /* SPD1_CLKACU */
4555
4556/*
4311 * R1507 (0x5E3) - SLIMbus Framer Ref Gear 4557 * R1507 (0x5E3) - SLIMbus Framer Ref Gear
4312 */ 4558 */
4313#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */ 4559#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */
@@ -4562,6 +4808,13 @@
4562#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */ 4808#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */
4563 4809
4564/* 4810/*
4811 * R3096 (0xC18) - GP Switch 1
4812 */
4813#define ARIZONA_SW1_MODE_MASK 0x0003 /* SW1_MODE - [1:0] */
4814#define ARIZONA_SW1_MODE_SHIFT 0 /* SW1_MODE - [1:0] */
4815#define ARIZONA_SW1_MODE_WIDTH 2 /* SW1_MODE - [1:0] */
4816
4817/*
4565 * R3104 (0xC20) - Misc Pad Ctrl 1 4818 * R3104 (0xC20) - Misc Pad Ctrl 1
4566 */ 4819 */
4567#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */ 4820#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */
@@ -6301,6 +6554,10 @@
6301/* 6554/*
6302 * R3366 (0xD26) - Interrupt Raw Status 8 6555 * R3366 (0xD26) - Interrupt Raw Status 8
6303 */ 6556 */
6557#define ARIZONA_SPDIF_OVERCLOCKED_STS 0x8000 /* SPDIF_OVERCLOCKED_STS */
6558#define ARIZONA_SPDIF_OVERCLOCKED_STS_MASK 0x8000 /* SPDIF_OVERCLOCKED_STS */
6559#define ARIZONA_SPDIF_OVERCLOCKED_STS_SHIFT 15 /* SPDIF_OVERCLOCKED_STS */
6560#define ARIZONA_SPDIF_OVERCLOCKED_STS_WIDTH 1 /* SPDIF_OVERCLOCKED_STS */
6304#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */ 6561#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */
6305#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */ 6562#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */
6306#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */ 6563#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */
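
A small sketch of how the new MASK/SHIFT pairs added to registers.h above are normally used to decode a field. "val" is a hypothetical raw readback of ARIZONA_MIC_DETECT_4; the patch itself adds only the definitions.

    unsigned int diff_val, adc_val;

    /* Extract the two Mic Detect 4 fields from a raw register value. */
    diff_val = (val & ARIZONA_MICDET_ADCVAL_DIFF_MASK) >> ARIZONA_MICDET_ADCVAL_DIFF_SHIFT;
    adc_val  = (val & ARIZONA_MICDET_ADCVAL_MASK) >> ARIZONA_MICDET_ADCVAL_SHIFT;
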
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index c2aa853fb412..cc8ad1e1a307 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,7 +12,8 @@
12#define __LINUX_MFD_AXP20X_H 12#define __LINUX_MFD_AXP20X_H
13 13
14enum { 14enum {
15 AXP202_ID = 0, 15 AXP152_ID = 0,
16 AXP202_ID,
16 AXP209_ID, 17 AXP209_ID,
17 AXP221_ID, 18 AXP221_ID,
18 AXP288_ID, 19 AXP288_ID,
@@ -22,6 +23,24 @@ enum {
22#define AXP20X_DATACACHE(m) (0x04 + (m)) 23#define AXP20X_DATACACHE(m) (0x04 + (m))
23 24
24/* Power supply */ 25/* Power supply */
26#define AXP152_PWR_OP_MODE 0x01
27#define AXP152_LDO3456_DC1234_CTRL 0x12
28#define AXP152_ALDO_OP_MODE 0x13
29#define AXP152_LDO0_CTRL 0x15
30#define AXP152_DCDC2_V_OUT 0x23
31#define AXP152_DCDC2_V_SCAL 0x25
32#define AXP152_DCDC1_V_OUT 0x26
33#define AXP152_DCDC3_V_OUT 0x27
34#define AXP152_ALDO12_V_OUT 0x28
35#define AXP152_DLDO1_V_OUT 0x29
36#define AXP152_DLDO2_V_OUT 0x2a
37#define AXP152_DCDC4_V_OUT 0x2b
38#define AXP152_V_OFF 0x31
39#define AXP152_OFF_CTRL 0x32
40#define AXP152_PEK_KEY 0x36
41#define AXP152_DCDC_FREQ 0x37
42#define AXP152_DCDC_MODE 0x80
43
25#define AXP20X_PWR_INPUT_STATUS 0x00 44#define AXP20X_PWR_INPUT_STATUS 0x00
26#define AXP20X_PWR_OP_MODE 0x01 45#define AXP20X_PWR_OP_MODE 0x01
27#define AXP20X_USB_OTG_STATUS 0x02 46#define AXP20X_USB_OTG_STATUS 0x02
@@ -69,6 +88,13 @@ enum {
69#define AXP22X_CHRG_CTRL3 0x35 88#define AXP22X_CHRG_CTRL3 0x35
70 89
71/* Interrupt */ 90/* Interrupt */
91#define AXP152_IRQ1_EN 0x40
92#define AXP152_IRQ2_EN 0x41
93#define AXP152_IRQ3_EN 0x42
94#define AXP152_IRQ1_STATE 0x48
95#define AXP152_IRQ2_STATE 0x49
96#define AXP152_IRQ3_STATE 0x4a
97
72#define AXP20X_IRQ1_EN 0x40 98#define AXP20X_IRQ1_EN 0x40
73#define AXP20X_IRQ2_EN 0x41 99#define AXP20X_IRQ2_EN 0x41
74#define AXP20X_IRQ3_EN 0x42 100#define AXP20X_IRQ3_EN 0x42
@@ -127,6 +153,19 @@ enum {
127#define AXP22X_PWREN_CTRL2 0x8d 153#define AXP22X_PWREN_CTRL2 0x8d
128 154
129/* GPIO */ 155/* GPIO */
156#define AXP152_GPIO0_CTRL 0x90
157#define AXP152_GPIO1_CTRL 0x91
158#define AXP152_GPIO2_CTRL 0x92
159#define AXP152_GPIO3_CTRL 0x93
160#define AXP152_LDOGPIO2_V_OUT 0x96
161#define AXP152_GPIO_INPUT 0x97
162#define AXP152_PWM0_FREQ_X 0x98
163#define AXP152_PWM0_FREQ_Y 0x99
164#define AXP152_PWM0_DUTY_CYCLE 0x9a
165#define AXP152_PWM1_FREQ_X 0x9b
166#define AXP152_PWM1_FREQ_Y 0x9c
167#define AXP152_PWM1_DUTY_CYCLE 0x9d
168
130#define AXP20X_GPIO0_CTRL 0x90 169#define AXP20X_GPIO0_CTRL 0x90
131#define AXP20X_LDO5_V_OUT 0x91 170#define AXP20X_LDO5_V_OUT 0x91
132#define AXP20X_GPIO1_CTRL 0x92 171#define AXP20X_GPIO1_CTRL 0x92
@@ -151,6 +190,12 @@ enum {
151#define AXP20X_CC_CTRL 0xb8 190#define AXP20X_CC_CTRL 0xb8
152#define AXP20X_FG_RES 0xb9 191#define AXP20X_FG_RES 0xb9
153 192
193/* OCV */
194#define AXP20X_RDC_H 0xba
195#define AXP20X_RDC_L 0xbb
196#define AXP20X_OCV(m) (0xc0 + (m))
197#define AXP20X_OCV_MAX 0xf
198
154/* AXP22X specific registers */ 199/* AXP22X specific registers */
155#define AXP22X_BATLOW_THRES1 0xe6 200#define AXP22X_BATLOW_THRES1 0xe6
156 201
@@ -218,6 +263,26 @@ enum {
218 263
219/* IRQs */ 264/* IRQs */
220enum { 265enum {
266 AXP152_IRQ_LDO0IN_CONNECT = 1,
267 AXP152_IRQ_LDO0IN_REMOVAL,
268 AXP152_IRQ_ALDO0IN_CONNECT,
269 AXP152_IRQ_ALDO0IN_REMOVAL,
270 AXP152_IRQ_DCDC1_V_LOW,
271 AXP152_IRQ_DCDC2_V_LOW,
272 AXP152_IRQ_DCDC3_V_LOW,
273 AXP152_IRQ_DCDC4_V_LOW,
274 AXP152_IRQ_PEK_SHORT,
275 AXP152_IRQ_PEK_LONG,
276 AXP152_IRQ_TIMER,
277 AXP152_IRQ_PEK_RIS_EDGE,
278 AXP152_IRQ_PEK_FAL_EDGE,
279 AXP152_IRQ_GPIO3_INPUT,
280 AXP152_IRQ_GPIO2_INPUT,
281 AXP152_IRQ_GPIO1_INPUT,
282 AXP152_IRQ_GPIO0_INPUT,
283};
284
285enum {
221 AXP20X_IRQ_ACIN_OVER_V = 1, 286 AXP20X_IRQ_ACIN_OVER_V = 1,
222 AXP20X_IRQ_ACIN_PLUGIN, 287 AXP20X_IRQ_ACIN_PLUGIN,
223 AXP20X_IRQ_ACIN_REMOVAL, 288 AXP20X_IRQ_ACIN_REMOVAL,
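
The new AXP20X_OCV(m)/AXP20X_OCV_MAX helpers above describe a 16-entry OCV table at registers 0xc0..0xcf. A driver would typically walk it as in the sketch below; program_ocv_entry() is a hypothetical helper standing in for the actual register access.

    unsigned int m;

    /* Iterate the OCV table registers added by this patch. */
    for (m = 0; m <= AXP20X_OCV_MAX; m++)
        program_ocv_entry(AXP20X_OCV(m));   /* hypothetical helper */
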
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
new file mode 100644
index 000000000000..376ba84366a0
--- /dev/null
+++ b/include/linux/mfd/da9062/core.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2015 Dialog Semiconductor Ltd.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __MFD_DA9062_CORE_H__
16#define __MFD_DA9062_CORE_H__
17
18#include <linux/interrupt.h>
19#include <linux/mfd/da9062/registers.h>
20
21/* Interrupts */
22enum da9062_irqs {
23 /* IRQ A */
24 DA9062_IRQ_ONKEY,
25 DA9062_IRQ_ALARM,
26 DA9062_IRQ_TICK,
27 DA9062_IRQ_WDG_WARN,
28 DA9062_IRQ_SEQ_RDY,
29 /* IRQ B*/
30 DA9062_IRQ_TEMP,
31 DA9062_IRQ_LDO_LIM,
32 DA9062_IRQ_DVC_RDY,
33 DA9062_IRQ_VDD_WARN,
34 /* IRQ C */
35 DA9062_IRQ_GPI0,
36 DA9062_IRQ_GPI1,
37 DA9062_IRQ_GPI2,
38 DA9062_IRQ_GPI3,
39 DA9062_IRQ_GPI4,
40
41 DA9062_NUM_IRQ,
42};
43
44struct da9062 {
45 struct device *dev;
46 struct regmap *regmap;
47 struct regmap_irq_chip_data *regmap_irq;
48};
49
50#endif /* __MFD_DA9062_CORE_H__ */
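
A hypothetical consumer sketch, not part of the patch, showing how an MFD child driver might pick up the struct da9062 defined above and read the ID register defined in the new registers.h that follows. The dev_get_drvdata(dev->parent) convention and the helper name are assumptions for illustration.

    #include <linux/mfd/da9062/core.h>
    #include <linux/regmap.h>

    static int da9062_check_id(struct device *dev)
    {
        struct da9062 *chip = dev_get_drvdata(dev->parent);
        unsigned int id;
        int ret;

        /* Read DA9062AA_DEVICE_ID through the shared regmap. */
        ret = regmap_read(chip->regmap, DA9062AA_DEVICE_ID, &id);
        if (ret)
            return ret;

        return (id == DA9062_PMIC_DEVICE_ID) ? 0 : -ENODEV;
    }
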
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
new file mode 100644
index 000000000000..97790d1b02c5
--- /dev/null
+++ b/include/linux/mfd/da9062/registers.h
@@ -0,0 +1,1108 @@
1/*
2 * registers.h - Register definitions for DA9062
3 * Copyright (C) 2015 Dialog Semiconductor Ltd.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __DA9062_H__
17#define __DA9062_H__
18
19#define DA9062_PMIC_DEVICE_ID 0x62
20#define DA9062_PMIC_VARIANT_MRC_AA 0x01
21
22#define DA9062_I2C_PAGE_SEL_SHIFT 1
23
24/*
25 * Registers
26 */
27
28#define DA9062AA_PAGE_CON 0x000
29#define DA9062AA_STATUS_A 0x001
30#define DA9062AA_STATUS_B 0x002
31#define DA9062AA_STATUS_D 0x004
32#define DA9062AA_FAULT_LOG 0x005
33#define DA9062AA_EVENT_A 0x006
34#define DA9062AA_EVENT_B 0x007
35#define DA9062AA_EVENT_C 0x008
36#define DA9062AA_IRQ_MASK_A 0x00A
37#define DA9062AA_IRQ_MASK_B 0x00B
38#define DA9062AA_IRQ_MASK_C 0x00C
39#define DA9062AA_CONTROL_A 0x00E
40#define DA9062AA_CONTROL_B 0x00F
41#define DA9062AA_CONTROL_C 0x010
42#define DA9062AA_CONTROL_D 0x011
43#define DA9062AA_CONTROL_E 0x012
44#define DA9062AA_CONTROL_F 0x013
45#define DA9062AA_PD_DIS 0x014
46#define DA9062AA_GPIO_0_1 0x015
47#define DA9062AA_GPIO_2_3 0x016
48#define DA9062AA_GPIO_4 0x017
49#define DA9062AA_GPIO_WKUP_MODE 0x01C
50#define DA9062AA_GPIO_MODE0_4 0x01D
51#define DA9062AA_GPIO_OUT0_2 0x01E
52#define DA9062AA_GPIO_OUT3_4 0x01F
53#define DA9062AA_BUCK2_CONT 0x020
54#define DA9062AA_BUCK1_CONT 0x021
55#define DA9062AA_BUCK4_CONT 0x022
56#define DA9062AA_BUCK3_CONT 0x024
57#define DA9062AA_LDO1_CONT 0x026
58#define DA9062AA_LDO2_CONT 0x027
59#define DA9062AA_LDO3_CONT 0x028
60#define DA9062AA_LDO4_CONT 0x029
61#define DA9062AA_DVC_1 0x032
62#define DA9062AA_COUNT_S 0x040
63#define DA9062AA_COUNT_MI 0x041
64#define DA9062AA_COUNT_H 0x042
65#define DA9062AA_COUNT_D 0x043
66#define DA9062AA_COUNT_MO 0x044
67#define DA9062AA_COUNT_Y 0x045
68#define DA9062AA_ALARM_S 0x046
69#define DA9062AA_ALARM_MI 0x047
70#define DA9062AA_ALARM_H 0x048
71#define DA9062AA_ALARM_D 0x049
72#define DA9062AA_ALARM_MO 0x04A
73#define DA9062AA_ALARM_Y 0x04B
74#define DA9062AA_SECOND_A 0x04C
75#define DA9062AA_SECOND_B 0x04D
76#define DA9062AA_SECOND_C 0x04E
77#define DA9062AA_SECOND_D 0x04F
78#define DA9062AA_SEQ 0x081
79#define DA9062AA_SEQ_TIMER 0x082
80#define DA9062AA_ID_2_1 0x083
81#define DA9062AA_ID_4_3 0x084
82#define DA9062AA_ID_12_11 0x088
83#define DA9062AA_ID_14_13 0x089
84#define DA9062AA_ID_16_15 0x08A
85#define DA9062AA_ID_22_21 0x08D
86#define DA9062AA_ID_24_23 0x08E
87#define DA9062AA_ID_26_25 0x08F
88#define DA9062AA_ID_28_27 0x090
89#define DA9062AA_ID_30_29 0x091
90#define DA9062AA_ID_32_31 0x092
91#define DA9062AA_SEQ_A 0x095
92#define DA9062AA_SEQ_B 0x096
93#define DA9062AA_WAIT 0x097
94#define DA9062AA_EN_32K 0x098
95#define DA9062AA_RESET 0x099
96#define DA9062AA_BUCK_ILIM_A 0x09A
97#define DA9062AA_BUCK_ILIM_B 0x09B
98#define DA9062AA_BUCK_ILIM_C 0x09C
99#define DA9062AA_BUCK2_CFG 0x09D
100#define DA9062AA_BUCK1_CFG 0x09E
101#define DA9062AA_BUCK4_CFG 0x09F
102#define DA9062AA_BUCK3_CFG 0x0A0
103#define DA9062AA_VBUCK2_A 0x0A3
104#define DA9062AA_VBUCK1_A 0x0A4
105#define DA9062AA_VBUCK4_A 0x0A5
106#define DA9062AA_VBUCK3_A 0x0A7
107#define DA9062AA_VLDO1_A 0x0A9
108#define DA9062AA_VLDO2_A 0x0AA
109#define DA9062AA_VLDO3_A 0x0AB
110#define DA9062AA_VLDO4_A 0x0AC
111#define DA9062AA_VBUCK2_B 0x0B4
112#define DA9062AA_VBUCK1_B 0x0B5
113#define DA9062AA_VBUCK4_B 0x0B6
114#define DA9062AA_VBUCK3_B 0x0B8
115#define DA9062AA_VLDO1_B 0x0BA
116#define DA9062AA_VLDO2_B 0x0BB
117#define DA9062AA_VLDO3_B 0x0BC
118#define DA9062AA_VLDO4_B 0x0BD
119#define DA9062AA_BBAT_CONT 0x0C5
120#define DA9062AA_INTERFACE 0x105
121#define DA9062AA_CONFIG_A 0x106
122#define DA9062AA_CONFIG_B 0x107
123#define DA9062AA_CONFIG_C 0x108
124#define DA9062AA_CONFIG_D 0x109
125#define DA9062AA_CONFIG_E 0x10A
126#define DA9062AA_CONFIG_G 0x10C
127#define DA9062AA_CONFIG_H 0x10D
128#define DA9062AA_CONFIG_I 0x10E
129#define DA9062AA_CONFIG_J 0x10F
130#define DA9062AA_CONFIG_K 0x110
131#define DA9062AA_CONFIG_M 0x112
132#define DA9062AA_TRIM_CLDR 0x120
133#define DA9062AA_GP_ID_0 0x121
134#define DA9062AA_GP_ID_1 0x122
135#define DA9062AA_GP_ID_2 0x123
136#define DA9062AA_GP_ID_3 0x124
137#define DA9062AA_GP_ID_4 0x125
138#define DA9062AA_GP_ID_5 0x126
139#define DA9062AA_GP_ID_6 0x127
140#define DA9062AA_GP_ID_7 0x128
141#define DA9062AA_GP_ID_8 0x129
142#define DA9062AA_GP_ID_9 0x12A
143#define DA9062AA_GP_ID_10 0x12B
144#define DA9062AA_GP_ID_11 0x12C
145#define DA9062AA_GP_ID_12 0x12D
146#define DA9062AA_GP_ID_13 0x12E
147#define DA9062AA_GP_ID_14 0x12F
148#define DA9062AA_GP_ID_15 0x130
149#define DA9062AA_GP_ID_16 0x131
150#define DA9062AA_GP_ID_17 0x132
151#define DA9062AA_GP_ID_18 0x133
152#define DA9062AA_GP_ID_19 0x134
153#define DA9062AA_DEVICE_ID 0x181
154#define DA9062AA_VARIANT_ID 0x182
155#define DA9062AA_CUSTOMER_ID 0x183
156#define DA9062AA_CONFIG_ID 0x184
157
158/*
159 * Bit fields
160 */
161
162/* DA9062AA_PAGE_CON = 0x000 */
163#define DA9062AA_PAGE_SHIFT 0
164#define DA9062AA_PAGE_MASK 0x3f
165#define DA9062AA_WRITE_MODE_SHIFT 6
166#define DA9062AA_WRITE_MODE_MASK BIT(6)
167#define DA9062AA_REVERT_SHIFT 7
168#define DA9062AA_REVERT_MASK BIT(7)
169
170/* DA9062AA_STATUS_A = 0x001 */
171#define DA9062AA_NONKEY_SHIFT 0
172#define DA9062AA_NONKEY_MASK 0x01
173#define DA9062AA_DVC_BUSY_SHIFT 2
174#define DA9062AA_DVC_BUSY_MASK BIT(2)
175
176/* DA9062AA_STATUS_B = 0x002 */
177#define DA9062AA_GPI0_SHIFT 0
178#define DA9062AA_GPI0_MASK 0x01
179#define DA9062AA_GPI1_SHIFT 1
180#define DA9062AA_GPI1_MASK BIT(1)
181#define DA9062AA_GPI2_SHIFT 2
182#define DA9062AA_GPI2_MASK BIT(2)
183#define DA9062AA_GPI3_SHIFT 3
184#define DA9062AA_GPI3_MASK BIT(3)
185#define DA9062AA_GPI4_SHIFT 4
186#define DA9062AA_GPI4_MASK BIT(4)
187
188/* DA9062AA_STATUS_D = 0x004 */
189#define DA9062AA_LDO1_ILIM_SHIFT 0
190#define DA9062AA_LDO1_ILIM_MASK 0x01
191#define DA9062AA_LDO2_ILIM_SHIFT 1
192#define DA9062AA_LDO2_ILIM_MASK BIT(1)
193#define DA9062AA_LDO3_ILIM_SHIFT 2
194#define DA9062AA_LDO3_ILIM_MASK BIT(2)
195#define DA9062AA_LDO4_ILIM_SHIFT 3
196#define DA9062AA_LDO4_ILIM_MASK BIT(3)
197
198/* DA9062AA_FAULT_LOG = 0x005 */
199#define DA9062AA_TWD_ERROR_SHIFT 0
200#define DA9062AA_TWD_ERROR_MASK 0x01
201#define DA9062AA_POR_SHIFT 1
202#define DA9062AA_POR_MASK BIT(1)
203#define DA9062AA_VDD_FAULT_SHIFT 2
204#define DA9062AA_VDD_FAULT_MASK BIT(2)
205#define DA9062AA_VDD_START_SHIFT 3
206#define DA9062AA_VDD_START_MASK BIT(3)
207#define DA9062AA_TEMP_CRIT_SHIFT 4
208#define DA9062AA_TEMP_CRIT_MASK BIT(4)
209#define DA9062AA_KEY_RESET_SHIFT 5
210#define DA9062AA_KEY_RESET_MASK BIT(5)
211#define DA9062AA_NSHUTDOWN_SHIFT 6
212#define DA9062AA_NSHUTDOWN_MASK BIT(6)
213#define DA9062AA_WAIT_SHUT_SHIFT 7
214#define DA9062AA_WAIT_SHUT_MASK BIT(7)
215
216/* DA9062AA_EVENT_A = 0x006 */
217#define DA9062AA_E_NONKEY_SHIFT 0
218#define DA9062AA_E_NONKEY_MASK 0x01
219#define DA9062AA_E_ALARM_SHIFT 1
220#define DA9062AA_E_ALARM_MASK BIT(1)
221#define DA9062AA_E_TICK_SHIFT 2
222#define DA9062AA_E_TICK_MASK BIT(2)
223#define DA9062AA_E_WDG_WARN_SHIFT 3
224#define DA9062AA_E_WDG_WARN_MASK BIT(3)
225#define DA9062AA_E_SEQ_RDY_SHIFT 4
226#define DA9062AA_E_SEQ_RDY_MASK BIT(4)
227#define DA9062AA_EVENTS_B_SHIFT 5
228#define DA9062AA_EVENTS_B_MASK BIT(5)
229#define DA9062AA_EVENTS_C_SHIFT 6
230#define DA9062AA_EVENTS_C_MASK BIT(6)
231
232/* DA9062AA_EVENT_B = 0x007 */
233#define DA9062AA_E_TEMP_SHIFT 1
234#define DA9062AA_E_TEMP_MASK BIT(1)
235#define DA9062AA_E_LDO_LIM_SHIFT 3
236#define DA9062AA_E_LDO_LIM_MASK BIT(3)
237#define DA9062AA_E_DVC_RDY_SHIFT 5
238#define DA9062AA_E_DVC_RDY_MASK BIT(5)
239#define DA9062AA_E_VDD_WARN_SHIFT 7
240#define DA9062AA_E_VDD_WARN_MASK BIT(7)
241
242/* DA9062AA_EVENT_C = 0x008 */
243#define DA9062AA_E_GPI0_SHIFT 0
244#define DA9062AA_E_GPI0_MASK 0x01
245#define DA9062AA_E_GPI1_SHIFT 1
246#define DA9062AA_E_GPI1_MASK BIT(1)
247#define DA9062AA_E_GPI2_SHIFT 2
248#define DA9062AA_E_GPI2_MASK BIT(2)
249#define DA9062AA_E_GPI3_SHIFT 3
250#define DA9062AA_E_GPI3_MASK BIT(3)
251#define DA9062AA_E_GPI4_SHIFT 4
252#define DA9062AA_E_GPI4_MASK BIT(4)
253
254/* DA9062AA_IRQ_MASK_A = 0x00A */
255#define DA9062AA_M_NONKEY_SHIFT 0
256#define DA9062AA_M_NONKEY_MASK 0x01
257#define DA9062AA_M_ALARM_SHIFT 1
258#define DA9062AA_M_ALARM_MASK BIT(1)
259#define DA9062AA_M_TICK_SHIFT 2
260#define DA9062AA_M_TICK_MASK BIT(2)
261#define DA9062AA_M_WDG_WARN_SHIFT 3
262#define DA9062AA_M_WDG_WARN_MASK BIT(3)
263#define DA9062AA_M_SEQ_RDY_SHIFT 4
264#define DA9062AA_M_SEQ_RDY_MASK BIT(4)
265
266/* DA9062AA_IRQ_MASK_B = 0x00B */
267#define DA9062AA_M_TEMP_SHIFT 1
268#define DA9062AA_M_TEMP_MASK BIT(1)
269#define DA9062AA_M_LDO_LIM_SHIFT 3
270#define DA9062AA_M_LDO_LIM_MASK BIT(3)
271#define DA9062AA_M_DVC_RDY_SHIFT 5
272#define DA9062AA_M_DVC_RDY_MASK BIT(5)
273#define DA9062AA_M_VDD_WARN_SHIFT 7
274#define DA9062AA_M_VDD_WARN_MASK BIT(7)
275
276/* DA9062AA_IRQ_MASK_C = 0x00C */
277#define DA9062AA_M_GPI0_SHIFT 0
278#define DA9062AA_M_GPI0_MASK 0x01
279#define DA9062AA_M_GPI1_SHIFT 1
280#define DA9062AA_M_GPI1_MASK BIT(1)
281#define DA9062AA_M_GPI2_SHIFT 2
282#define DA9062AA_M_GPI2_MASK BIT(2)
283#define DA9062AA_M_GPI3_SHIFT 3
284#define DA9062AA_M_GPI3_MASK BIT(3)
285#define DA9062AA_M_GPI4_SHIFT 4
286#define DA9062AA_M_GPI4_MASK BIT(4)
287
288/* DA9062AA_CONTROL_A = 0x00E */
289#define DA9062AA_SYSTEM_EN_SHIFT 0
290#define DA9062AA_SYSTEM_EN_MASK 0x01
291#define DA9062AA_POWER_EN_SHIFT 1
292#define DA9062AA_POWER_EN_MASK BIT(1)
293#define DA9062AA_POWER1_EN_SHIFT 2
294#define DA9062AA_POWER1_EN_MASK BIT(2)
295#define DA9062AA_STANDBY_SHIFT 3
296#define DA9062AA_STANDBY_MASK BIT(3)
297#define DA9062AA_M_SYSTEM_EN_SHIFT 4
298#define DA9062AA_M_SYSTEM_EN_MASK BIT(4)
299#define DA9062AA_M_POWER_EN_SHIFT 5
300#define DA9062AA_M_POWER_EN_MASK BIT(5)
301#define DA9062AA_M_POWER1_EN_SHIFT 6
302#define DA9062AA_M_POWER1_EN_MASK BIT(6)
303
304/* DA9062AA_CONTROL_B = 0x00F */
305#define DA9062AA_WATCHDOG_PD_SHIFT 1
306#define DA9062AA_WATCHDOG_PD_MASK BIT(1)
307#define DA9062AA_FREEZE_EN_SHIFT 2
308#define DA9062AA_FREEZE_EN_MASK BIT(2)
309#define DA9062AA_NRES_MODE_SHIFT 3
310#define DA9062AA_NRES_MODE_MASK BIT(3)
311#define DA9062AA_NONKEY_LOCK_SHIFT 4
312#define DA9062AA_NONKEY_LOCK_MASK BIT(4)
313#define DA9062AA_NFREEZE_SHIFT 5
314#define DA9062AA_NFREEZE_MASK (0x03 << 5)
315#define DA9062AA_BUCK_SLOWSTART_SHIFT 7
316#define DA9062AA_BUCK_SLOWSTART_MASK BIT(7)
317
318/* DA9062AA_CONTROL_C = 0x010 */
319#define DA9062AA_DEBOUNCING_SHIFT 0
320#define DA9062AA_DEBOUNCING_MASK 0x07
321#define DA9062AA_AUTO_BOOT_SHIFT 3
322#define DA9062AA_AUTO_BOOT_MASK BIT(3)
323#define DA9062AA_OTPREAD_EN_SHIFT 4
324#define DA9062AA_OTPREAD_EN_MASK BIT(4)
325#define DA9062AA_SLEW_RATE_SHIFT 5
326#define DA9062AA_SLEW_RATE_MASK (0x03 << 5)
327#define DA9062AA_DEF_SUPPLY_SHIFT 7
328#define DA9062AA_DEF_SUPPLY_MASK BIT(7)
329
330/* DA9062AA_CONTROL_D = 0x011 */
331#define DA9062AA_TWDSCALE_SHIFT 0
332#define DA9062AA_TWDSCALE_MASK 0x07
333
334/* DA9062AA_CONTROL_E = 0x012 */
335#define DA9062AA_RTC_MODE_PD_SHIFT 0
336#define DA9062AA_RTC_MODE_PD_MASK 0x01
337#define DA9062AA_RTC_MODE_SD_SHIFT 1
338#define DA9062AA_RTC_MODE_SD_MASK BIT(1)
339#define DA9062AA_RTC_EN_SHIFT 2
340#define DA9062AA_RTC_EN_MASK BIT(2)
341#define DA9062AA_V_LOCK_SHIFT 7
342#define DA9062AA_V_LOCK_MASK BIT(7)
343
344/* DA9062AA_CONTROL_F = 0x013 */
345#define DA9062AA_WATCHDOG_SHIFT 0
346#define DA9062AA_WATCHDOG_MASK 0x01
347#define DA9062AA_SHUTDOWN_SHIFT 1
348#define DA9062AA_SHUTDOWN_MASK BIT(1)
349#define DA9062AA_WAKE_UP_SHIFT 2
350#define DA9062AA_WAKE_UP_MASK BIT(2)
351
352/* DA9062AA_PD_DIS = 0x014 */
353#define DA9062AA_GPI_DIS_SHIFT 0
354#define DA9062AA_GPI_DIS_MASK 0x01
355#define DA9062AA_PMIF_DIS_SHIFT 2
356#define DA9062AA_PMIF_DIS_MASK BIT(2)
357#define DA9062AA_CLDR_PAUSE_SHIFT 4
358#define DA9062AA_CLDR_PAUSE_MASK BIT(4)
359#define DA9062AA_BBAT_DIS_SHIFT 5
360#define DA9062AA_BBAT_DIS_MASK BIT(5)
361#define DA9062AA_OUT32K_PAUSE_SHIFT 6
362#define DA9062AA_OUT32K_PAUSE_MASK BIT(6)
363#define DA9062AA_PMCONT_DIS_SHIFT 7
364#define DA9062AA_PMCONT_DIS_MASK BIT(7)
365
366/* DA9062AA_GPIO_0_1 = 0x015 */
367#define DA9062AA_GPIO0_PIN_SHIFT 0
368#define DA9062AA_GPIO0_PIN_MASK 0x03
369#define DA9062AA_GPIO0_TYPE_SHIFT 2
370#define DA9062AA_GPIO0_TYPE_MASK BIT(2)
371#define DA9062AA_GPIO0_WEN_SHIFT 3
372#define DA9062AA_GPIO0_WEN_MASK BIT(3)
373#define DA9062AA_GPIO1_PIN_SHIFT 4
374#define DA9062AA_GPIO1_PIN_MASK (0x03 << 4)
375#define DA9062AA_GPIO1_TYPE_SHIFT 6
376#define DA9062AA_GPIO1_TYPE_MASK BIT(6)
377#define DA9062AA_GPIO1_WEN_SHIFT 7
378#define DA9062AA_GPIO1_WEN_MASK BIT(7)
379
380/* DA9062AA_GPIO_2_3 = 0x016 */
381#define DA9062AA_GPIO2_PIN_SHIFT 0
382#define DA9062AA_GPIO2_PIN_MASK 0x03
383#define DA9062AA_GPIO2_TYPE_SHIFT 2
384#define DA9062AA_GPIO2_TYPE_MASK BIT(2)
385#define DA9062AA_GPIO2_WEN_SHIFT 3
386#define DA9062AA_GPIO2_WEN_MASK BIT(3)
387#define DA9062AA_GPIO3_PIN_SHIFT 4
388#define DA9062AA_GPIO3_PIN_MASK (0x03 << 4)
389#define DA9062AA_GPIO3_TYPE_SHIFT 6
390#define DA9062AA_GPIO3_TYPE_MASK BIT(6)
391#define DA9062AA_GPIO3_WEN_SHIFT 7
392#define DA9062AA_GPIO3_WEN_MASK BIT(7)
393
394/* DA9062AA_GPIO_4 = 0x017 */
395#define DA9062AA_GPIO4_PIN_SHIFT 0
396#define DA9062AA_GPIO4_PIN_MASK 0x03
397#define DA9062AA_GPIO4_TYPE_SHIFT 2
398#define DA9062AA_GPIO4_TYPE_MASK BIT(2)
399#define DA9062AA_GPIO4_WEN_SHIFT 3
400#define DA9062AA_GPIO4_WEN_MASK BIT(3)
401
402/* DA9062AA_GPIO_WKUP_MODE = 0x01C */
403#define DA9062AA_GPIO0_WKUP_MODE_SHIFT 0
404#define DA9062AA_GPIO0_WKUP_MODE_MASK 0x01
405#define DA9062AA_GPIO1_WKUP_MODE_SHIFT 1
406#define DA9062AA_GPIO1_WKUP_MODE_MASK BIT(1)
407#define DA9062AA_GPIO2_WKUP_MODE_SHIFT 2
408#define DA9062AA_GPIO2_WKUP_MODE_MASK BIT(2)
409#define DA9062AA_GPIO3_WKUP_MODE_SHIFT 3
410#define DA9062AA_GPIO3_WKUP_MODE_MASK BIT(3)
411#define DA9062AA_GPIO4_WKUP_MODE_SHIFT 4
412#define DA9062AA_GPIO4_WKUP_MODE_MASK BIT(4)
413
414/* DA9062AA_GPIO_MODE0_4 = 0x01D */
415#define DA9062AA_GPIO0_MODE_SHIFT 0
416#define DA9062AA_GPIO0_MODE_MASK 0x01
417#define DA9062AA_GPIO1_MODE_SHIFT 1
418#define DA9062AA_GPIO1_MODE_MASK BIT(1)
419#define DA9062AA_GPIO2_MODE_SHIFT 2
420#define DA9062AA_GPIO2_MODE_MASK BIT(2)
421#define DA9062AA_GPIO3_MODE_SHIFT 3
422#define DA9062AA_GPIO3_MODE_MASK BIT(3)
423#define DA9062AA_GPIO4_MODE_SHIFT 4
424#define DA9062AA_GPIO4_MODE_MASK BIT(4)
425
426/* DA9062AA_GPIO_OUT0_2 = 0x01E */
427#define DA9062AA_GPIO0_OUT_SHIFT 0
428#define DA9062AA_GPIO0_OUT_MASK 0x07
429#define DA9062AA_GPIO1_OUT_SHIFT 3
430#define DA9062AA_GPIO1_OUT_MASK (0x07 << 3)
431#define DA9062AA_GPIO2_OUT_SHIFT 6
432#define DA9062AA_GPIO2_OUT_MASK (0x03 << 6)
433
434/* DA9062AA_GPIO_OUT3_4 = 0x01F */
435#define DA9062AA_GPIO3_OUT_SHIFT 0
436#define DA9062AA_GPIO3_OUT_MASK 0x07
437#define DA9062AA_GPIO4_OUT_SHIFT 3
438#define DA9062AA_GPIO4_OUT_MASK (0x03 << 3)
439
440/* DA9062AA_BUCK2_CONT = 0x020 */
441#define DA9062AA_BUCK2_EN_SHIFT 0
442#define DA9062AA_BUCK2_EN_MASK 0x01
443#define DA9062AA_BUCK2_GPI_SHIFT 1
444#define DA9062AA_BUCK2_GPI_MASK (0x03 << 1)
445#define DA9062AA_BUCK2_CONF_SHIFT 3
446#define DA9062AA_BUCK2_CONF_MASK BIT(3)
447#define DA9062AA_VBUCK2_GPI_SHIFT 5
448#define DA9062AA_VBUCK2_GPI_MASK (0x03 << 5)
449
450/* DA9062AA_BUCK1_CONT = 0x021 */
451#define DA9062AA_BUCK1_EN_SHIFT 0
452#define DA9062AA_BUCK1_EN_MASK 0x01
453#define DA9062AA_BUCK1_GPI_SHIFT 1
454#define DA9062AA_BUCK1_GPI_MASK (0x03 << 1)
455#define DA9062AA_BUCK1_CONF_SHIFT 3
456#define DA9062AA_BUCK1_CONF_MASK BIT(3)
457#define DA9062AA_VBUCK1_GPI_SHIFT 5
458#define DA9062AA_VBUCK1_GPI_MASK (0x03 << 5)
459
460/* DA9062AA_BUCK4_CONT = 0x022 */
461#define DA9062AA_BUCK4_EN_SHIFT 0
462#define DA9062AA_BUCK4_EN_MASK 0x01
463#define DA9062AA_BUCK4_GPI_SHIFT 1
464#define DA9062AA_BUCK4_GPI_MASK (0x03 << 1)
465#define DA9062AA_BUCK4_CONF_SHIFT 3
466#define DA9062AA_BUCK4_CONF_MASK BIT(3)
467#define DA9062AA_VBUCK4_GPI_SHIFT 5
468#define DA9062AA_VBUCK4_GPI_MASK (0x03 << 5)
469
470/* DA9062AA_BUCK3_CONT = 0x024 */
471#define DA9062AA_BUCK3_EN_SHIFT 0
472#define DA9062AA_BUCK3_EN_MASK 0x01
473#define DA9062AA_BUCK3_GPI_SHIFT 1
474#define DA9062AA_BUCK3_GPI_MASK (0x03 << 1)
475#define DA9062AA_BUCK3_CONF_SHIFT 3
476#define DA9062AA_BUCK3_CONF_MASK BIT(3)
477#define DA9062AA_VBUCK3_GPI_SHIFT 5
478#define DA9062AA_VBUCK3_GPI_MASK (0x03 << 5)
479
480/* DA9062AA_LDO1_CONT = 0x026 */
481#define DA9062AA_LDO1_EN_SHIFT 0
482#define DA9062AA_LDO1_EN_MASK 0x01
483#define DA9062AA_LDO1_GPI_SHIFT 1
484#define DA9062AA_LDO1_GPI_MASK (0x03 << 1)
485#define DA9062AA_LDO1_PD_DIS_SHIFT 3
486#define DA9062AA_LDO1_PD_DIS_MASK BIT(3)
487#define DA9062AA_VLDO1_GPI_SHIFT 5
488#define DA9062AA_VLDO1_GPI_MASK (0x03 << 5)
489#define DA9062AA_LDO1_CONF_SHIFT 7
490#define DA9062AA_LDO1_CONF_MASK BIT(7)
491
492/* DA9062AA_LDO2_CONT = 0x027 */
493#define DA9062AA_LDO2_EN_SHIFT 0
494#define DA9062AA_LDO2_EN_MASK 0x01
495#define DA9062AA_LDO2_GPI_SHIFT 1
496#define DA9062AA_LDO2_GPI_MASK (0x03 << 1)
497#define DA9062AA_LDO2_PD_DIS_SHIFT 3
498#define DA9062AA_LDO2_PD_DIS_MASK BIT(3)
499#define DA9062AA_VLDO2_GPI_SHIFT 5
500#define DA9062AA_VLDO2_GPI_MASK (0x03 << 5)
501#define DA9062AA_LDO2_CONF_SHIFT 7
502#define DA9062AA_LDO2_CONF_MASK BIT(7)
503
504/* DA9062AA_LDO3_CONT = 0x028 */
505#define DA9062AA_LDO3_EN_SHIFT 0
506#define DA9062AA_LDO3_EN_MASK 0x01
507#define DA9062AA_LDO3_GPI_SHIFT 1
508#define DA9062AA_LDO3_GPI_MASK (0x03 << 1)
509#define DA9062AA_LDO3_PD_DIS_SHIFT 3
510#define DA9062AA_LDO3_PD_DIS_MASK BIT(3)
511#define DA9062AA_VLDO3_GPI_SHIFT 5
512#define DA9062AA_VLDO3_GPI_MASK (0x03 << 5)
513#define DA9062AA_LDO3_CONF_SHIFT 7
514#define DA9062AA_LDO3_CONF_MASK BIT(7)
515
516/* DA9062AA_LDO4_CONT = 0x029 */
517#define DA9062AA_LDO4_EN_SHIFT 0
518#define DA9062AA_LDO4_EN_MASK 0x01
519#define DA9062AA_LDO4_GPI_SHIFT 1
520#define DA9062AA_LDO4_GPI_MASK (0x03 << 1)
521#define DA9062AA_LDO4_PD_DIS_SHIFT 3
522#define DA9062AA_LDO4_PD_DIS_MASK BIT(3)
523#define DA9062AA_VLDO4_GPI_SHIFT 5
524#define DA9062AA_VLDO4_GPI_MASK (0x03 << 5)
525#define DA9062AA_LDO4_CONF_SHIFT 7
526#define DA9062AA_LDO4_CONF_MASK BIT(7)
527
528/* DA9062AA_DVC_1 = 0x032 */
529#define DA9062AA_VBUCK1_SEL_SHIFT 0
530#define DA9062AA_VBUCK1_SEL_MASK 0x01
531#define DA9062AA_VBUCK2_SEL_SHIFT 1
532#define DA9062AA_VBUCK2_SEL_MASK BIT(1)
533#define DA9062AA_VBUCK4_SEL_SHIFT 2
534#define DA9062AA_VBUCK4_SEL_MASK BIT(2)
535#define DA9062AA_VBUCK3_SEL_SHIFT 3
536#define DA9062AA_VBUCK3_SEL_MASK BIT(3)
537#define DA9062AA_VLDO1_SEL_SHIFT 4
538#define DA9062AA_VLDO1_SEL_MASK BIT(4)
539#define DA9062AA_VLDO2_SEL_SHIFT 5
540#define DA9062AA_VLDO2_SEL_MASK BIT(5)
541#define DA9062AA_VLDO3_SEL_SHIFT 6
542#define DA9062AA_VLDO3_SEL_MASK BIT(6)
543#define DA9062AA_VLDO4_SEL_SHIFT 7
544#define DA9062AA_VLDO4_SEL_MASK BIT(7)
545
546/* DA9062AA_COUNT_S = 0x040 */
547#define DA9062AA_COUNT_SEC_SHIFT 0
548#define DA9062AA_COUNT_SEC_MASK 0x3f
549#define DA9062AA_RTC_READ_SHIFT 7
550#define DA9062AA_RTC_READ_MASK BIT(7)
551
552/* DA9062AA_COUNT_MI = 0x041 */
553#define DA9062AA_COUNT_MIN_SHIFT 0
554#define DA9062AA_COUNT_MIN_MASK 0x3f
555
556/* DA9062AA_COUNT_H = 0x042 */
557#define DA9062AA_COUNT_HOUR_SHIFT 0
558#define DA9062AA_COUNT_HOUR_MASK 0x1f
559
560/* DA9062AA_COUNT_D = 0x043 */
561#define DA9062AA_COUNT_DAY_SHIFT 0
562#define DA9062AA_COUNT_DAY_MASK 0x1f
563
564/* DA9062AA_COUNT_MO = 0x044 */
565#define DA9062AA_COUNT_MONTH_SHIFT 0
566#define DA9062AA_COUNT_MONTH_MASK 0x0f
567
568/* DA9062AA_COUNT_Y = 0x045 */
569#define DA9062AA_COUNT_YEAR_SHIFT 0
570#define DA9062AA_COUNT_YEAR_MASK 0x3f
571#define DA9062AA_MONITOR_SHIFT 6
572#define DA9062AA_MONITOR_MASK BIT(6)
573
574/* DA9062AA_ALARM_S = 0x046 */
575#define DA9062AA_ALARM_SEC_SHIFT 0
576#define DA9062AA_ALARM_SEC_MASK 0x3f
577#define DA9062AA_ALARM_STATUS_SHIFT 6
578#define DA9062AA_ALARM_STATUS_MASK (0x03 << 6)
579
580/* DA9062AA_ALARM_MI = 0x047 */
581#define DA9062AA_ALARM_MIN_SHIFT 0
582#define DA9062AA_ALARM_MIN_MASK 0x3f
583
584/* DA9062AA_ALARM_H = 0x048 */
585#define DA9062AA_ALARM_HOUR_SHIFT 0
586#define DA9062AA_ALARM_HOUR_MASK 0x1f
587
588/* DA9062AA_ALARM_D = 0x049 */
589#define DA9062AA_ALARM_DAY_SHIFT 0
590#define DA9062AA_ALARM_DAY_MASK 0x1f
591
592/* DA9062AA_ALARM_MO = 0x04A */
593#define DA9062AA_ALARM_MONTH_SHIFT 0
594#define DA9062AA_ALARM_MONTH_MASK 0x0f
595#define DA9062AA_TICK_TYPE_SHIFT 4
596#define DA9062AA_TICK_TYPE_MASK BIT(4)
597#define DA9062AA_TICK_WAKE_SHIFT 5
598#define DA9062AA_TICK_WAKE_MASK BIT(5)
599
600/* DA9062AA_ALARM_Y = 0x04B */
601#define DA9062AA_ALARM_YEAR_SHIFT 0
602#define DA9062AA_ALARM_YEAR_MASK 0x3f
603#define DA9062AA_ALARM_ON_SHIFT 6
604#define DA9062AA_ALARM_ON_MASK BIT(6)
605#define DA9062AA_TICK_ON_SHIFT 7
606#define DA9062AA_TICK_ON_MASK BIT(7)
607
608/* DA9062AA_SECOND_A = 0x04C */
609#define DA9062AA_SECONDS_A_SHIFT 0
610#define DA9062AA_SECONDS_A_MASK 0xff
611
612/* DA9062AA_SECOND_B = 0x04D */
613#define DA9062AA_SECONDS_B_SHIFT 0
614#define DA9062AA_SECONDS_B_MASK 0xff
615
616/* DA9062AA_SECOND_C = 0x04E */
617#define DA9062AA_SECONDS_C_SHIFT 0
618#define DA9062AA_SECONDS_C_MASK 0xff
619
620/* DA9062AA_SECOND_D = 0x04F */
621#define DA9062AA_SECONDS_D_SHIFT 0
622#define DA9062AA_SECONDS_D_MASK 0xff
623
624/* DA9062AA_SEQ = 0x081 */
625#define DA9062AA_SEQ_POINTER_SHIFT 0
626#define DA9062AA_SEQ_POINTER_MASK 0x0f
627#define DA9062AA_NXT_SEQ_START_SHIFT 4
628#define DA9062AA_NXT_SEQ_START_MASK (0x0f << 4)
629
630/* DA9062AA_SEQ_TIMER = 0x082 */
631#define DA9062AA_SEQ_TIME_SHIFT 0
632#define DA9062AA_SEQ_TIME_MASK 0x0f
633#define DA9062AA_SEQ_DUMMY_SHIFT 4
634#define DA9062AA_SEQ_DUMMY_MASK (0x0f << 4)
635
636/* DA9062AA_ID_2_1 = 0x083 */
637#define DA9062AA_LDO1_STEP_SHIFT 0
638#define DA9062AA_LDO1_STEP_MASK 0x0f
639#define DA9062AA_LDO2_STEP_SHIFT 4
640#define DA9062AA_LDO2_STEP_MASK (0x0f << 4)
641
642/* DA9062AA_ID_4_3 = 0x084 */
643#define DA9062AA_LDO3_STEP_SHIFT 0
644#define DA9062AA_LDO3_STEP_MASK 0x0f
645#define DA9062AA_LDO4_STEP_SHIFT 4
646#define DA9062AA_LDO4_STEP_MASK (0x0f << 4)
647
648/* DA9062AA_ID_12_11 = 0x088 */
649#define DA9062AA_PD_DIS_STEP_SHIFT 4
650#define DA9062AA_PD_DIS_STEP_MASK (0x0f << 4)
651
652/* DA9062AA_ID_14_13 = 0x089 */
653#define DA9062AA_BUCK1_STEP_SHIFT 0
654#define DA9062AA_BUCK1_STEP_MASK 0x0f
655#define DA9062AA_BUCK2_STEP_SHIFT 4
656#define DA9062AA_BUCK2_STEP_MASK (0x0f << 4)
657
658/* DA9062AA_ID_16_15 = 0x08A */
659#define DA9062AA_BUCK4_STEP_SHIFT 0
660#define DA9062AA_BUCK4_STEP_MASK 0x0f
661#define DA9062AA_BUCK3_STEP_SHIFT 4
662#define DA9062AA_BUCK3_STEP_MASK (0x0f << 4)
663
664/* DA9062AA_ID_22_21 = 0x08D */
665#define DA9062AA_GP_RISE1_STEP_SHIFT 0
666#define DA9062AA_GP_RISE1_STEP_MASK 0x0f
667#define DA9062AA_GP_FALL1_STEP_SHIFT 4
668#define DA9062AA_GP_FALL1_STEP_MASK (0x0f << 4)
669
670/* DA9062AA_ID_24_23 = 0x08E */
671#define DA9062AA_GP_RISE2_STEP_SHIFT 0
672#define DA9062AA_GP_RISE2_STEP_MASK 0x0f
673#define DA9062AA_GP_FALL2_STEP_SHIFT 4
674#define DA9062AA_GP_FALL2_STEP_MASK (0x0f << 4)
675
676/* DA9062AA_ID_26_25 = 0x08F */
677#define DA9062AA_GP_RISE3_STEP_SHIFT 0
678#define DA9062AA_GP_RISE3_STEP_MASK 0x0f
679#define DA9062AA_GP_FALL3_STEP_SHIFT 4
680#define DA9062AA_GP_FALL3_STEP_MASK (0x0f << 4)
681
682/* DA9062AA_ID_28_27 = 0x090 */
683#define DA9062AA_GP_RISE4_STEP_SHIFT 0
684#define DA9062AA_GP_RISE4_STEP_MASK 0x0f
685#define DA9062AA_GP_FALL4_STEP_SHIFT 4
686#define DA9062AA_GP_FALL4_STEP_MASK (0x0f << 4)
687
688/* DA9062AA_ID_30_29 = 0x091 */
689#define DA9062AA_GP_RISE5_STEP_SHIFT 0
690#define DA9062AA_GP_RISE5_STEP_MASK 0x0f
691#define DA9062AA_GP_FALL5_STEP_SHIFT 4
692#define DA9062AA_GP_FALL5_STEP_MASK (0x0f << 4)
693
694/* DA9062AA_ID_32_31 = 0x092 */
695#define DA9062AA_WAIT_STEP_SHIFT 0
696#define DA9062AA_WAIT_STEP_MASK 0x0f
697#define DA9062AA_EN32K_STEP_SHIFT 4
698#define DA9062AA_EN32K_STEP_MASK (0x0f << 4)
699
700/* DA9062AA_SEQ_A = 0x095 */
701#define DA9062AA_SYSTEM_END_SHIFT 0
702#define DA9062AA_SYSTEM_END_MASK 0x0f
703#define DA9062AA_POWER_END_SHIFT 4
704#define DA9062AA_POWER_END_MASK (0x0f << 4)
705
706/* DA9062AA_SEQ_B = 0x096 */
707#define DA9062AA_MAX_COUNT_SHIFT 0
708#define DA9062AA_MAX_COUNT_MASK 0x0f
709#define DA9062AA_PART_DOWN_SHIFT 4
710#define DA9062AA_PART_DOWN_MASK (0x0f << 4)
711
712/* DA9062AA_WAIT = 0x097 */
713#define DA9062AA_WAIT_TIME_SHIFT 0
714#define DA9062AA_WAIT_TIME_MASK 0x0f
715#define DA9062AA_WAIT_MODE_SHIFT 4
716#define DA9062AA_WAIT_MODE_MASK BIT(4)
717#define DA9062AA_TIME_OUT_SHIFT 5
718#define DA9062AA_TIME_OUT_MASK BIT(5)
719#define DA9062AA_WAIT_DIR_SHIFT 6
720#define DA9062AA_WAIT_DIR_MASK (0x03 << 6)
721
722/* DA9062AA_EN_32K = 0x098 */
723#define DA9062AA_STABILISATION_TIME_SHIFT 0
724#define DA9062AA_STABILISATION_TIME_MASK 0x07
725#define DA9062AA_CRYSTAL_SHIFT 3
726#define DA9062AA_CRYSTAL_MASK BIT(3)
727#define DA9062AA_DELAY_MODE_SHIFT 4
728#define DA9062AA_DELAY_MODE_MASK BIT(4)
729#define DA9062AA_OUT_CLOCK_SHIFT 5
730#define DA9062AA_OUT_CLOCK_MASK BIT(5)
731#define DA9062AA_RTC_CLOCK_SHIFT 6
732#define DA9062AA_RTC_CLOCK_MASK BIT(6)
733#define DA9062AA_EN_32KOUT_SHIFT 7
734#define DA9062AA_EN_32KOUT_MASK BIT(7)
735
736/* DA9062AA_RESET = 0x099 */
737#define DA9062AA_RESET_TIMER_SHIFT 0
738#define DA9062AA_RESET_TIMER_MASK 0x3f
739#define DA9062AA_RESET_EVENT_SHIFT 6
740#define DA9062AA_RESET_EVENT_MASK (0x03 << 6)
741
742/* DA9062AA_BUCK_ILIM_A = 0x09A */
743#define DA9062AA_BUCK3_ILIM_SHIFT 0
744#define DA9062AA_BUCK3_ILIM_MASK 0x0f
745
746/* DA9062AA_BUCK_ILIM_B = 0x09B */
747#define DA9062AA_BUCK4_ILIM_SHIFT 0
748#define DA9062AA_BUCK4_ILIM_MASK 0x0f
749
750/* DA9062AA_BUCK_ILIM_C = 0x09C */
751#define DA9062AA_BUCK1_ILIM_SHIFT 0
752#define DA9062AA_BUCK1_ILIM_MASK 0x0f
753#define DA9062AA_BUCK2_ILIM_SHIFT 4
754#define DA9062AA_BUCK2_ILIM_MASK (0x0f << 4)
755
756/* DA9062AA_BUCK2_CFG = 0x09D */
757#define DA9062AA_BUCK2_PD_DIS_SHIFT 5
758#define DA9062AA_BUCK2_PD_DIS_MASK BIT(5)
759#define DA9062AA_BUCK2_MODE_SHIFT 6
760#define DA9062AA_BUCK2_MODE_MASK (0x03 << 6)
761
762/* DA9062AA_BUCK1_CFG = 0x09E */
763#define DA9062AA_BUCK1_PD_DIS_SHIFT 5
764#define DA9062AA_BUCK1_PD_DIS_MASK BIT(5)
765#define DA9062AA_BUCK1_MODE_SHIFT 6
766#define DA9062AA_BUCK1_MODE_MASK (0x03 << 6)
767
768/* DA9062AA_BUCK4_CFG = 0x09F */
769#define DA9062AA_BUCK4_VTTR_EN_SHIFT 3
770#define DA9062AA_BUCK4_VTTR_EN_MASK BIT(3)
771#define DA9062AA_BUCK4_VTT_EN_SHIFT 4
772#define DA9062AA_BUCK4_VTT_EN_MASK BIT(4)
773#define DA9062AA_BUCK4_PD_DIS_SHIFT 5
774#define DA9062AA_BUCK4_PD_DIS_MASK BIT(5)
775#define DA9062AA_BUCK4_MODE_SHIFT 6
776#define DA9062AA_BUCK4_MODE_MASK (0x03 << 6)
777
778/* DA9062AA_BUCK3_CFG = 0x0A0 */
779#define DA9062AA_BUCK3_PD_DIS_SHIFT 5
780#define DA9062AA_BUCK3_PD_DIS_MASK BIT(5)
781#define DA9062AA_BUCK3_MODE_SHIFT 6
782#define DA9062AA_BUCK3_MODE_MASK (0x03 << 6)
783
784/* DA9062AA_VBUCK2_A = 0x0A3 */
785#define DA9062AA_VBUCK2_A_SHIFT 0
786#define DA9062AA_VBUCK2_A_MASK 0x7f
787#define DA9062AA_BUCK2_SL_A_SHIFT 7
788#define DA9062AA_BUCK2_SL_A_MASK BIT(7)
789
790/* DA9062AA_VBUCK1_A = 0x0A4 */
791#define DA9062AA_VBUCK1_A_SHIFT 0
792#define DA9062AA_VBUCK1_A_MASK 0x7f
793#define DA9062AA_BUCK1_SL_A_SHIFT 7
794#define DA9062AA_BUCK1_SL_A_MASK BIT(7)
795
796/* DA9062AA_VBUCK4_A = 0x0A5 */
797#define DA9062AA_VBUCK4_A_SHIFT 0
798#define DA9062AA_VBUCK4_A_MASK 0x7f
799#define DA9062AA_BUCK4_SL_A_SHIFT 7
800#define DA9062AA_BUCK4_SL_A_MASK BIT(7)
801
802/* DA9062AA_VBUCK3_A = 0x0A7 */
803#define DA9062AA_VBUCK3_A_SHIFT 0
804#define DA9062AA_VBUCK3_A_MASK 0x7f
805#define DA9062AA_BUCK3_SL_A_SHIFT 7
806#define DA9062AA_BUCK3_SL_A_MASK BIT(7)
807
808/* DA9062AA_VLDO1_A = 0x0A9 */
809#define DA9062AA_VLDO1_A_SHIFT 0
810#define DA9062AA_VLDO1_A_MASK 0x3f
811#define DA9062AA_LDO1_SL_A_SHIFT 7
812#define DA9062AA_LDO1_SL_A_MASK BIT(7)
813
814/* DA9062AA_VLDO2_A = 0x0AA */
815#define DA9062AA_VLDO2_A_SHIFT 0
816#define DA9062AA_VLDO2_A_MASK 0x3f
817#define DA9062AA_LDO2_SL_A_SHIFT 7
818#define DA9062AA_LDO2_SL_A_MASK BIT(7)
819
820/* DA9062AA_VLDO3_A = 0x0AB */
821#define DA9062AA_VLDO3_A_SHIFT 0
822#define DA9062AA_VLDO3_A_MASK 0x3f
823#define DA9062AA_LDO3_SL_A_SHIFT 7
824#define DA9062AA_LDO3_SL_A_MASK BIT(7)
825
826/* DA9062AA_VLDO4_A = 0x0AC */
827#define DA9062AA_VLDO4_A_SHIFT 0
828#define DA9062AA_VLDO4_A_MASK 0x3f
829#define DA9062AA_LDO4_SL_A_SHIFT 7
830#define DA9062AA_LDO4_SL_A_MASK BIT(7)
831
832/* DA9062AA_VBUCK2_B = 0x0B4 */
833#define DA9062AA_VBUCK2_B_SHIFT 0
834#define DA9062AA_VBUCK2_B_MASK 0x7f
835#define DA9062AA_BUCK2_SL_B_SHIFT 7
836#define DA9062AA_BUCK2_SL_B_MASK BIT(7)
837
838/* DA9062AA_VBUCK1_B = 0x0B5 */
839#define DA9062AA_VBUCK1_B_SHIFT 0
840#define DA9062AA_VBUCK1_B_MASK 0x7f
841#define DA9062AA_BUCK1_SL_B_SHIFT 7
842#define DA9062AA_BUCK1_SL_B_MASK BIT(7)
843
844/* DA9062AA_VBUCK4_B = 0x0B6 */
845#define DA9062AA_VBUCK4_B_SHIFT 0
846#define DA9062AA_VBUCK4_B_MASK 0x7f
847#define DA9062AA_BUCK4_SL_B_SHIFT 7
848#define DA9062AA_BUCK4_SL_B_MASK BIT(7)
849
850/* DA9062AA_VBUCK3_B = 0x0B8 */
851#define DA9062AA_VBUCK3_B_SHIFT 0
852#define DA9062AA_VBUCK3_B_MASK 0x7f
853#define DA9062AA_BUCK3_SL_B_SHIFT 7
854#define DA9062AA_BUCK3_SL_B_MASK BIT(7)
855
856/* DA9062AA_VLDO1_B = 0x0BA */
857#define DA9062AA_VLDO1_B_SHIFT 0
858#define DA9062AA_VLDO1_B_MASK 0x3f
859#define DA9062AA_LDO1_SL_B_SHIFT 7
860#define DA9062AA_LDO1_SL_B_MASK BIT(7)
861
862/* DA9062AA_VLDO2_B = 0x0BB */
863#define DA9062AA_VLDO2_B_SHIFT 0
864#define DA9062AA_VLDO2_B_MASK 0x3f
865#define DA9062AA_LDO2_SL_B_SHIFT 7
866#define DA9062AA_LDO2_SL_B_MASK BIT(7)
867
868/* DA9062AA_VLDO3_B = 0x0BC */
869#define DA9062AA_VLDO3_B_SHIFT 0
870#define DA9062AA_VLDO3_B_MASK 0x3f
871#define DA9062AA_LDO3_SL_B_SHIFT 7
872#define DA9062AA_LDO3_SL_B_MASK BIT(7)
873
874/* DA9062AA_VLDO4_B = 0x0BD */
875#define DA9062AA_VLDO4_B_SHIFT 0
876#define DA9062AA_VLDO4_B_MASK 0x3f
877#define DA9062AA_LDO4_SL_B_SHIFT 7
878#define DA9062AA_LDO4_SL_B_MASK BIT(7)
879
880/* DA9062AA_BBAT_CONT = 0x0C5 */
881#define DA9062AA_BCHG_VSET_SHIFT 0
882#define DA9062AA_BCHG_VSET_MASK 0x0f
883#define DA9062AA_BCHG_ISET_SHIFT 4
884#define DA9062AA_BCHG_ISET_MASK (0x0f << 4)
885
886/* DA9062AA_INTERFACE = 0x105 */
887#define DA9062AA_IF_BASE_ADDR_SHIFT 4
888#define DA9062AA_IF_BASE_ADDR_MASK (0x0f << 4)
889
890/* DA9062AA_CONFIG_A = 0x106 */
891#define DA9062AA_PM_I_V_SHIFT 0
892#define DA9062AA_PM_I_V_MASK 0x01
893#define DA9062AA_PM_O_TYPE_SHIFT 2
894#define DA9062AA_PM_O_TYPE_MASK BIT(2)
895#define DA9062AA_IRQ_TYPE_SHIFT 3
896#define DA9062AA_IRQ_TYPE_MASK BIT(3)
897#define DA9062AA_PM_IF_V_SHIFT 4
898#define DA9062AA_PM_IF_V_MASK BIT(4)
899#define DA9062AA_PM_IF_FMP_SHIFT 5
900#define DA9062AA_PM_IF_FMP_MASK BIT(5)
901#define DA9062AA_PM_IF_HSM_SHIFT 6
902#define DA9062AA_PM_IF_HSM_MASK BIT(6)
903
904/* DA9062AA_CONFIG_B = 0x107 */
905#define DA9062AA_VDD_FAULT_ADJ_SHIFT 0
906#define DA9062AA_VDD_FAULT_ADJ_MASK 0x0f
907#define DA9062AA_VDD_HYST_ADJ_SHIFT 4
908#define DA9062AA_VDD_HYST_ADJ_MASK (0x07 << 4)
909
910/* DA9062AA_CONFIG_C = 0x108 */
911#define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT 2
912#define DA9062AA_BUCK_ACTV_DISCHRG_MASK BIT(2)
913#define DA9062AA_BUCK1_CLK_INV_SHIFT 3
914#define DA9062AA_BUCK1_CLK_INV_MASK BIT(3)
915#define DA9062AA_BUCK4_CLK_INV_SHIFT 4
916#define DA9062AA_BUCK4_CLK_INV_MASK BIT(4)
917#define DA9062AA_BUCK3_CLK_INV_SHIFT 6
918#define DA9062AA_BUCK3_CLK_INV_MASK BIT(6)
919
920/* DA9062AA_CONFIG_D = 0x109 */
921#define DA9062AA_GPI_V_SHIFT 0
922#define DA9062AA_GPI_V_MASK 0x01
923#define DA9062AA_NIRQ_MODE_SHIFT 1
924#define DA9062AA_NIRQ_MODE_MASK BIT(1)
925#define DA9062AA_SYSTEM_EN_RD_SHIFT 2
926#define DA9062AA_SYSTEM_EN_RD_MASK BIT(2)
927#define DA9062AA_FORCE_RESET_SHIFT 5
928#define DA9062AA_FORCE_RESET_MASK BIT(5)
929
930/* DA9062AA_CONFIG_E = 0x10A */
931#define DA9062AA_BUCK1_AUTO_SHIFT 0
932#define DA9062AA_BUCK1_AUTO_MASK 0x01
933#define DA9062AA_BUCK2_AUTO_SHIFT 1
934#define DA9062AA_BUCK2_AUTO_MASK BIT(1)
935#define DA9062AA_BUCK4_AUTO_SHIFT 2
936#define DA9062AA_BUCK4_AUTO_MASK BIT(2)
937#define DA9062AA_BUCK3_AUTO_SHIFT 4
938#define DA9062AA_BUCK3_AUTO_MASK BIT(4)
939
940/* DA9062AA_CONFIG_G = 0x10C */
941#define DA9062AA_LDO1_AUTO_SHIFT 0
942#define DA9062AA_LDO1_AUTO_MASK 0x01
943#define DA9062AA_LDO2_AUTO_SHIFT 1
944#define DA9062AA_LDO2_AUTO_MASK BIT(1)
945#define DA9062AA_LDO3_AUTO_SHIFT 2
946#define DA9062AA_LDO3_AUTO_MASK BIT(2)
947#define DA9062AA_LDO4_AUTO_SHIFT 3
948#define DA9062AA_LDO4_AUTO_MASK BIT(3)
949
950/* DA9062AA_CONFIG_H = 0x10D */
951#define DA9062AA_BUCK1_2_MERGE_SHIFT 3
952#define DA9062AA_BUCK1_2_MERGE_MASK BIT(3)
953#define DA9062AA_BUCK2_OD_SHIFT 5
954#define DA9062AA_BUCK2_OD_MASK BIT(5)
955#define DA9062AA_BUCK1_OD_SHIFT 6
956#define DA9062AA_BUCK1_OD_MASK BIT(6)
957
958/* DA9062AA_CONFIG_I = 0x10E */
959#define DA9062AA_NONKEY_PIN_SHIFT 0
960#define DA9062AA_NONKEY_PIN_MASK 0x03
961#define DA9062AA_nONKEY_SD_SHIFT 2
962#define DA9062AA_nONKEY_SD_MASK BIT(2)
963#define DA9062AA_WATCHDOG_SD_SHIFT 3
964#define DA9062AA_WATCHDOG_SD_MASK BIT(3)
965#define DA9062AA_KEY_SD_MODE_SHIFT 4
966#define DA9062AA_KEY_SD_MODE_MASK BIT(4)
967#define DA9062AA_HOST_SD_MODE_SHIFT 5
968#define DA9062AA_HOST_SD_MODE_MASK BIT(5)
969#define DA9062AA_INT_SD_MODE_SHIFT 6
970#define DA9062AA_INT_SD_MODE_MASK BIT(6)
971#define DA9062AA_LDO_SD_SHIFT 7
972#define DA9062AA_LDO_SD_MASK BIT(7)
973
974/* DA9062AA_CONFIG_J = 0x10F */
975#define DA9062AA_KEY_DELAY_SHIFT 0
976#define DA9062AA_KEY_DELAY_MASK 0x03
977#define DA9062AA_SHUT_DELAY_SHIFT 2
978#define DA9062AA_SHUT_DELAY_MASK (0x03 << 2)
979#define DA9062AA_RESET_DURATION_SHIFT 4
980#define DA9062AA_RESET_DURATION_MASK (0x03 << 4)
981#define DA9062AA_TWOWIRE_TO_SHIFT 6
982#define DA9062AA_TWOWIRE_TO_MASK BIT(6)
983#define DA9062AA_IF_RESET_SHIFT 7
984#define DA9062AA_IF_RESET_MASK BIT(7)
985
986/* DA9062AA_CONFIG_K = 0x110 */
987#define DA9062AA_GPIO0_PUPD_SHIFT 0
988#define DA9062AA_GPIO0_PUPD_MASK 0x01
989#define DA9062AA_GPIO1_PUPD_SHIFT 1
990#define DA9062AA_GPIO1_PUPD_MASK BIT(1)
991#define DA9062AA_GPIO2_PUPD_SHIFT 2
992#define DA9062AA_GPIO2_PUPD_MASK BIT(2)
993#define DA9062AA_GPIO3_PUPD_SHIFT 3
994#define DA9062AA_GPIO3_PUPD_MASK BIT(3)
995#define DA9062AA_GPIO4_PUPD_SHIFT 4
996#define DA9062AA_GPIO4_PUPD_MASK BIT(4)
997
998/* DA9062AA_CONFIG_M = 0x112 */
999#define DA9062AA_NSHUTDOWN_PU_SHIFT 1
1000#define DA9062AA_NSHUTDOWN_PU_MASK BIT(1)
1001#define DA9062AA_WDG_MODE_SHIFT 3
1002#define DA9062AA_WDG_MODE_MASK BIT(3)
1003#define DA9062AA_OSC_FRQ_SHIFT 4
1004#define DA9062AA_OSC_FRQ_MASK (0x0f << 4)
1005
1006/* DA9062AA_TRIM_CLDR = 0x120 */
1007#define DA9062AA_TRIM_CLDR_SHIFT 0
1008#define DA9062AA_TRIM_CLDR_MASK 0xff
1009
1010/* DA9062AA_GP_ID_0 = 0x121 */
1011#define DA9062AA_GP_0_SHIFT 0
1012#define DA9062AA_GP_0_MASK 0xff
1013
1014/* DA9062AA_GP_ID_1 = 0x122 */
1015#define DA9062AA_GP_1_SHIFT 0
1016#define DA9062AA_GP_1_MASK 0xff
1017
1018/* DA9062AA_GP_ID_2 = 0x123 */
1019#define DA9062AA_GP_2_SHIFT 0
1020#define DA9062AA_GP_2_MASK 0xff
1021
1022/* DA9062AA_GP_ID_3 = 0x124 */
1023#define DA9062AA_GP_3_SHIFT 0
1024#define DA9062AA_GP_3_MASK 0xff
1025
1026/* DA9062AA_GP_ID_4 = 0x125 */
1027#define DA9062AA_GP_4_SHIFT 0
1028#define DA9062AA_GP_4_MASK 0xff
1029
1030/* DA9062AA_GP_ID_5 = 0x126 */
1031#define DA9062AA_GP_5_SHIFT 0
1032#define DA9062AA_GP_5_MASK 0xff
1033
1034/* DA9062AA_GP_ID_6 = 0x127 */
1035#define DA9062AA_GP_6_SHIFT 0
1036#define DA9062AA_GP_6_MASK 0xff
1037
1038/* DA9062AA_GP_ID_7 = 0x128 */
1039#define DA9062AA_GP_7_SHIFT 0
1040#define DA9062AA_GP_7_MASK 0xff
1041
1042/* DA9062AA_GP_ID_8 = 0x129 */
1043#define DA9062AA_GP_8_SHIFT 0
1044#define DA9062AA_GP_8_MASK 0xff
1045
1046/* DA9062AA_GP_ID_9 = 0x12A */
1047#define DA9062AA_GP_9_SHIFT 0
1048#define DA9062AA_GP_9_MASK 0xff
1049
1050/* DA9062AA_GP_ID_10 = 0x12B */
1051#define DA9062AA_GP_10_SHIFT 0
1052#define DA9062AA_GP_10_MASK 0xff
1053
1054/* DA9062AA_GP_ID_11 = 0x12C */
1055#define DA9062AA_GP_11_SHIFT 0
1056#define DA9062AA_GP_11_MASK 0xff
1057
1058/* DA9062AA_GP_ID_12 = 0x12D */
1059#define DA9062AA_GP_12_SHIFT 0
1060#define DA9062AA_GP_12_MASK 0xff
1061
1062/* DA9062AA_GP_ID_13 = 0x12E */
1063#define DA9062AA_GP_13_SHIFT 0
1064#define DA9062AA_GP_13_MASK 0xff
1065
1066/* DA9062AA_GP_ID_14 = 0x12F */
1067#define DA9062AA_GP_14_SHIFT 0
1068#define DA9062AA_GP_14_MASK 0xff
1069
1070/* DA9062AA_GP_ID_15 = 0x130 */
1071#define DA9062AA_GP_15_SHIFT 0
1072#define DA9062AA_GP_15_MASK 0xff
1073
1074/* DA9062AA_GP_ID_16 = 0x131 */
1075#define DA9062AA_GP_16_SHIFT 0
1076#define DA9062AA_GP_16_MASK 0xff
1077
1078/* DA9062AA_GP_ID_17 = 0x132 */
1079#define DA9062AA_GP_17_SHIFT 0
1080#define DA9062AA_GP_17_MASK 0xff
1081
1082/* DA9062AA_GP_ID_18 = 0x133 */
1083#define DA9062AA_GP_18_SHIFT 0
1084#define DA9062AA_GP_18_MASK 0xff
1085
1086/* DA9062AA_GP_ID_19 = 0x134 */
1087#define DA9062AA_GP_19_SHIFT 0
1088#define DA9062AA_GP_19_MASK 0xff
1089
1090/* DA9062AA_DEVICE_ID = 0x181 */
1091#define DA9062AA_DEV_ID_SHIFT 0
1092#define DA9062AA_DEV_ID_MASK 0xff
1093
1094/* DA9062AA_VARIANT_ID = 0x182 */
1095#define DA9062AA_VRC_SHIFT 0
1096#define DA9062AA_VRC_MASK 0x0f
1097#define DA9062AA_MRC_SHIFT 4
1098#define DA9062AA_MRC_MASK (0x0f << 4)
1099
1100/* DA9062AA_CUSTOMER_ID = 0x183 */
1101#define DA9062AA_CUST_ID_SHIFT 0
1102#define DA9062AA_CUST_ID_MASK 0xff
1103
1104/* DA9062AA_CONFIG_ID = 0x184 */
1105#define DA9062AA_CONFIG_REV_SHIFT 0
1106#define DA9062AA_CONFIG_REV_MASK 0xff
1107
1108#endif /* __DA9062_H__ */
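The *_SHIFT/*_MASK pairs above are meant to be used together when decoding a raw register read. A minimal sketch of pulling the device and variant IDs apart with these macros — the regmap handle, the probe context and the exact include path are assumptions, not part of this header:

#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/mfd/da9062/registers.h>

/* Illustrative only: read DEVICE_ID/VARIANT_ID and decode their fields. */
static int da9062_report_ids(struct regmap *regmap, struct device *dev)
{
	unsigned int dev_id, variant;
	int ret;

	ret = regmap_read(regmap, DA9062AA_DEVICE_ID, &dev_id);
	if (ret)
		return ret;

	ret = regmap_read(regmap, DA9062AA_VARIANT_ID, &variant);
	if (ret)
		return ret;

	dev_info(dev, "device 0x%02x, VRC %u, MRC %u\n",
		 dev_id & DA9062AA_DEV_ID_MASK,
		 (variant & DA9062AA_VRC_MASK) >> DA9062AA_VRC_SHIFT,
		 (variant & DA9062AA_MRC_MASK) >> DA9062AA_MRC_SHIFT);
	return 0;
}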
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 79f4d822ba13..621af82123c6 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -51,6 +51,7 @@ enum da9063_irqs {
51 DA9063_IRQ_COMP_1V2, 51 DA9063_IRQ_COMP_1V2,
52 DA9063_IRQ_LDO_LIM, 52 DA9063_IRQ_LDO_LIM,
53 DA9063_IRQ_REG_UVOV, 53 DA9063_IRQ_REG_UVOV,
54 DA9063_IRQ_DVC_RDY,
54 DA9063_IRQ_VDD_MON, 55 DA9063_IRQ_VDD_MON,
55 DA9063_IRQ_WARN, 56 DA9063_IRQ_WARN,
56 DA9063_IRQ_GPI0, 57 DA9063_IRQ_GPI0,
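With DA9063_IRQ_DVC_RDY added to the enum, a client driver can map it to a Linux IRQ through the usual regmap-irq path. A hedged sketch — the name of the regmap_irq_chip_data handle is an assumption, not something this patch defines:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/mfd/da9063/core.h>

/* Illustrative: translate the new DVC_RDY interrupt and attach a handler. */
static int da9063_hook_dvc_rdy(struct device *dev,
			       struct regmap_irq_chip_data *chip_irq_data,
			       irq_handler_t thread_fn, void *ctx)
{
	int virq = regmap_irq_get_virq(chip_irq_data, DA9063_IRQ_DVC_RDY);

	if (virq < 0)
		return virq;

	return devm_request_threaded_irq(dev, virq, NULL, thread_fn,
					 IRQF_ONESHOT, "da9063-dvc-rdy", ctx);
}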
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 8feac782fa83..2b300b44f994 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,12 +20,6 @@
20#ifndef LPC_ICH_H 20#ifndef LPC_ICH_H
21#define LPC_ICH_H 21#define LPC_ICH_H
22 22
23/* Watchdog resources */
24#define ICH_RES_IO_TCO 0
25#define ICH_RES_IO_SMI 1
26#define ICH_RES_MEM_OFF 2
27#define ICH_RES_MEM_GCS_PMC 0
28
29/* GPIO resources */ 23/* GPIO resources */
30#define ICH_RES_GPIO 0 24#define ICH_RES_GPIO 0
31#define ICH_RES_GPE0 1 25#define ICH_RES_GPE0 1
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
new file mode 100644
index 000000000000..095b121aa725
--- /dev/null
+++ b/include/linux/mfd/max77693-common.h
@@ -0,0 +1,49 @@
1/*
2 * Common data shared between Maxim 77693 and 77843 drivers
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __LINUX_MFD_MAX77693_COMMON_H
13#define __LINUX_MFD_MAX77693_COMMON_H
14
15enum max77693_types {
16 TYPE_MAX77693_UNKNOWN,
17 TYPE_MAX77693,
18 TYPE_MAX77843,
19
20 TYPE_MAX77693_NUM,
21};
22
23/*
24 * Shared also with max77843.
25 */
26struct max77693_dev {
27 struct device *dev;
28 struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
29 struct i2c_client *i2c_muic; /* 0x4A , MUIC */
30 struct i2c_client *i2c_haptic; /* MAX77693: 0x90 , Haptic */
31 struct i2c_client *i2c_chg; /* MAX77843: 0xD2, Charger */
32
33 enum max77693_types type;
34
35 struct regmap *regmap;
36 struct regmap *regmap_muic;
37 struct regmap *regmap_haptic; /* Only MAX77693 */
38 struct regmap *regmap_chg; /* Only MAX77843 */
39
40 struct regmap_irq_chip_data *irq_data_led;
41 struct regmap_irq_chip_data *irq_data_topsys;
42 struct regmap_irq_chip_data *irq_data_chg; /* Only MAX77693 */
43 struct regmap_irq_chip_data *irq_data_muic;
44
45 int irq;
46};
47
48
49#endif /* __LINUX_MFD_MAX77693_COMMON_H */
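Since the same struct max77693_dev now backs both chips, shared sub-drivers are expected to branch on the type field before touching a chip-specific regmap. A small illustrative helper (not part of the patch), grounded in the comments above: the MAX77693 charger lives on the main 0xCC map, the MAX77843 charger on its dedicated 0xD2 map:

#include <linux/mfd/max77693-common.h>

/* Illustrative: pick the regmap that is valid for the detected chip. */
static struct regmap *max77693_get_chg_regmap(struct max77693_dev *max77693)
{
	switch (max77693->type) {
	case TYPE_MAX77693:
		return max77693->regmap;	/* charger shares the PMIC map */
	case TYPE_MAX77843:
		return max77693->regmap_chg;	/* dedicated charger map */
	default:
		return NULL;
	}
}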
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 51633ea6f910..3c7a63b98ad6 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -310,30 +310,30 @@ enum max77693_muic_reg {
310#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT) 310#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT)
311 311
312/* MAX77693 MUIC - STATUS1~3 Register */ 312/* MAX77693 MUIC - STATUS1~3 Register */
313#define STATUS1_ADC_SHIFT (0) 313#define MAX77693_STATUS1_ADC_SHIFT 0
314#define STATUS1_ADCLOW_SHIFT (5) 314#define MAX77693_STATUS1_ADCLOW_SHIFT 5
315#define STATUS1_ADCERR_SHIFT (6) 315#define MAX77693_STATUS1_ADCERR_SHIFT 6
316#define STATUS1_ADC1K_SHIFT (7) 316#define MAX77693_STATUS1_ADC1K_SHIFT 7
317#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) 317#define MAX77693_STATUS1_ADC_MASK (0x1f << MAX77693_STATUS1_ADC_SHIFT)
318#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT) 318#define MAX77693_STATUS1_ADCLOW_MASK BIT(MAX77693_STATUS1_ADCLOW_SHIFT)
319#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT) 319#define MAX77693_STATUS1_ADCERR_MASK BIT(MAX77693_STATUS1_ADCERR_SHIFT)
320#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT) 320#define MAX77693_STATUS1_ADC1K_MASK BIT(MAX77693_STATUS1_ADC1K_SHIFT)
321 321
322#define STATUS2_CHGTYP_SHIFT (0) 322#define MAX77693_STATUS2_CHGTYP_SHIFT 0
323#define STATUS2_CHGDETRUN_SHIFT (3) 323#define MAX77693_STATUS2_CHGDETRUN_SHIFT 3
324#define STATUS2_DCDTMR_SHIFT (4) 324#define MAX77693_STATUS2_DCDTMR_SHIFT 4
325#define STATUS2_DXOVP_SHIFT (5) 325#define MAX77693_STATUS2_DXOVP_SHIFT 5
326#define STATUS2_VBVOLT_SHIFT (6) 326#define MAX77693_STATUS2_VBVOLT_SHIFT 6
327#define STATUS2_VIDRM_SHIFT (7) 327#define MAX77693_STATUS2_VIDRM_SHIFT 7
328#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) 328#define MAX77693_STATUS2_CHGTYP_MASK (0x7 << MAX77693_STATUS2_CHGTYP_SHIFT)
329#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT) 329#define MAX77693_STATUS2_CHGDETRUN_MASK BIT(MAX77693_STATUS2_CHGDETRUN_SHIFT)
330#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT) 330#define MAX77693_STATUS2_DCDTMR_MASK BIT(MAX77693_STATUS2_DCDTMR_SHIFT)
331#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT) 331#define MAX77693_STATUS2_DXOVP_MASK BIT(MAX77693_STATUS2_DXOVP_SHIFT)
332#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT) 332#define MAX77693_STATUS2_VBVOLT_MASK BIT(MAX77693_STATUS2_VBVOLT_SHIFT)
333#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT) 333#define MAX77693_STATUS2_VIDRM_MASK BIT(MAX77693_STATUS2_VIDRM_SHIFT)
334 334
335#define STATUS3_OVP_SHIFT (2) 335#define MAX77693_STATUS3_OVP_SHIFT 2
336#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT) 336#define MAX77693_STATUS3_OVP_MASK BIT(MAX77693_STATUS3_OVP_SHIFT)
337 337
338/* MAX77693 CDETCTRL1~2 register */ 338/* MAX77693 CDETCTRL1~2 register */
339#define CDETCTRL1_CHGDETEN_SHIFT (0) 339#define CDETCTRL1_CHGDETEN_SHIFT (0)
@@ -362,38 +362,38 @@ enum max77693_muic_reg {
362#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT) 362#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
363#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT) 363#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
364#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK) 364#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
365#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \ 365#define MAX77693_CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
366 | (1 << COMN1SW_SHIFT)) 366 | (1 << COMN1SW_SHIFT))
367#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \ 367#define MAX77693_CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
368 | (2 << COMN1SW_SHIFT)) 368 | (2 << COMN1SW_SHIFT))
369#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \ 369#define MAX77693_CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
370 | (3 << COMN1SW_SHIFT)) 370 | (3 << COMN1SW_SHIFT))
371#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \ 371#define MAX77693_CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
372 | (0 << COMN1SW_SHIFT)) 372 | (0 << COMN1SW_SHIFT))
373 373
374#define CONTROL2_LOWPWR_SHIFT (0) 374#define MAX77693_CONTROL2_LOWPWR_SHIFT 0
375#define CONTROL2_ADCEN_SHIFT (1) 375#define MAX77693_CONTROL2_ADCEN_SHIFT 1
376#define CONTROL2_CPEN_SHIFT (2) 376#define MAX77693_CONTROL2_CPEN_SHIFT 2
377#define CONTROL2_SFOUTASRT_SHIFT (3) 377#define MAX77693_CONTROL2_SFOUTASRT_SHIFT 3
378#define CONTROL2_SFOUTORD_SHIFT (4) 378#define MAX77693_CONTROL2_SFOUTORD_SHIFT 4
379#define CONTROL2_ACCDET_SHIFT (5) 379#define MAX77693_CONTROL2_ACCDET_SHIFT 5
380#define CONTROL2_USBCPINT_SHIFT (6) 380#define MAX77693_CONTROL2_USBCPINT_SHIFT 6
381#define CONTROL2_RCPS_SHIFT (7) 381#define MAX77693_CONTROL2_RCPS_SHIFT 7
382#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT) 382#define MAX77693_CONTROL2_LOWPWR_MASK BIT(MAX77693_CONTROL2_LOWPWR_SHIFT)
383#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT) 383#define MAX77693_CONTROL2_ADCEN_MASK BIT(MAX77693_CONTROL2_ADCEN_SHIFT)
384#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT) 384#define MAX77693_CONTROL2_CPEN_MASK BIT(MAX77693_CONTROL2_CPEN_SHIFT)
385#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT) 385#define MAX77693_CONTROL2_SFOUTASRT_MASK BIT(MAX77693_CONTROL2_SFOUTASRT_SHIFT)
386#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT) 386#define MAX77693_CONTROL2_SFOUTORD_MASK BIT(MAX77693_CONTROL2_SFOUTORD_SHIFT)
387#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT) 387#define MAX77693_CONTROL2_ACCDET_MASK BIT(MAX77693_CONTROL2_ACCDET_SHIFT)
388#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT) 388#define MAX77693_CONTROL2_USBCPINT_MASK BIT(MAX77693_CONTROL2_USBCPINT_SHIFT)
389#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT) 389#define MAX77693_CONTROL2_RCPS_MASK BIT(MAX77693_CONTROL2_RCPS_SHIFT)
390 390
391#define CONTROL3_JIGSET_SHIFT (0) 391#define MAX77693_CONTROL3_JIGSET_SHIFT 0
392#define CONTROL3_BTLDSET_SHIFT (2) 392#define MAX77693_CONTROL3_BTLDSET_SHIFT 2
393#define CONTROL3_ADCDBSET_SHIFT (4) 393#define MAX77693_CONTROL3_ADCDBSET_SHIFT 4
394#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT) 394#define MAX77693_CONTROL3_JIGSET_MASK (0x3 << MAX77693_CONTROL3_JIGSET_SHIFT)
395#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT) 395#define MAX77693_CONTROL3_BTLDSET_MASK (0x3 << MAX77693_CONTROL3_BTLDSET_SHIFT)
396#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT) 396#define MAX77693_CONTROL3_ADCDBSET_MASK (0x3 << MAX77693_CONTROL3_ADCDBSET_SHIFT)
397 397
398/* Slave addr = 0x90: Haptic */ 398/* Slave addr = 0x90: Haptic */
399enum max77693_haptic_reg { 399enum max77693_haptic_reg {
@@ -529,36 +529,4 @@ enum max77693_irq_muic {
529 MAX77693_MUIC_IRQ_NR, 529 MAX77693_MUIC_IRQ_NR,
530}; 530};
531 531
532struct max77693_dev {
533 struct device *dev;
534 struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
535 struct i2c_client *muic; /* 0x4A , MUIC */
536 struct i2c_client *haptic; /* 0x90 , Haptic */
537
538 int type;
539
540 struct regmap *regmap;
541 struct regmap *regmap_muic;
542 struct regmap *regmap_haptic;
543
544 struct regmap_irq_chip_data *irq_data_led;
545 struct regmap_irq_chip_data *irq_data_topsys;
546 struct regmap_irq_chip_data *irq_data_charger;
547 struct regmap_irq_chip_data *irq_data_muic;
548
549 int irq;
550 int irq_gpio;
551 struct mutex irqlock;
552 int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
553 int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
554};
555
556enum max77693_types {
557 TYPE_MAX77693,
558};
559
560extern int max77693_irq_init(struct max77693_dev *max77686);
561extern void max77693_irq_exit(struct max77693_dev *max77686);
562extern int max77693_irq_resume(struct max77693_dev *max77686);
563
564#endif /* __LINUX_MFD_MAX77693_PRIV_H */ 532#endif /* __LINUX_MFD_MAX77693_PRIV_H */
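The renamed MAX77693_CONTROL1_SW_* values still combine the COMP/COMN switch codes, so a MUIC path change remains a single masked write. A hedged sketch — the CTRL1 register index name is assumed from the MUIC register enum earlier in this header, not shown in the hunk:

#include <linux/regmap.h>
#include <linux/mfd/max77693-private.h>

/* Illustrative: route D+/D- to the USB path on the MAX77693 MUIC. */
static int max77693_muic_route_usb(struct regmap *muic_regmap)
{
	return regmap_update_bits(muic_regmap, MAX77693_MUIC_REG_CTRL1,
				  COMP_SW_MASK, MAX77693_CONTROL1_SW_USB);
}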
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 7178ace8379e..c19303b0ccfd 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -318,62 +318,62 @@ enum max77843_irq_muic {
318 MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK) 318 MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
319 319
320/* MAX77843 STATUS register*/ 320/* MAX77843 STATUS register*/
321#define STATUS1_ADC_SHIFT 0 321#define MAX77843_MUIC_STATUS1_ADC_SHIFT 0
322#define STATUS1_ADCERROR_SHIFT 6 322#define MAX77843_MUIC_STATUS1_ADCERROR_SHIFT 6
323#define STATUS1_ADC1K_SHIFT 7 323#define MAX77843_MUIC_STATUS1_ADC1K_SHIFT 7
324#define STATUS2_CHGTYP_SHIFT 0 324#define MAX77843_MUIC_STATUS2_CHGTYP_SHIFT 0
325#define STATUS2_CHGDETRUN_SHIFT 3 325#define MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT 3
326#define STATUS2_DCDTMR_SHIFT 4 326#define MAX77843_MUIC_STATUS2_DCDTMR_SHIFT 4
327#define STATUS2_DXOVP_SHIFT 5 327#define MAX77843_MUIC_STATUS2_DXOVP_SHIFT 5
328#define STATUS2_VBVOLT_SHIFT 6 328#define MAX77843_MUIC_STATUS2_VBVOLT_SHIFT 6
329#define STATUS3_VBADC_SHIFT 0 329#define MAX77843_MUIC_STATUS3_VBADC_SHIFT 0
330#define STATUS3_VDNMON_SHIFT 4 330#define MAX77843_MUIC_STATUS3_VDNMON_SHIFT 4
331#define STATUS3_DNRES_SHIFT 5 331#define MAX77843_MUIC_STATUS3_DNRES_SHIFT 5
332#define STATUS3_MPNACK_SHIFT 6 332#define MAX77843_MUIC_STATUS3_MPNACK_SHIFT 6
333 333
334#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) 334#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << MAX77843_MUIC_STATUS1_ADC_SHIFT)
335#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT) 335#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(MAX77843_MUIC_STATUS1_ADCERROR_SHIFT)
336#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT) 336#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(MAX77843_MUIC_STATUS1_ADC1K_SHIFT)
337#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) 337#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << MAX77843_MUIC_STATUS2_CHGTYP_SHIFT)
338#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT) 338#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT)
339#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT) 339#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(MAX77843_MUIC_STATUS2_DCDTMR_SHIFT)
340#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT) 340#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(MAX77843_MUIC_STATUS2_DXOVP_SHIFT)
341#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT) 341#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(MAX77843_MUIC_STATUS2_VBVOLT_SHIFT)
342#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT) 342#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << MAX77843_MUIC_STATUS3_VBADC_SHIFT)
343#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT) 343#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(MAX77843_MUIC_STATUS3_VDNMON_SHIFT)
344#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT) 344#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(MAX77843_MUIC_STATUS3_DNRES_SHIFT)
345#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT) 345#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(MAX77843_MUIC_STATUS3_MPNACK_SHIFT)
346 346
347/* MAX77843 CONTROL register */ 347/* MAX77843 CONTROL register */
348#define CONTROL1_COMP1SW_SHIFT 0 348#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0
349#define CONTROL1_COMP2SW_SHIFT 3 349#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3
350#define CONTROL1_IDBEN_SHIFT 7 350#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7
351#define CONTROL2_LOWPWR_SHIFT 0 351#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0
352#define CONTROL2_ADCEN_SHIFT 1 352#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1
353#define CONTROL2_CPEN_SHIFT 2 353#define MAX77843_MUIC_CONTROL2_CPEN_SHIFT 2
354#define CONTROL2_ACC_DET_SHIFT 5 354#define MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT 5
355#define CONTROL2_USBCPINT_SHIFT 6 355#define MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT 6
356#define CONTROL2_RCPS_SHIFT 7 356#define MAX77843_MUIC_CONTROL2_RCPS_SHIFT 7
357#define CONTROL3_JIGSET_SHIFT 0 357#define MAX77843_MUIC_CONTROL3_JIGSET_SHIFT 0
358#define CONTROL4_ADCDBSET_SHIFT 0 358#define MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT 0
359#define CONTROL4_USBAUTO_SHIFT 4 359#define MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT 4
360#define CONTROL4_FCTAUTO_SHIFT 5 360#define MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT 5
361#define CONTROL4_ADCMODE_SHIFT 6 361#define MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT 6
362 362
363#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT) 363#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
364#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT) 364#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
365#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT) 365#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
366#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT) 366#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
367#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT) 367#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
368#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT) 368#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
369#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT) 369#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT)
370#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT) 370#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT)
371#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT) 371#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(MAX77843_MUIC_CONTROL2_RCPS_SHIFT)
372#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT) 372#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << MAX77843_MUIC_CONTROL3_JIGSET_SHIFT)
373#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT) 373#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT)
374#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT) 374#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT)
375#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT) 375#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT)
376#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT) 376#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT)
377 377
378/* MAX77843 switch port */ 378/* MAX77843 switch port */
379#define COM_OPEN 0 379#define COM_OPEN 0
@@ -383,38 +383,38 @@ enum max77843_irq_muic {
383#define COM_AUX_USB 4 383#define COM_AUX_USB 4
384#define COM_AUX_UART 5 384#define COM_AUX_UART 5
385 385
386#define CONTROL1_COM_SW \ 386#define MAX77843_MUIC_CONTROL1_COM_SW \
387 ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \ 387 ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
388 MAX77843_MUIC_CONTROL1_COMP2SW_MASK)) 388 MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
389 389
390#define CONTROL1_SW_OPEN \ 390#define MAX77843_MUIC_CONTROL1_SW_OPEN \
391 ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \ 391 ((COM_OPEN << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
392 COM_OPEN << CONTROL1_COMP2SW_SHIFT)) 392 COM_OPEN << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
393#define CONTROL1_SW_USB \ 393#define MAX77843_MUIC_CONTROL1_SW_USB \
394 ((COM_USB << CONTROL1_COMP1SW_SHIFT | \ 394 ((COM_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
395 COM_USB << CONTROL1_COMP2SW_SHIFT)) 395 COM_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
396#define CONTROL1_SW_AUDIO \ 396#define MAX77843_MUIC_CONTROL1_SW_AUDIO \
397 ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \ 397 ((COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
398 COM_AUDIO << CONTROL1_COMP2SW_SHIFT)) 398 COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
399#define CONTROL1_SW_UART \ 399#define MAX77843_MUIC_CONTROL1_SW_UART \
400 ((COM_UART << CONTROL1_COMP1SW_SHIFT | \ 400 ((COM_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
401 COM_UART << CONTROL1_COMP2SW_SHIFT)) 401 COM_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
402#define CONTROL1_SW_AUX_USB \ 402#define MAX77843_MUIC_CONTROL1_SW_AUX_USB \
403 ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \ 403 ((COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
404 COM_AUX_USB << CONTROL1_COMP2SW_SHIFT)) 404 COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
405#define CONTROL1_SW_AUX_UART \ 405#define MAX77843_MUIC_CONTROL1_SW_AUX_UART \
406 ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \ 406 ((COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
407 COM_AUX_UART << CONTROL1_COMP2SW_SHIFT)) 407 COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
408 408
409#define MAX77843_DISABLE 0 409#define MAX77843_DISABLE 0
410#define MAX77843_ENABLE 1 410#define MAX77843_ENABLE 1
411 411
412#define CONTROL4_AUTO_DISABLE \ 412#define CONTROL4_AUTO_DISABLE \
413 ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \ 413 ((MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
414 (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT)) 414 (MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
415#define CONTROL4_AUTO_ENABLE \ 415#define CONTROL4_AUTO_ENABLE \
416 ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \ 416 ((MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
417 (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT)) 417 (MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
418 418
419/* MAX77843 SAFEOUT LDO Control register */ 419/* MAX77843 SAFEOUT LDO Control register */
420#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0 420#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
@@ -431,24 +431,4 @@ enum max77843_irq_muic {
431#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \ 431#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
432 (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT) 432 (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
433 433
434struct max77843 {
435 struct device *dev;
436
437 struct i2c_client *i2c;
438 struct i2c_client *i2c_chg;
439 struct i2c_client *i2c_fuel;
440 struct i2c_client *i2c_muic;
441
442 struct regmap *regmap;
443 struct regmap *regmap_chg;
444 struct regmap *regmap_fuel;
445 struct regmap *regmap_muic;
446
447 struct regmap_irq_chip_data *irq_data;
448 struct regmap_irq_chip_data *irq_data_chg;
449 struct regmap_irq_chip_data *irq_data_fuel;
450 struct regmap_irq_chip_data *irq_data_muic;
451
452 int irq;
453};
454#endif /* __MAX77843_H__ */ 434#endif /* __MAX77843_H__ */
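On the MAX77843 side the same prefixing applies; decoding the cable type from STATUS2, for example, now reads as below. The masks come from this header, but the STATUS2 register index name is an assumption:

#include <linux/regmap.h>
#include <linux/mfd/max77843-private.h>

/* Illustrative: extract the charger-type field from MUIC STATUS2. */
static int max77843_muic_read_chgtyp(struct regmap *muic_regmap,
				     unsigned int *chgtyp)
{
	unsigned int status2;
	int ret;

	ret = regmap_read(muic_regmap, MAX77843_MUIC_REG_STATUS2, &status2);
	if (ret)
		return ret;

	*chgtyp = (status2 & MAX77843_MUIC_STATUS2_CHGTYP_MASK) >>
		  MAX77843_MUIC_STATUS2_CHGTYP_SHIFT;
	return 0;
}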
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index cf5265b0d1c1..45b8e8aa1fbf 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -57,6 +57,7 @@ struct mt6397_chip {
57 int irq; 57 int irq;
58 struct irq_domain *irq_domain; 58 struct irq_domain *irq_domain;
59 struct mutex irqlock; 59 struct mutex irqlock;
60 u16 wake_mask[2];
60 u16 irq_masks_cur[2]; 61 u16 irq_masks_cur[2];
61 u16 irq_masks_cache[2]; 62 u16 irq_masks_cache[2];
62}; 63};
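One plausible use of the new wake_mask banks — purely a sketch, not taken from the patch: an .irq_set_wake callback records which interrupts are wake sources so the suspend path can program only those into the hardware mask. The 16-interrupts-per-bank split is an assumption here:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/mfd/mt6397/core.h>

/* Illustrative .irq_set_wake callback: remember wake-enabled IRQs per bank. */
static int mt6397_irq_set_wake(struct irq_data *irq_data, unsigned int on)
{
	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(irq_data);
	int shift = irq_data->hwirq & 0xf;	/* assumed: 16 IRQs per bank */
	int reg = irq_data->hwirq >> 4;

	if (on)
		mt6397->wake_mask[reg] |= BIT(shift);
	else
		mt6397->wake_mask[reg] &= ~BIT(shift);

	return 0;
}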
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index bb270bd03eed..13e1d96935ed 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -21,6 +21,7 @@
21#include <linux/regmap.h> 21#include <linux/regmap.h>
22#include <linux/regulator/driver.h> 22#include <linux/regulator/driver.h>
23#include <linux/extcon.h> 23#include <linux/extcon.h>
24#include <linux/of_gpio.h>
24#include <linux/usb/phy_companion.h> 25#include <linux/usb/phy_companion.h>
25 26
26#define PALMAS_NUM_CLIENTS 3 27#define PALMAS_NUM_CLIENTS 3
@@ -551,10 +552,16 @@ struct palmas_usb {
551 int vbus_otg_irq; 552 int vbus_otg_irq;
552 int vbus_irq; 553 int vbus_irq;
553 554
555 int gpio_id_irq;
556 struct gpio_desc *id_gpiod;
557 unsigned long sw_debounce_jiffies;
558 struct delayed_work wq_detectid;
559
554 enum palmas_usb_state linkstat; 560 enum palmas_usb_state linkstat;
555 int wakeup; 561 int wakeup;
556 bool enable_vbus_detection; 562 bool enable_vbus_detection;
557 bool enable_id_detection; 563 bool enable_id_detection;
564 bool enable_gpio_id_detection;
558}; 565};
559 566
560#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator) 567#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
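The new fields sketch a GPIO-based ID detection path: an interrupt on id_gpiod kicks wq_detectid after a software debounce. A hedged illustration of how those fields could fit together — the handler names and the final state update are placeholders, not taken from the patch:

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/mfd/palmas.h>

/* Illustrative: debounce the ID pin in software, then re-check it. */
static irqreturn_t palmas_gpio_id_irq_handler(int irq, void *_palmas_usb)
{
	struct palmas_usb *palmas_usb = _palmas_usb;

	/* Defer the real state read until the line has settled. */
	queue_delayed_work(system_power_efficient_wq, &palmas_usb->wq_detectid,
			   palmas_usb->sw_debounce_jiffies);
	return IRQ_HANDLED;
}

static void palmas_gpio_id_detect(struct work_struct *work)
{
	struct palmas_usb *palmas_usb = container_of(to_delayed_work(work),
						     struct palmas_usb,
						     wq_detectid);
	int id = gpiod_get_value_cansleep(palmas_usb->id_gpiod);

	/* Placeholder: report the cable state to extcon based on "id". */
	pr_debug("palmas-usb: ID pin is %d\n", id);
}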
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d16f4c82c568..558a485d03ab 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -435,4 +435,12 @@
435#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1) 435#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
436#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) 436#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
437 437
438/* For imx6ul iomux gpr register field define */
439#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
440#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
441#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
442#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
443#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
444#define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17)
445
438#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ 446#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
new file mode 100644
index 000000000000..eb492d47f717
--- /dev/null
+++ b/include/linux/microchipphy.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _MICROCHIPPHY_H
19#define _MICROCHIPPHY_H
20
21#define LAN88XX_INT_MASK (0x19)
22#define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000)
23#define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000)
24#define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000)
25#define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000)
26#define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800)
27#define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400)
28#define LAN88XX_INT_MASK_POE_DETECT_ (0x0200)
29#define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100)
30#define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080)
31#define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040)
32#define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020)
33#define LAN88XX_INT_MASK_RESERVED_ (0x0010)
34#define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008)
35#define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004)
36#define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002)
37#define LAN88XX_INT_MASK_RX__ER_ (0x0001)
38
39#define LAN88XX_INT_STS (0x1A)
40#define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000)
41#define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000)
42#define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000)
43#define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000)
44#define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800)
45#define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400)
46#define LAN88XX_INT_STS_POE_DETECT_ (0x0200)
47#define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100)
48#define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080)
49#define LAN88XX_INT_STS_WOL_EVENT_ (0x0040)
50#define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020)
51#define LAN88XX_INT_STS_RESERVED_ (0x0010)
52#define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008)
53#define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004)
54#define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002)
55#define LAN88XX_INT_STS_RX_ER_ (0x0001)
56
57#define LAN88XX_EXT_PAGE_ACCESS (0x1F)
58#define LAN88XX_EXT_PAGE_SPACE_0 (0x0000)
59#define LAN88XX_EXT_PAGE_SPACE_1 (0x0001)
60#define LAN88XX_EXT_PAGE_SPACE_2 (0x0002)
61
62/* Extended Register Page 1 space */
63#define LAN88XX_EXT_MODE_CTRL (0x13)
64#define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C)
65#define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000)
66#define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008)
67#define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C)
68
69/* MMD 3 Registers */
70#define LAN88XX_MMD3_CHIP_ID (32877)
71#define LAN88XX_MMD3_CHIP_REV (32878)
72
73#endif /* _MICROCHIPPHY_H */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 819077c32690..81f6e427ba6b 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -67,7 +67,7 @@ struct miscdevice {
67}; 67};
68 68
69extern int misc_register(struct miscdevice *misc); 69extern int misc_register(struct miscdevice *misc);
70extern int misc_deregister(struct miscdevice *misc); 70extern void misc_deregister(struct miscdevice *misc);
71 71
72#define MODULE_ALIAS_MISCDEV(minor) \ 72#define MODULE_ALIAS_MISCDEV(minor) \
73 MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \ 73 MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
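The hunk above turns misc_deregister() from int into void, so teardown paths no longer have a return code to check. A minimal sketch of the resulting pattern, assuming a hypothetical "example0" node and an empty file_operations (both illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "example0",
	.fops  = &example_fops,
};

static int __init example_init(void)
{
	return misc_register(&example_misc);	/* registration still returns int */
}

static void __exit example_exit(void)
{
	misc_deregister(&example_misc);		/* no return value to check anymore */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");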
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index e7ecc12a1163..09cebe528488 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
88 88
89enum { 89enum {
90 MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31, 90 MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
91 MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29, 91 MLX4_CQE_CVLAN_PRESENT_MASK = 1 << 29,
92 MLX4_CQE_SVLAN_PRESENT_MASK = 1 << 30,
92 MLX4_CQE_L2_TUNNEL = 1 << 27, 93 MLX4_CQE_L2_TUNNEL = 1 << 27,
93 MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26, 94 MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
94 MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25, 95 MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1ce3b4a..baad4cb8e9b0 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -79,7 +79,8 @@ enum {
79 79
80enum { 80enum {
81 MLX4_MAX_PORTS = 2, 81 MLX4_MAX_PORTS = 2,
82 MLX4_MAX_PORT_PKEYS = 128 82 MLX4_MAX_PORT_PKEYS = 128,
83 MLX4_MAX_PORT_GIDS = 128
83}; 84};
84 85
85/* base qkey for use in sriov tunnel-qp/proxy-qp communication. 86/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
@@ -211,6 +212,8 @@ enum {
211 MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26, 212 MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
212 MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27, 213 MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
213 MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28, 214 MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
215 MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
216 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
214}; 217};
215 218
216enum { 219enum {
@@ -581,6 +584,7 @@ struct mlx4_caps {
581 u64 phys_port_id[MLX4_MAX_PORTS + 1]; 584 u64 phys_port_id[MLX4_MAX_PORTS + 1];
582 int tunnel_offload_mode; 585 int tunnel_offload_mode;
583 u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; 586 u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
587 u8 phv_bit[MLX4_MAX_PORTS + 1];
584 u8 alloc_res_qp_mask; 588 u8 alloc_res_qp_mask;
585 u32 dmfs_high_rate_qpn_base; 589 u32 dmfs_high_rate_qpn_base;
586 u32 dmfs_high_rate_qpn_range; 590 u32 dmfs_high_rate_qpn_range;
@@ -1332,6 +1336,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
1332int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, 1336int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
1333 u8 ignore_fcs_value); 1337 u8 ignore_fcs_value);
1334int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable); 1338int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
1339int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
1340int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
1335int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); 1341int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
1336int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 1342int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
1337int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1343int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
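set_phv_bit()/get_phv_bit() appear here only as prototypes, so as a rough illustration: a caller inside the mlx4 stack might pair them as below. The wrapper name and the read-before-write policy are assumptions; only the two prototypes come from this hunk.

#include <linux/mlx4/device.h>

static int example_enable_phv(struct mlx4_dev *dev, u8 port)
{
	int phv, err;

	err = get_phv_bit(dev, port, &phv);	/* read current PHV state */
	if (err)
		return err;

	if (!phv)
		err = set_phv_bit(dev, port, 1);	/* enable PHV on this port */

	return err;
}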
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 9553a73d2049..5a06d969338e 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -59,6 +59,7 @@ struct mlx4_interface {
59 void (*event) (struct mlx4_dev *dev, void *context, 59 void (*event) (struct mlx4_dev *dev, void *context,
60 enum mlx4_dev_event event, unsigned long param); 60 enum mlx4_dev_event event, unsigned long param);
61 void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); 61 void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
62 void (*activate)(struct mlx4_dev *dev, void *context);
62 struct list_head list; 63 struct list_head list;
63 enum mlx4_protocol protocol; 64 enum mlx4_protocol protocol;
64 int flags; 65 int flags;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6fed539e5456..de45a51b3f04 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -272,7 +272,8 @@ enum {
272 MLX4_WQE_CTRL_SOLICITED = 1 << 1, 272 MLX4_WQE_CTRL_SOLICITED = 1 << 1,
273 MLX4_WQE_CTRL_IP_CSUM = 1 << 4, 273 MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
274 MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, 274 MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
275 MLX4_WQE_CTRL_INS_VLAN = 1 << 6, 275 MLX4_WQE_CTRL_INS_CVLAN = 1 << 6,
276 MLX4_WQE_CTRL_INS_SVLAN = 1 << 7,
276 MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7, 277 MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
277 MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0, 278 MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
278}; 279};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b943cd9e2097..8eb3b19af2a4 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -402,6 +402,17 @@ struct mlx5_cmd_teardown_hca_mbox_out {
402 u8 rsvd[8]; 402 u8 rsvd[8];
403}; 403};
404 404
405struct mlx5_cmd_query_special_contexts_mbox_in {
406 struct mlx5_inbox_hdr hdr;
407 u8 rsvd[8];
408};
409
410struct mlx5_cmd_query_special_contexts_mbox_out {
411 struct mlx5_outbox_hdr hdr;
412 __be32 dump_fill_mkey;
413 __be32 resd_lkey;
414};
415
405struct mlx5_cmd_layout { 416struct mlx5_cmd_layout {
406 u8 type; 417 u8 type;
407 u8 rsvd0[3]; 418 u8 rsvd0[3];
@@ -1182,6 +1193,16 @@ enum {
1182 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, 1193 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
1183}; 1194};
1184 1195
1196enum {
1197 MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
1198 MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
1199 MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
1200 MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
1201 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1202 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
1203 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
1204};
1205
1185static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) 1206static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1186{ 1207{
1187 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) 1208 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5722d88c2429..27b53f9a24ad 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -103,6 +103,8 @@ enum {
103 MLX5_REG_PMTU = 0x5003, 103 MLX5_REG_PMTU = 0x5003,
104 MLX5_REG_PTYS = 0x5004, 104 MLX5_REG_PTYS = 0x5004,
105 MLX5_REG_PAOS = 0x5006, 105 MLX5_REG_PAOS = 0x5006,
106 MLX5_REG_PFCC = 0x5007,
107 MLX5_REG_PPCNT = 0x5008,
106 MLX5_REG_PMAOS = 0x5012, 108 MLX5_REG_PMAOS = 0x5012,
107 MLX5_REG_PUDE = 0x5009, 109 MLX5_REG_PUDE = 0x5009,
108 MLX5_REG_PMPE = 0x5010, 110 MLX5_REG_PMPE = 0x5010,
@@ -151,8 +153,8 @@ enum mlx5_dev_event {
151}; 153};
152 154
153enum mlx5_port_status { 155enum mlx5_port_status {
154 MLX5_PORT_UP = 1 << 1, 156 MLX5_PORT_UP = 1,
155 MLX5_PORT_DOWN = 1 << 2, 157 MLX5_PORT_DOWN = 2,
156}; 158};
157 159
158struct mlx5_uuar_info { 160struct mlx5_uuar_info {
@@ -380,7 +382,7 @@ struct mlx5_uar {
380 u32 index; 382 u32 index;
381 struct list_head bf_list; 383 struct list_head bf_list;
382 unsigned free_bf_bmap; 384 unsigned free_bf_bmap;
383 void __iomem *wc_map; 385 void __iomem *bf_map;
384 void __iomem *map; 386 void __iomem *map;
385}; 387};
386 388
@@ -435,6 +437,8 @@ struct mlx5_priv {
435 struct mlx5_uuar_info uuari; 437 struct mlx5_uuar_info uuari;
436 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); 438 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
437 439
440 struct io_mapping *bf_mapping;
441
438 /* pages stuff */ 442 /* pages stuff */
439 struct workqueue_struct *pg_wq; 443 struct workqueue_struct *pg_wq;
440 struct rb_root page_root; 444 struct rb_root page_root;
@@ -463,6 +467,10 @@ struct mlx5_priv {
 463 /* end: mr stuff */ 467 /* end: mr stuff */
464 468
 465 /* start: alloc stuff */ 469 /* start: alloc stuff */
 470 /* protect buffer allocation according to numa node */
471 struct mutex alloc_mutex;
472 int numa_node;
473
466 struct mutex pgdir_mutex; 474 struct mutex pgdir_mutex;
467 struct list_head pgdir_list; 475 struct list_head pgdir_list;
468 /* end: alloc staff */ 476 /* end: alloc staff */
@@ -672,6 +680,8 @@ void mlx5_health_cleanup(void);
672void __init mlx5_health_init(void); 680void __init mlx5_health_init(void);
673void mlx5_start_health_poll(struct mlx5_core_dev *dev); 681void mlx5_start_health_poll(struct mlx5_core_dev *dev);
674void mlx5_stop_health_poll(struct mlx5_core_dev *dev); 682void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
683int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
684 struct mlx5_buf *buf, int node);
675int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); 685int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
676void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); 686void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
677struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, 687struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -752,9 +762,10 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
752 u8 local_port); 762 u8 local_port);
753int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, 763int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
754 int proto_mask); 764 int proto_mask);
755int mlx5_set_port_status(struct mlx5_core_dev *dev, 765int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
756 enum mlx5_port_status status); 766 enum mlx5_port_status status);
757int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status); 767int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
768 enum mlx5_port_status *status);
758 769
759int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); 770int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
760void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); 771void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
@@ -764,6 +775,10 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
764int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, 775int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
765 u8 *vl_hw_cap, u8 local_port); 776 u8 *vl_hw_cap, u8 local_port);
766 777
778int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
779int mlx5_query_port_pause(struct mlx5_core_dev *dev,
780 u32 *rx_pause, u32 *tx_pause);
781
767int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 782int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
768void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 783void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
769int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, 784int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
@@ -773,6 +788,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
773int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); 788int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
774void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); 789void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
775int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); 790int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
791int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
792 int node);
776void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); 793void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
777 794
778const char *mlx5_command_str(int command); 795const char *mlx5_command_str(int command);
@@ -828,6 +845,7 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
828int mlx5_register_interface(struct mlx5_interface *intf); 845int mlx5_register_interface(struct mlx5_interface *intf);
829void mlx5_unregister_interface(struct mlx5_interface *intf); 846void mlx5_unregister_interface(struct mlx5_interface *intf);
830int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); 847int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
848int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey);
831 849
832struct mlx5_profile { 850struct mlx5_profile {
833 u64 mask; 851 u64 mask;
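A hedged sketch of how the renamed admin-status helpers and the new pause-frame accessors might be used together. The wrapper function, the choice to force pause on in both directions, and the error handling are illustrative assumptions; the prototypes and the MLX5_PORT_UP/MLX5_PORT_DOWN values come from this diff.

#include <linux/mlx5/driver.h>

static int example_bring_port_up(struct mlx5_core_dev *mdev)
{
	enum mlx5_port_status status;
	u32 rx_pause, tx_pause;
	int err;

	err = mlx5_query_port_admin_status(mdev, &status);
	if (err)
		return err;

	if (status != MLX5_PORT_UP) {
		err = mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
		if (err)
			return err;
	}

	/* enable global pause frames in both directions (illustrative choice) */
	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
	if (!err && (!rx_pause || !tx_pause))
		err = mlx5_set_port_pause(mdev, 1, 1);

	return err;
}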
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d2f6fee041c..dd2097455a2e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@ enum {
1936}; 1936};
1937 1937
1938enum { 1938enum {
1939 MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0, 1939 MLX5_RX_HASH_FN_NONE = 0x0,
1940 MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1, 1940 MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
1941 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2, 1941 MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
1942}; 1942};
1943 1943
1944enum { 1944enum {
@@ -4050,6 +4050,13 @@ struct mlx5_ifc_modify_tis_in_bits {
4050 struct mlx5_ifc_tisc_bits ctx; 4050 struct mlx5_ifc_tisc_bits ctx;
4051}; 4051};
4052 4052
4053struct mlx5_ifc_modify_tir_bitmask_bits {
4054 u8 reserved[0x20];
4055
4056 u8 reserved1[0x1f];
4057 u8 lro[0x1];
4058};
4059
4053struct mlx5_ifc_modify_tir_out_bits { 4060struct mlx5_ifc_modify_tir_out_bits {
4054 u8 status[0x8]; 4061 u8 status[0x8];
4055 u8 reserved_0[0x18]; 4062 u8 reserved_0[0x18];
@@ -4071,7 +4078,7 @@ struct mlx5_ifc_modify_tir_in_bits {
4071 4078
4072 u8 reserved_3[0x20]; 4079 u8 reserved_3[0x20];
4073 4080
4074 u8 modify_bitmask[0x40]; 4081 struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
4075 4082
4076 u8 reserved_4[0x40]; 4083 u8 reserved_4[0x40];
4077 4084
@@ -4116,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits {
4116 u8 reserved_1[0x40]; 4123 u8 reserved_1[0x40];
4117}; 4124};
4118 4125
4126struct mlx5_ifc_rqt_bitmask_bits {
4127 u8 reserved[0x20];
4128
4129 u8 reserved1[0x1f];
4130 u8 rqn_list[0x1];
4131};
4132
4119struct mlx5_ifc_modify_rqt_in_bits { 4133struct mlx5_ifc_modify_rqt_in_bits {
4120 u8 opcode[0x10]; 4134 u8 opcode[0x10];
4121 u8 reserved_0[0x10]; 4135 u8 reserved_0[0x10];
@@ -4128,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits {
4128 4142
4129 u8 reserved_3[0x20]; 4143 u8 reserved_3[0x20];
4130 4144
4131 u8 modify_bitmask[0x40]; 4145 struct mlx5_ifc_rqt_bitmask_bits bitmask;
4132 4146
4133 u8 reserved_4[0x40]; 4147 u8 reserved_4[0x40];
4134 4148
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf6f117fcf4d..fda728e3c27d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -124,8 +124,10 @@ extern unsigned int kobjsize(const void *objp);
124#define VM_MAYSHARE 0x00000080 124#define VM_MAYSHARE 0x00000080
125 125
126#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ 126#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
127#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
127#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ 128#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
128#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ 129#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
130#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
129 131
130#define VM_LOCKED 0x00002000 132#define VM_LOCKED 0x00002000
131#define VM_IO 0x00004000 /* Memory mapped I/O or similar */ 133#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
@@ -245,7 +247,10 @@ struct vm_fault {
245struct vm_operations_struct { 247struct vm_operations_struct {
246 void (*open)(struct vm_area_struct * area); 248 void (*open)(struct vm_area_struct * area);
247 void (*close)(struct vm_area_struct * area); 249 void (*close)(struct vm_area_struct * area);
250 int (*mremap)(struct vm_area_struct * area);
248 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); 251 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
252 int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
253 pmd_t *, unsigned int flags);
249 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); 254 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
250 255
251 /* notification that a previously read-only page is about to become 256 /* notification that a previously read-only page is about to become
@@ -304,18 +309,6 @@ struct inode;
304#define page_private(page) ((page)->private) 309#define page_private(page) ((page)->private)
305#define set_page_private(page, v) ((page)->private = (v)) 310#define set_page_private(page, v) ((page)->private = (v))
306 311
307/* It's valid only if the page is free path or free_list */
308static inline void set_freepage_migratetype(struct page *page, int migratetype)
309{
310 page->index = migratetype;
311}
312
313/* It's valid only if the page is free path or free_list */
314static inline int get_freepage_migratetype(struct page *page)
315{
316 return page->index;
317}
318
319/* 312/*
320 * FIXME: take this include out, include page-flags.h in 313 * FIXME: take this include out, include page-flags.h in
321 * files which need it (119 of them) 314 * files which need it (119 of them)
@@ -356,20 +349,15 @@ static inline int get_page_unless_zero(struct page *page)
356 return atomic_inc_not_zero(&page->_count); 349 return atomic_inc_not_zero(&page->_count);
357} 350}
358 351
359/*
360 * Try to drop a ref unless the page has a refcount of one, return false if
361 * that is the case.
362 * This is to make sure that the refcount won't become zero after this drop.
363 * This can be called when MMU is off so it must not access
364 * any of the virtual mappings.
365 */
366static inline int put_page_unless_one(struct page *page)
367{
368 return atomic_add_unless(&page->_count, -1, 1);
369}
370
371extern int page_is_ram(unsigned long pfn); 352extern int page_is_ram(unsigned long pfn);
372extern int region_is_ram(resource_size_t phys_addr, unsigned long size); 353
354enum {
355 REGION_INTERSECTS,
356 REGION_DISJOINT,
357 REGION_MIXED,
358};
359
360int region_intersects(resource_size_t offset, size_t size, const char *type);
373 361
374/* Support for virtually mapped pages */ 362/* Support for virtually mapped pages */
375struct page *vmalloc_to_page(const void *addr); 363struct page *vmalloc_to_page(const void *addr);
@@ -1257,6 +1245,11 @@ static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1257 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); 1245 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1258} 1246}
1259 1247
1248static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1249{
1250 return !vma->vm_ops;
1251}
1252
1260static inline int stack_guard_page_start(struct vm_area_struct *vma, 1253static inline int stack_guard_page_start(struct vm_area_struct *vma,
1261 unsigned long addr) 1254 unsigned long addr)
1262{ 1255{
@@ -1833,7 +1826,7 @@ extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1833extern struct vm_area_struct *vma_merge(struct mm_struct *, 1826extern struct vm_area_struct *vma_merge(struct mm_struct *,
1834 struct vm_area_struct *prev, unsigned long addr, unsigned long end, 1827 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1835 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, 1828 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1836 struct mempolicy *); 1829 struct mempolicy *, struct vm_userfaultfd_ctx);
1837extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); 1830extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1838extern int split_vma(struct mm_struct *, 1831extern int split_vma(struct mm_struct *,
1839 struct vm_area_struct *, unsigned long addr, int new_below); 1832 struct vm_area_struct *, unsigned long addr, int new_below);
@@ -1880,11 +1873,19 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo
1880 1873
1881extern unsigned long mmap_region(struct file *file, unsigned long addr, 1874extern unsigned long mmap_region(struct file *file, unsigned long addr,
1882 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); 1875 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
1883extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, 1876extern unsigned long do_mmap(struct file *file, unsigned long addr,
1884 unsigned long len, unsigned long prot, unsigned long flags, 1877 unsigned long len, unsigned long prot, unsigned long flags,
1885 unsigned long pgoff, unsigned long *populate); 1878 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
1886extern int do_munmap(struct mm_struct *, unsigned long, size_t); 1879extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1887 1880
1881static inline unsigned long
1882do_mmap_pgoff(struct file *file, unsigned long addr,
1883 unsigned long len, unsigned long prot, unsigned long flags,
1884 unsigned long pgoff, unsigned long *populate)
1885{
1886 return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
1887}
1888
1888#ifdef CONFIG_MMU 1889#ifdef CONFIG_MMU
1889extern int __mm_populate(unsigned long addr, unsigned long len, 1890extern int __mm_populate(unsigned long addr, unsigned long len,
1890 int ignore_errors); 1891 int ignore_errors);
@@ -2183,6 +2184,7 @@ extern int memory_failure(unsigned long pfn, int trapno, int flags);
2183extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); 2184extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2184extern int unpoison_memory(unsigned long pfn); 2185extern int unpoison_memory(unsigned long pfn);
2185extern int get_hwpoison_page(struct page *page); 2186extern int get_hwpoison_page(struct page *page);
2187extern void put_hwpoison_page(struct page *page);
2186extern int sysctl_memory_failure_early_kill; 2188extern int sysctl_memory_failure_early_kill;
2187extern int sysctl_memory_failure_recovery; 2189extern int sysctl_memory_failure_recovery;
2188extern void shake_page(struct page *p, int access); 2190extern void shake_page(struct page *p, int access);
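region_is_ram() and the freepage-migratetype helpers go away in favour of region_intersects(), which classifies a physical range against a named resource type and returns one of the new REGION_* values. A minimal sketch; the conventional "System RAM" resource name is an assumption, not spelled out in this hunk.

#include <linux/mm.h>

/* true only when the whole [offset, offset + size) range lies in System RAM */
static bool example_range_is_ram(resource_size_t offset, size_t size)
{
	return region_intersects(offset, size, "System RAM") == REGION_INTERSECTS;
}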
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 15549578d559..3d6baa7d4534 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -235,7 +235,7 @@ struct page_frag_cache {
235 bool pfmemalloc; 235 bool pfmemalloc;
236}; 236};
237 237
238typedef unsigned long __nocast vm_flags_t; 238typedef unsigned long vm_flags_t;
239 239
240/* 240/*
241 * A region containing a mapping of a non-memory backed file under NOMMU 241 * A region containing a mapping of a non-memory backed file under NOMMU
@@ -256,6 +256,16 @@ struct vm_region {
256 * this region */ 256 * this region */
257}; 257};
258 258
259#ifdef CONFIG_USERFAULTFD
260#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
261struct vm_userfaultfd_ctx {
262 struct userfaultfd_ctx *ctx;
263};
264#else /* CONFIG_USERFAULTFD */
265#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
266struct vm_userfaultfd_ctx {};
267#endif /* CONFIG_USERFAULTFD */
268
259/* 269/*
260 * This struct defines a memory VMM memory area. There is one of these 270 * This struct defines a memory VMM memory area. There is one of these
261 * per VM-area/task. A VM area is any part of the process virtual memory 271 * per VM-area/task. A VM area is any part of the process virtual memory
@@ -322,6 +332,7 @@ struct vm_area_struct {
322#ifdef CONFIG_NUMA 332#ifdef CONFIG_NUMA
323 struct mempolicy *vm_policy; /* NUMA policy for the VMA */ 333 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
324#endif 334#endif
335 struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
325}; 336};
326 337
327struct core_thread { 338struct core_thread {
@@ -543,6 +554,7 @@ enum tlb_flush_reason {
543 TLB_REMOTE_SHOOTDOWN, 554 TLB_REMOTE_SHOOTDOWN,
544 TLB_LOCAL_SHOOTDOWN, 555 TLB_LOCAL_SHOOTDOWN,
545 TLB_LOCAL_MM_SHOOTDOWN, 556 TLB_LOCAL_MM_SHOOTDOWN,
557 TLB_REMOTE_SEND_IPI,
546 NR_TLB_FLUSH_REASONS, 558 NR_TLB_FLUSH_REASONS,
547}; 559};
548 560
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 4d3776d25925..fdd0779ccdfa 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -279,10 +279,13 @@ struct mmc_card {
279#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ 279#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
280#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ 280#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
281#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ 281#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
282#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
283
282 284
283 unsigned int erase_size; /* erase size in sectors */ 285 unsigned int erase_size; /* erase size in sectors */
284 unsigned int erase_shift; /* if erase unit is power 2 */ 286 unsigned int erase_shift; /* if erase unit is power 2 */
285 unsigned int pref_erase; /* in sectors */ 287 unsigned int pref_erase; /* in sectors */
288 unsigned int eg_boundary; /* don't cross erase-group boundaries */
286 u8 erased_byte; /* value of erased bytes */ 289 u8 erased_byte; /* value of erased bytes */
287 290
288 u32 raw_cid[4]; /* raw card CID */ 291 u32 raw_cid[4]; /* raw card CID */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 5be97676f1fa..134c57422740 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -98,6 +98,7 @@ struct mmc_data;
98 * @irq_flags: The flags to be passed to request_irq. 98 * @irq_flags: The flags to be passed to request_irq.
99 * @irq: The irq value to be passed to request_irq. 99 * @irq: The irq value to be passed to request_irq.
100 * @sdio_id0: Number of slot0 in the SDIO interrupt registers. 100 * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
101 * @dto_timer: Timer for broken data transfer over scheme.
101 * 102 *
102 * Locking 103 * Locking
103 * ======= 104 * =======
@@ -153,11 +154,7 @@ struct dw_mci {
153 dma_addr_t sg_dma; 154 dma_addr_t sg_dma;
154 void *sg_cpu; 155 void *sg_cpu;
155 const struct dw_mci_dma_ops *dma_ops; 156 const struct dw_mci_dma_ops *dma_ops;
156#ifdef CONFIG_MMC_DW_IDMAC
157 unsigned int ring_size; 157 unsigned int ring_size;
158#else
159 struct dw_mci_dma_data *dma_data;
160#endif
161 u32 cmd_status; 158 u32 cmd_status;
162 u32 data_status; 159 u32 data_status;
163 u32 stop_cmdr; 160 u32 stop_cmdr;
@@ -204,6 +201,7 @@ struct dw_mci {
204 int sdio_id0; 201 int sdio_id0;
205 202
206 struct timer_list cmd11_timer; 203 struct timer_list cmd11_timer;
204 struct timer_list dto_timer;
207}; 205};
208 206
209/* DMA ops for Internal/External DMAC interface */ 207/* DMA ops for Internal/External DMAC interface */
@@ -226,6 +224,8 @@ struct dw_mci_dma_ops {
226#define DW_MCI_QUIRK_HIGHSPEED BIT(2) 224#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
227/* Unreliable card detection */ 225/* Unreliable card detection */
228#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) 226#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
227/* Timer for broken data transfer over scheme */
228#define DW_MCI_QUIRK_BROKEN_DTO BIT(4)
229 229
230struct dma_pdata; 230struct dma_pdata;
231 231
@@ -259,7 +259,6 @@ struct dw_mci_board {
259 259
260 struct dw_mci_dma_ops *dma_ops; 260 struct dw_mci_dma_ops *dma_ops;
261 struct dma_pdata *data; 261 struct dma_pdata *data;
262 struct block_settings *blk_settings;
263}; 262};
264 263
265#endif /* LINUX_MMC_DW_MMC_H */ 264#endif /* LINUX_MMC_DW_MMC_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1369e54faeb7..83b81fd865f3 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -412,7 +412,8 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
412{ 412{
413 host->ops->enable_sdio_irq(host, 0); 413 host->ops->enable_sdio_irq(host, 0);
414 host->sdio_irq_pending = true; 414 host->sdio_irq_pending = true;
415 wake_up_process(host->sdio_irq_thread); 415 if (host->sdio_irq_thread)
416 wake_up_process(host->sdio_irq_thread);
416} 417}
417 418
418void sdio_run_irqs(struct mmc_host *host); 419void sdio_run_irqs(struct mmc_host *host);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 61cd67f4d788..a1a210d59961 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -66,6 +66,16 @@ struct mmu_notifier_ops {
66 unsigned long end); 66 unsigned long end);
67 67
68 /* 68 /*
69 * clear_young is a lightweight version of clear_flush_young. Like the
70 * latter, it is supposed to test-and-clear the young/accessed bitflag
71 * in the secondary pte, but it may omit flushing the secondary tlb.
72 */
73 int (*clear_young)(struct mmu_notifier *mn,
74 struct mm_struct *mm,
75 unsigned long start,
76 unsigned long end);
77
78 /*
69 * test_young is called to check the young/accessed bitflag in 79 * test_young is called to check the young/accessed bitflag in
70 * the secondary pte. This is used to know if the page is 80 * the secondary pte. This is used to know if the page is
71 * frequently used without actually clearing the flag or tearing 81 * frequently used without actually clearing the flag or tearing
@@ -203,6 +213,9 @@ extern void __mmu_notifier_release(struct mm_struct *mm);
203extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, 213extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
204 unsigned long start, 214 unsigned long start,
205 unsigned long end); 215 unsigned long end);
216extern int __mmu_notifier_clear_young(struct mm_struct *mm,
217 unsigned long start,
218 unsigned long end);
206extern int __mmu_notifier_test_young(struct mm_struct *mm, 219extern int __mmu_notifier_test_young(struct mm_struct *mm,
207 unsigned long address); 220 unsigned long address);
208extern void __mmu_notifier_change_pte(struct mm_struct *mm, 221extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -231,6 +244,15 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
231 return 0; 244 return 0;
232} 245}
233 246
247static inline int mmu_notifier_clear_young(struct mm_struct *mm,
248 unsigned long start,
249 unsigned long end)
250{
251 if (mm_has_notifiers(mm))
252 return __mmu_notifier_clear_young(mm, start, end);
253 return 0;
254}
255
234static inline int mmu_notifier_test_young(struct mm_struct *mm, 256static inline int mmu_notifier_test_young(struct mm_struct *mm,
235 unsigned long address) 257 unsigned long address)
236{ 258{
@@ -311,6 +333,28 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
311 __young; \ 333 __young; \
312}) 334})
313 335
336#define ptep_clear_young_notify(__vma, __address, __ptep) \
337({ \
338 int __young; \
339 struct vm_area_struct *___vma = __vma; \
340 unsigned long ___address = __address; \
341 __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
342 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
343 ___address + PAGE_SIZE); \
344 __young; \
345})
346
347#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
348({ \
349 int __young; \
350 struct vm_area_struct *___vma = __vma; \
351 unsigned long ___address = __address; \
352 __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
353 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
354 ___address + PMD_SIZE); \
355 __young; \
356})
357
314#define ptep_clear_flush_notify(__vma, __address, __ptep) \ 358#define ptep_clear_flush_notify(__vma, __address, __ptep) \
315({ \ 359({ \
316 unsigned long ___addr = __address & PAGE_MASK; \ 360 unsigned long ___addr = __address & PAGE_MASK; \
@@ -427,6 +471,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
427 471
428#define ptep_clear_flush_young_notify ptep_clear_flush_young 472#define ptep_clear_flush_young_notify ptep_clear_flush_young
429#define pmdp_clear_flush_young_notify pmdp_clear_flush_young 473#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
474#define ptep_clear_young_notify ptep_test_and_clear_young
475#define pmdp_clear_young_notify pmdp_test_and_clear_young
430#define ptep_clear_flush_notify ptep_clear_flush 476#define ptep_clear_flush_notify ptep_clear_flush
431#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush 477#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
432#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear 478#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
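The new ->clear_young() callback and the ptep/pmdp_clear_young_notify() wrappers give secondary-MMU users a flush-free way to age pages. A sketch of the consumer side with a placeholder body; the real work of walking and clearing the device's own page tables is driver specific and omitted here.

#include <linux/mmu_notifier.h>

static int example_clear_young(struct mmu_notifier *mn, struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	/*
	 * Test-and-clear accessed bits in the secondary PTEs covering
	 * [start, end) without flushing the secondary TLB.
	 */
	return 0;	/* return non-zero if any PTE in the range was young */
}

static const struct mmu_notifier_ops example_mmu_ops = {
	.clear_young = example_clear_young,
};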
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 754c25966a0a..d94347737292 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -319,7 +319,11 @@ enum zone_type {
319 ZONE_HIGHMEM, 319 ZONE_HIGHMEM,
320#endif 320#endif
321 ZONE_MOVABLE, 321 ZONE_MOVABLE,
322#ifdef CONFIG_ZONE_DEVICE
323 ZONE_DEVICE,
324#endif
322 __MAX_NR_ZONES 325 __MAX_NR_ZONES
326
323}; 327};
324 328
325#ifndef __GENERATING_BOUNDS_H 329#ifndef __GENERATING_BOUNDS_H
@@ -690,14 +694,6 @@ struct zonelist {
690#endif 694#endif
691}; 695};
692 696
693#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
694struct node_active_region {
695 unsigned long start_pfn;
696 unsigned long end_pfn;
697 int nid;
698};
699#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
700
701#ifndef CONFIG_DISCONTIGMEM 697#ifndef CONFIG_DISCONTIGMEM
702/* The array of struct pages - for discontigmem use pgdat->lmem_map */ 698/* The array of struct pages - for discontigmem use pgdat->lmem_map */
703extern struct page *mem_map; 699extern struct page *mem_map;
@@ -794,6 +790,25 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
794 return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; 790 return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
795} 791}
796 792
793static inline int zone_id(const struct zone *zone)
794{
795 struct pglist_data *pgdat = zone->zone_pgdat;
796
797 return zone - pgdat->node_zones;
798}
799
800#ifdef CONFIG_ZONE_DEVICE
801static inline bool is_dev_zone(const struct zone *zone)
802{
803 return zone_id(zone) == ZONE_DEVICE;
804}
805#else
806static inline bool is_dev_zone(const struct zone *zone)
807{
808 return false;
809}
810#endif
811
797#include <linux/memory_hotplug.h> 812#include <linux/memory_hotplug.h>
798 813
799extern struct mutex zonelists_mutex; 814extern struct mutex zonelists_mutex;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 34f25b7bf642..688997a24aad 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -253,7 +253,7 @@ struct pcmcia_device_id {
253 253
254 __u32 prod_id_hash[4]; 254 __u32 prod_id_hash[4];
255 255
256 /* not matched against in kernelspace*/ 256 /* not matched against in kernelspace */
257 const char * prod_id[4]; 257 const char * prod_id[4];
258 258
259 /* not matched against */ 259 /* not matched against */
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644
index 000000000000..ef29eb2d6dfd
--- /dev/null
+++ b/include/linux/mpls_iptunnel.h
@@ -0,0 +1,6 @@
1#ifndef _LINUX_MPLS_IPTUNNEL_H
2#define _LINUX_MPLS_IPTUNNEL_H
3
4#include <uapi/linux/mpls_iptunnel.h>
5
6#endif /* _LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8ac4a68ffae2..ad939d0ba816 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -14,38 +14,85 @@ extern int pci_msi_ignore_mask;
14/* Helper functions */ 14/* Helper functions */
15struct irq_data; 15struct irq_data;
16struct msi_desc; 16struct msi_desc;
17struct pci_dev;
18struct platform_msi_priv_data;
17void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
18void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 20void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
19 21
22typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
23 struct msi_msg *msg);
24
25/**
26 * platform_msi_desc - Platform device specific msi descriptor data
27 * @msi_priv_data: Pointer to platform private data
28 * @msi_index: The index of the MSI descriptor for multi MSI
29 */
30struct platform_msi_desc {
31 struct platform_msi_priv_data *msi_priv_data;
32 u16 msi_index;
33};
34
35/**
36 * struct msi_desc - Descriptor structure for MSI based interrupts
37 * @list: List head for management
38 * @irq: The base interrupt number
39 * @nvec_used: The number of vectors used
40 * @dev: Pointer to the device which uses this descriptor
41 * @msg: The last set MSI message cached for reuse
42 *
43 * @masked: [PCI MSI/X] Mask bits
44 * @is_msix: [PCI MSI/X] True if MSI-X
45 * @multiple: [PCI MSI/X] log2 num of messages allocated
46 * @multi_cap: [PCI MSI/X] log2 num of messages supported
47 * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
48 * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
49 * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
50 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
51 * @mask_pos: [PCI MSI] Mask register position
52 * @mask_base: [PCI MSI-X] Mask register base address
53 * @platform: [platform] Platform device specific msi descriptor data
54 */
20struct msi_desc { 55struct msi_desc {
21 struct { 56 /* Shared device/bus type independent data */
22 __u8 is_msix : 1; 57 struct list_head list;
23 __u8 multiple: 3; /* log2 num of messages allocated */ 58 unsigned int irq;
24 __u8 multi_cap : 3; /* log2 num of messages supported */ 59 unsigned int nvec_used;
25 __u8 maskbit : 1; /* mask-pending bit supported ? */ 60 struct device *dev;
26 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 61 struct msi_msg msg;
27 __u16 entry_nr; /* specific enabled entry */
28 unsigned default_irq; /* default pre-assigned irq */
29 } msi_attrib;
30
31 u32 masked; /* mask bits */
32 unsigned int irq;
33 unsigned int nvec_used; /* number of messages */
34 struct list_head list;
35 62
36 union { 63 union {
37 void __iomem *mask_base; 64 /* PCI MSI/X specific data */
38 u8 mask_pos; 65 struct {
39 }; 66 u32 masked;
40 struct pci_dev *dev; 67 struct {
68 __u8 is_msix : 1;
69 __u8 multiple : 3;
70 __u8 multi_cap : 3;
71 __u8 maskbit : 1;
72 __u8 is_64 : 1;
73 __u16 entry_nr;
74 unsigned default_irq;
75 } msi_attrib;
76 union {
77 u8 mask_pos;
78 void __iomem *mask_base;
79 };
80 };
41 81
42 /* Last set MSI message */ 82 /*
43 struct msi_msg msg; 83 * Non PCI variants add their data structure here. New
84 * entries need to use a named structure. We want
85 * proper name spaces for this. The PCI part is
86 * anonymous for now as it would require an immediate
87 * tree wide cleanup.
88 */
89 struct platform_msi_desc platform;
90 };
44}; 91};
45 92
46/* Helpers to hide struct msi_desc implementation details */ 93/* Helpers to hide struct msi_desc implementation details */
47#define msi_desc_to_dev(desc) (&(desc)->dev.dev) 94#define msi_desc_to_dev(desc) ((desc)->dev)
48#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list) 95#define dev_to_msi_list(dev) (&(dev)->msi_list)
49#define first_msi_entry(dev) \ 96#define first_msi_entry(dev) \
50 list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) 97 list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
51#define for_each_msi_entry(desc, dev) \ 98#define for_each_msi_entry(desc, dev) \
@@ -56,12 +103,17 @@ struct msi_desc {
56#define for_each_pci_msi_entry(desc, pdev) \ 103#define for_each_pci_msi_entry(desc, pdev) \
57 for_each_msi_entry((desc), &(pdev)->dev) 104 for_each_msi_entry((desc), &(pdev)->dev)
58 105
59static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) 106struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
107void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
108#else /* CONFIG_PCI_MSI */
109static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
60{ 110{
61 return desc->dev; 111 return NULL;
62} 112}
63#endif /* CONFIG_PCI_MSI */ 113#endif /* CONFIG_PCI_MSI */
64 114
115struct msi_desc *alloc_msi_entry(struct device *dev);
116void free_msi_entry(struct msi_desc *entry);
65void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 117void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
66void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 118void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
67void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); 119void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
@@ -108,9 +160,6 @@ struct msi_controller {
108 struct device *dev; 160 struct device *dev;
109 struct device_node *of_node; 161 struct device_node *of_node;
110 struct list_head list; 162 struct list_head list;
111#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
112 struct irq_domain *domain;
113#endif
114 163
115 int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, 164 int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
116 struct msi_desc *desc); 165 struct msi_desc *desc);
@@ -221,6 +270,12 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
221void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); 270void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
222struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); 271struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
223 272
273struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
274 struct msi_domain_info *info,
275 struct irq_domain *parent);
276int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
277 irq_write_msi_msg_t write_msi_msg);
278void platform_msi_domain_free_irqs(struct device *dev);
224#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ 279#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
225 280
226#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN 281#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
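For the new platform-MSI entry points, a rough sketch of a driver wiring them up. The vector count, the function names, and the doorbell comment are illustrative assumptions; the three prototypes and the irq_write_msi_msg_t callback type come from the diff above.

#include <linux/msi.h>

static void example_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* program msg->address_lo/address_hi and msg->data into the device's doorbell */
}

static int example_setup_msis(struct device *dev)
{
	/* four vectors is an arbitrary, illustrative count */
	return platform_msi_domain_alloc_irqs(dev, 4, example_write_msi_msg);
}

static void example_teardown_msis(struct device *dev)
{
	platform_msi_domain_free_irqs(dev);
}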
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 29975c73a953..366cf77953b5 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -27,9 +27,9 @@
27#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/bug.h> 28#include <linux/bug.h>
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/io.h>
30 31
31#include <asm/unaligned.h> 32#include <asm/unaligned.h>
32#include <asm/io.h>
33#include <asm/barrier.h> 33#include <asm/barrier.h>
34 34
35#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 35#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
diff --git a/include/linux/net.h b/include/linux/net.h
index 04aa06852771..049d4b03c4c4 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -239,8 +239,16 @@ do { \
239 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) 239 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
240#define net_info_ratelimited(fmt, ...) \ 240#define net_info_ratelimited(fmt, ...) \
241 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) 241 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
242#if defined(DEBUG)
242#define net_dbg_ratelimited(fmt, ...) \ 243#define net_dbg_ratelimited(fmt, ...) \
243 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) 244 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
245#else
246#define net_dbg_ratelimited(fmt, ...) \
247 do { \
248 if (0) \
249 no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
250 } while (0)
251#endif
244 252
245bool __net_get_random_once(void *buf, int nbytes, bool *done, 253bool __net_get_random_once(void *buf, int nbytes, bool *done,
246 struct static_key *done_key); 254 struct static_key *done_key);
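With the change above, net_dbg_ratelimited() compiles down to a no-op unless DEBUG is defined, while no_printk() still type-checks the format string and arguments. A trivial, illustrative call site:

#include <linux/net.h>

static void example_rx_error(const char *ifname, int err)
{
	net_dbg_ratelimited("%s: rx error %d\n", ifname, err);
}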
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e20979dfd6a9..88a00694eda5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -766,6 +766,13 @@ struct netdev_phys_item_id {
766 unsigned char id_len; 766 unsigned char id_len;
767}; 767};
768 768
769static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
770 struct netdev_phys_item_id *b)
771{
772 return a->id_len == b->id_len &&
773 memcmp(a->id, b->id, a->id_len) == 0;
774}
775
769typedef u16 (*select_queue_fallback_t)(struct net_device *dev, 776typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
770 struct sk_buff *skb); 777 struct sk_buff *skb);
771 778
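A short sketch of the new netdev_phys_item_id_same() helper added in the hunk above; fetching the IDs through dev_get_phys_port_id() and treating a matching ID as "same switch" are assumptions about the caller, not part of this hunk.

#include <linux/netdevice.h>

static bool example_same_switch(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id id_a, id_b;

	if (dev_get_phys_port_id(a, &id_a) || dev_get_phys_port_id(b, &id_b))
		return false;	/* at least one device exposes no physical port ID */

	return netdev_phys_item_id_same(&id_a, &id_b);
}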
@@ -1041,6 +1048,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1041 * TX queue. 1048 * TX queue.
1042 * int (*ndo_get_iflink)(const struct net_device *dev); 1049 * int (*ndo_get_iflink)(const struct net_device *dev);
1043 * Called to get the iflink value of this device. 1050 * Called to get the iflink value of this device.
 1051 * int (*ndo_change_proto_down)(struct net_device *dev,
1052 * bool proto_down);
1053 * This function is used to pass protocol port error state information
1054 * to the switch driver. The switch driver can react to the proto_down
1055 * by doing a phys down on the associated switch port.
1056 *
1044 */ 1057 */
1045struct net_device_ops { 1058struct net_device_ops {
1046 int (*ndo_init)(struct net_device *dev); 1059 int (*ndo_init)(struct net_device *dev);
@@ -1211,6 +1224,8 @@ struct net_device_ops {
1211 int queue_index, 1224 int queue_index,
1212 u32 maxrate); 1225 u32 maxrate);
1213 int (*ndo_get_iflink)(const struct net_device *dev); 1226 int (*ndo_get_iflink)(const struct net_device *dev);
1227 int (*ndo_change_proto_down)(struct net_device *dev,
1228 bool proto_down);
1214}; 1229};
1215 1230
1216/** 1231/**
@@ -1225,13 +1240,8 @@ struct net_device_ops {
1225 * 1240 *
1226 * @IFF_802_1Q_VLAN: 802.1Q VLAN device 1241 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1227 * @IFF_EBRIDGE: Ethernet bridging device 1242 * @IFF_EBRIDGE: Ethernet bridging device
1228 * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
1229 * @IFF_MASTER_8023AD: bonding master, 802.3ad
1230 * @IFF_MASTER_ALB: bonding master, balance-alb
1231 * @IFF_BONDING: bonding master or slave 1243 * @IFF_BONDING: bonding master or slave
1232 * @IFF_SLAVE_NEEDARP: need ARPs for validation
1233 * @IFF_ISATAP: ISATAP interface (RFC4214) 1244 * @IFF_ISATAP: ISATAP interface (RFC4214)
1234 * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
1235 * @IFF_WAN_HDLC: WAN HDLC device 1245 * @IFF_WAN_HDLC: WAN HDLC device
1236 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to 1246 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1237 * release skb->dst 1247 * release skb->dst
@@ -1247,44 +1257,40 @@ struct net_device_ops {
1247 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address 1257 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1248 * change when it's running 1258 * change when it's running
1249 * @IFF_MACVLAN: Macvlan device 1259 * @IFF_MACVLAN: Macvlan device
1260 * @IFF_VRF_MASTER: device is a VRF master
1261 * @IFF_NO_QUEUE: device can run without qdisc attached
 1262 * @IFF_OPENVSWITCH: device is an Open vSwitch master
1250 */ 1263 */
1251enum netdev_priv_flags { 1264enum netdev_priv_flags {
1252 IFF_802_1Q_VLAN = 1<<0, 1265 IFF_802_1Q_VLAN = 1<<0,
1253 IFF_EBRIDGE = 1<<1, 1266 IFF_EBRIDGE = 1<<1,
1254 IFF_SLAVE_INACTIVE = 1<<2, 1267 IFF_BONDING = 1<<2,
1255 IFF_MASTER_8023AD = 1<<3, 1268 IFF_ISATAP = 1<<3,
1256 IFF_MASTER_ALB = 1<<4, 1269 IFF_WAN_HDLC = 1<<4,
1257 IFF_BONDING = 1<<5, 1270 IFF_XMIT_DST_RELEASE = 1<<5,
1258 IFF_SLAVE_NEEDARP = 1<<6, 1271 IFF_DONT_BRIDGE = 1<<6,
1259 IFF_ISATAP = 1<<7, 1272 IFF_DISABLE_NETPOLL = 1<<7,
1260 IFF_MASTER_ARPMON = 1<<8, 1273 IFF_MACVLAN_PORT = 1<<8,
1261 IFF_WAN_HDLC = 1<<9, 1274 IFF_BRIDGE_PORT = 1<<9,
1262 IFF_XMIT_DST_RELEASE = 1<<10, 1275 IFF_OVS_DATAPATH = 1<<10,
1263 IFF_DONT_BRIDGE = 1<<11, 1276 IFF_TX_SKB_SHARING = 1<<11,
1264 IFF_DISABLE_NETPOLL = 1<<12, 1277 IFF_UNICAST_FLT = 1<<12,
1265 IFF_MACVLAN_PORT = 1<<13, 1278 IFF_TEAM_PORT = 1<<13,
1266 IFF_BRIDGE_PORT = 1<<14, 1279 IFF_SUPP_NOFCS = 1<<14,
1267 IFF_OVS_DATAPATH = 1<<15, 1280 IFF_LIVE_ADDR_CHANGE = 1<<15,
1268 IFF_TX_SKB_SHARING = 1<<16, 1281 IFF_MACVLAN = 1<<16,
1269 IFF_UNICAST_FLT = 1<<17, 1282 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1270 IFF_TEAM_PORT = 1<<18, 1283 IFF_IPVLAN_MASTER = 1<<18,
1271 IFF_SUPP_NOFCS = 1<<19, 1284 IFF_IPVLAN_SLAVE = 1<<19,
1272 IFF_LIVE_ADDR_CHANGE = 1<<20, 1285 IFF_VRF_MASTER = 1<<20,
1273 IFF_MACVLAN = 1<<21, 1286 IFF_NO_QUEUE = 1<<21,
1274 IFF_XMIT_DST_RELEASE_PERM = 1<<22, 1287 IFF_OPENVSWITCH = 1<<22,
1275 IFF_IPVLAN_MASTER = 1<<23,
1276 IFF_IPVLAN_SLAVE = 1<<24,
1277}; 1288};
1278 1289
1279#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1290#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1280#define IFF_EBRIDGE IFF_EBRIDGE 1291#define IFF_EBRIDGE IFF_EBRIDGE
1281#define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
1282#define IFF_MASTER_8023AD IFF_MASTER_8023AD
1283#define IFF_MASTER_ALB IFF_MASTER_ALB
1284#define IFF_BONDING IFF_BONDING 1292#define IFF_BONDING IFF_BONDING
1285#define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
1286#define IFF_ISATAP IFF_ISATAP 1293#define IFF_ISATAP IFF_ISATAP
1287#define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
1288#define IFF_WAN_HDLC IFF_WAN_HDLC 1294#define IFF_WAN_HDLC IFF_WAN_HDLC
1289#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE 1295#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1290#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE 1296#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
@@ -1301,6 +1307,9 @@ enum netdev_priv_flags {
1301#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM 1307#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1302#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER 1308#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
1303#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE 1309#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
1310#define IFF_VRF_MASTER IFF_VRF_MASTER
1311#define IFF_NO_QUEUE IFF_NO_QUEUE
1312#define IFF_OPENVSWITCH IFF_OPENVSWITCH
1304 1313
1305/** 1314/**
1306 * struct net_device - The DEVICE structure. 1315 * struct net_device - The DEVICE structure.
@@ -1417,6 +1426,7 @@ enum netdev_priv_flags {
1417 * @dn_ptr: DECnet specific data 1426 * @dn_ptr: DECnet specific data
1418 * @ip6_ptr: IPv6 specific data 1427 * @ip6_ptr: IPv6 specific data
1419 * @ax25_ptr: AX.25 specific data 1428 * @ax25_ptr: AX.25 specific data
1429 * @vrf_ptr: VRF specific data
1420 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1430 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1421 * 1431 *
1422 * @last_rx: Time of last Rx 1432 * @last_rx: Time of last Rx
@@ -1448,6 +1458,8 @@ enum netdev_priv_flags {
1448 * 1458 *
1449 * @xps_maps: XXX: need comments on this one 1459 * @xps_maps: XXX: need comments on this one
1450 * 1460 *
1461 * @offload_fwd_mark: Offload device fwding mark
1462 *
1451 * @trans_start: Time (in jiffies) of last Tx 1463 * @trans_start: Time (in jiffies) of last Tx
1452 * @watchdog_timeo: Represents the timeout that is used by 1464 * @watchdog_timeo: Represents the timeout that is used by
1453 * the watchdog ( see dev_watchdog() ) 1465 * the watchdog ( see dev_watchdog() )
@@ -1502,6 +1514,10 @@ enum netdev_priv_flags {
1502 * 1514 *
1503 * @qdisc_tx_busylock: XXX: need comments on this one 1515 * @qdisc_tx_busylock: XXX: need comments on this one
1504 * 1516 *
1517 * @proto_down: protocol port state information can be sent to the
1518 * switch driver and used to set the phys state of the
1519 * switch port.
1520 *
1505 * FIXME: cleanup struct net_device such that network protocol info 1521 * FIXME: cleanup struct net_device such that network protocol info
1506 * moves out. 1522 * moves out.
1507 */ 1523 */
@@ -1629,6 +1645,7 @@ struct net_device {
1629 struct dn_dev __rcu *dn_ptr; 1645 struct dn_dev __rcu *dn_ptr;
1630 struct inet6_dev __rcu *ip6_ptr; 1646 struct inet6_dev __rcu *ip6_ptr;
1631 void *ax25_ptr; 1647 void *ax25_ptr;
1648 struct net_vrf_dev __rcu *vrf_ptr;
1632 struct wireless_dev *ieee80211_ptr; 1649 struct wireless_dev *ieee80211_ptr;
1633 struct wpan_dev *ieee802154_ptr; 1650 struct wpan_dev *ieee802154_ptr;
1634#if IS_ENABLED(CONFIG_MPLS_ROUTING) 1651#if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -1685,6 +1702,10 @@ struct net_device {
1685 struct xps_dev_maps __rcu *xps_maps; 1702 struct xps_dev_maps __rcu *xps_maps;
1686#endif 1703#endif
1687 1704
1705#ifdef CONFIG_NET_SWITCHDEV
1706 u32 offload_fwd_mark;
1707#endif
1708
1688 /* These may be needed for future network-power-down code. */ 1709 /* These may be needed for future network-power-down code. */
1689 1710
1690 /* 1711 /*
@@ -1762,6 +1783,7 @@ struct net_device {
1762#endif 1783#endif
1763 struct phy_device *phydev; 1784 struct phy_device *phydev;
1764 struct lock_class_key *qdisc_tx_busylock; 1785 struct lock_class_key *qdisc_tx_busylock;
1786 bool proto_down;
1765}; 1787};
1766#define to_net_dev(d) container_of(d, struct net_device, dev) 1788#define to_net_dev(d) container_of(d, struct net_device, dev)
1767 1789
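Three members added to struct net_device in this series show up in the hunks around here: @vrf_ptr, @offload_fwd_mark and @proto_down. As a rough illustration only — the receive helper below and the matching sk_buff field are assumptions taken from the same switchdev patch set, not defined by this header — a switchdev driver could copy its forwarding mark onto frames it receives so the software bridge can tell the hardware already forwarded them:

/* hedged sketch; assumes skb->offload_fwd_mark from the same series */
static void my_port_receive(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_NET_SWITCHDEV
	/* mark the frame as already forwarded by the offload device */
	skb->offload_fwd_mark = dev->offload_fwd_mark;
#endif
	netif_receive_skb(skb);
}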
@@ -2093,6 +2115,13 @@ struct netdev_notifier_change_info {
2093 unsigned int flags_changed; 2115 unsigned int flags_changed;
2094}; 2116};
2095 2117
2118struct netdev_notifier_changeupper_info {
2119 struct netdev_notifier_info info; /* must be first */
2120 struct net_device *upper_dev; /* new upper dev */
2121 bool master; /* is upper dev master */
 2122 bool linking; /* is the notification for link or unlink */
2123};
2124
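A hedged sketch of consuming the new payload from a NETDEV_CHANGEUPPER notifier; the callback name is invented and only the fields introduced above are touched:

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_CHANGEUPPER)
		return NOTIFY_DONE;

	if (info->linking && info->master)
		netdev_info(dev, "enslaved to %s\n", info->upper_dev->name);

	return NOTIFY_DONE;
}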
2096static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 2125static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2097 struct net_device *dev) 2126 struct net_device *dev)
2098{ 2127{
@@ -2277,8 +2306,7 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2277 2306
2278static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb) 2307static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2279{ 2308{
2280 return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) == 2309 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2281 skb_gro_offset(skb));
2282} 2310}
2283 2311
2284static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, 2312static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2374,37 +2402,58 @@ static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2374 grc->delta = 0; 2402 grc->delta = 0;
2375} 2403}
2376 2404
2377static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, 2405static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2378 int start, int offset, 2406 unsigned int off, size_t hdrlen,
2379 struct gro_remcsum *grc, 2407 int start, int offset,
2380 bool nopartial) 2408 struct gro_remcsum *grc,
2409 bool nopartial)
2381{ 2410{
2382 __wsum delta; 2411 __wsum delta;
2412 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
2383 2413
2384 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); 2414 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2385 2415
2386 if (!nopartial) { 2416 if (!nopartial) {
2387 NAPI_GRO_CB(skb)->gro_remcsum_start = 2417 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2388 ((unsigned char *)ptr + start) - skb->head; 2418 return ptr;
2389 return; 2419 }
2420
2421 ptr = skb_gro_header_fast(skb, off);
2422 if (skb_gro_header_hard(skb, off + plen)) {
2423 ptr = skb_gro_header_slow(skb, off + plen, off);
2424 if (!ptr)
2425 return NULL;
2390 } 2426 }
2391 2427
2392 delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset); 2428 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2429 start, offset);
2393 2430
2394 /* Adjust skb->csum since we changed the packet */ 2431 /* Adjust skb->csum since we changed the packet */
2395 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); 2432 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2396 2433
2397 grc->offset = (ptr + offset) - (void *)skb->head; 2434 grc->offset = off + hdrlen + offset;
2398 grc->delta = delta; 2435 grc->delta = delta;
2436
2437 return ptr;
2399} 2438}
2400 2439
2401static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, 2440static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2402 struct gro_remcsum *grc) 2441 struct gro_remcsum *grc)
2403{ 2442{
2443 void *ptr;
2444 size_t plen = grc->offset + sizeof(u16);
2445
2404 if (!grc->delta) 2446 if (!grc->delta)
2405 return; 2447 return;
2406 2448
2407 remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta); 2449 ptr = skb_gro_header_fast(skb, grc->offset);
2450 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2451 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2452 if (!ptr)
2453 return;
2454 }
2455
2456 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2408} 2457}
2409 2458
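With the reworked helpers the caller now passes the gro offset and the tunnel header length, and must continue with the returned pointer, which may point into a freshly pulled header (NULL means the pull failed). A minimal sketch under invented names — it mirrors the call pattern a tunnel gro_receive handler would use, not any specific protocol:

static void *my_tun_gro_remcsum(struct sk_buff *skb, void *hdr,
				unsigned int off, size_t hdrlen,
				int start, int offset,
				struct gro_remcsum *grc, bool nopartial)
{
	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;	/* nothing to adjust without a verified csum */

	/* may re-pull the header; only the returned pointer is safe to use */
	return skb_gro_remcsum_process(skb, hdr, off, hdrlen,
				       start, offset, grc, nopartial);
}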
2410static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 2459static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -2982,6 +3031,7 @@ int dev_get_phys_port_id(struct net_device *dev,
2982 struct netdev_phys_item_id *ppid); 3031 struct netdev_phys_item_id *ppid);
2983int dev_get_phys_port_name(struct net_device *dev, 3032int dev_get_phys_port_name(struct net_device *dev,
2984 char *name, size_t len); 3033 char *name, size_t len);
3034int dev_change_proto_down(struct net_device *dev, bool proto_down);
2985struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); 3035struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
2986struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3036struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2987 struct netdev_queue *txq, int *ret); 3037 struct netdev_queue *txq, int *ret);
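dev_change_proto_down() flips the new proto_down flag through the driver so "protocol is down" can be propagated to the switch port. A hedged sketch of a control-path caller; the helper name is invented and the rtnl locking mirrors the other dev_change_*() helpers:

static int my_set_proto_down(struct net *net, const char *ifname, bool down)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(net, ifname);
	if (dev)
		err = dev_change_proto_down(dev, down);
	rtnl_unlock();

	return err;
}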
@@ -3781,6 +3831,42 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
3781 return dev->priv_flags & IFF_SUPP_NOFCS; 3831 return dev->priv_flags & IFF_SUPP_NOFCS;
3782} 3832}
3783 3833
3834static inline bool netif_is_vrf(const struct net_device *dev)
3835{
3836 return dev->priv_flags & IFF_VRF_MASTER;
3837}
3838
3839static inline bool netif_is_bridge_master(const struct net_device *dev)
3840{
3841 return dev->priv_flags & IFF_EBRIDGE;
3842}
3843
3844static inline bool netif_is_ovs_master(const struct net_device *dev)
3845{
3846 return dev->priv_flags & IFF_OPENVSWITCH;
3847}
3848
3849static inline bool netif_index_is_vrf(struct net *net, int ifindex)
3850{
3851 bool rc = false;
3852
3853#if IS_ENABLED(CONFIG_NET_VRF)
3854 struct net_device *dev;
3855
3856 if (ifindex == 0)
3857 return false;
3858
3859 rcu_read_lock();
3860
3861 dev = dev_get_by_index_rcu(net, ifindex);
3862 if (dev)
3863 rc = netif_is_vrf(dev);
3864
3865 rcu_read_unlock();
3866#endif
3867 return rc;
3868}
3869
3784/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ 3870/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
3785static inline void netif_keep_dst(struct net_device *dev) 3871static inline void netif_keep_dst(struct net_device *dev)
3786{ 3872{
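The new predicates test priv_flags set in the hunk near the top of this file; netif_index_is_vrf() additionally resolves an ifindex under RCU and compiles away without CONFIG_NET_VRF. A hedged sketch of the sort of check a FIB lookup path might make (the function name and policy are invented):

static bool my_flow_uses_vrf(struct net *net, const struct flowi4 *fl4)
{
	/* a flow bound to a VRF master should be looked up in its table */
	return fl4->flowi4_oif && netif_index_is_vrf(net, fl4->flowi4_oif);
}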
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 00050dfd9f23..36a652531791 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,8 @@
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/static_key.h> 12#include <linux/static_key.h>
13#include <linux/netfilter_defs.h> 13#include <linux/netfilter_defs.h>
14#include <linux/netdevice.h>
15#include <net/net_namespace.h>
14 16
15#ifdef CONFIG_NETFILTER 17#ifdef CONFIG_NETFILTER
16static inline int NF_DROP_GETERR(int verdict) 18static inline int NF_DROP_GETERR(int verdict)
@@ -118,6 +120,13 @@ struct nf_sockopt_ops {
118}; 120};
119 121
120/* Function to register/unregister hook points. */ 122/* Function to register/unregister hook points. */
123int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
124void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
125int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
126 unsigned int n);
127void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
128 unsigned int n);
129
121int nf_register_hook(struct nf_hook_ops *reg); 130int nf_register_hook(struct nf_hook_ops *reg);
122void nf_unregister_hook(struct nf_hook_ops *reg); 131void nf_unregister_hook(struct nf_hook_ops *reg);
123int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n); 132int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
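The per-netns registration API mirrors the old global one but takes the namespace explicitly. A minimal sketch of registering one IPv4 LOCAL_OUT hook from pernet init/exit callbacks (needs <linux/netfilter.h> and <linux/netfilter_ipv4.h>; the hook prototype is assumed to follow this tree's nf_hookfn):

static unsigned int my_hook_fn(const struct nf_hook_ops *ops,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT;
}

static struct nf_hook_ops my_ops = {
	.hook		= my_hook_fn,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_OUT,
	.priority	= NF_IP_PRI_FILTER,
};

static int __net_init my_net_init(struct net *net)
{
	return nf_register_net_hook(net, &my_ops);
}

static void __net_exit my_net_exit(struct net *net)
{
	nf_unregister_net_hook(net, &my_ops);
}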
@@ -128,33 +137,26 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
128int nf_register_sockopt(struct nf_sockopt_ops *reg); 137int nf_register_sockopt(struct nf_sockopt_ops *reg);
129void nf_unregister_sockopt(struct nf_sockopt_ops *reg); 138void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
130 139
131extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
132
133#ifdef HAVE_JUMP_LABEL 140#ifdef HAVE_JUMP_LABEL
134extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 141extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
135 142
136static inline bool nf_hook_list_active(struct list_head *nf_hook_list, 143static inline bool nf_hook_list_active(struct list_head *hook_list,
137 u_int8_t pf, unsigned int hook) 144 u_int8_t pf, unsigned int hook)
138{ 145{
139 if (__builtin_constant_p(pf) && 146 if (__builtin_constant_p(pf) &&
140 __builtin_constant_p(hook)) 147 __builtin_constant_p(hook))
141 return static_key_false(&nf_hooks_needed[pf][hook]); 148 return static_key_false(&nf_hooks_needed[pf][hook]);
142 149
143 return !list_empty(nf_hook_list); 150 return !list_empty(hook_list);
144} 151}
145#else 152#else
146static inline bool nf_hook_list_active(struct list_head *nf_hook_list, 153static inline bool nf_hook_list_active(struct list_head *hook_list,
147 u_int8_t pf, unsigned int hook) 154 u_int8_t pf, unsigned int hook)
148{ 155{
149 return !list_empty(nf_hook_list); 156 return !list_empty(hook_list);
150} 157}
151#endif 158#endif
152 159
153static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
154{
155 return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
156}
157
158int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); 160int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
159 161
160/** 162/**
@@ -172,10 +174,13 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
172 int (*okfn)(struct sock *, struct sk_buff *), 174 int (*okfn)(struct sock *, struct sk_buff *),
173 int thresh) 175 int thresh)
174{ 176{
175 if (nf_hooks_active(pf, hook)) { 177 struct net *net = dev_net(indev ? indev : outdev);
178 struct list_head *hook_list = &net->nf.hooks[pf][hook];
179
180 if (nf_hook_list_active(hook_list, pf, hook)) {
176 struct nf_hook_state state; 181 struct nf_hook_state state;
177 182
178 nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh, 183 nf_hook_state_init(&state, hook_list, hook, thresh,
179 pf, indev, outdev, sk, okfn); 184 pf, indev, outdev, sk, okfn);
180 return nf_hook_slow(skb, &state); 185 return nf_hook_slow(skb, &state);
181 } 186 }
@@ -363,6 +368,8 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
363#endif /*CONFIG_NETFILTER*/ 368#endif /*CONFIG_NETFILTER*/
364 369
365#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 370#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
371#include <linux/netfilter/nf_conntrack_zones_common.h>
372
366extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; 373extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
367void nf_ct_attach(struct sk_buff *, const struct sk_buff *); 374void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
368extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; 375extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
@@ -385,4 +392,15 @@ extern struct nfq_ct_hook __rcu *nfq_ct_hook;
385static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} 392static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
386#endif 393#endif
387 394
395/**
396 * nf_skb_duplicated - TEE target has sent a packet
397 *
 398 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
399 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
400 *
401 * This is used by xtables TEE target to prevent the duplicated skb from
402 * being duplicated again.
403 */
404DECLARE_PER_CPU(bool, nf_skb_duplicated);
405
388#endif /*__LINUX_NETFILTER_H*/ 406#endif /*__LINUX_NETFILTER_H*/
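nf_skb_duplicated is a per-cpu flag that a TEE-style target sets around the transmit of its clone, so that the re-entered OUTPUT/POSTROUTING traversal can recognise the duplicate and refrain from duplicating it again. A hedged sketch of the guard pattern; ip_local_out() stands in for whatever transmit path the target actually uses:

static void my_tee_send(struct sk_buff *clone)
{
	/* hook context; the per-cpu flag marks this CPU's duplicated xmit */
	if (__this_cpu_read(nf_skb_duplicated))
		return;

	__this_cpu_write(nf_skb_duplicated, true);
	ip_local_out(clone);	/* re-enters OUTPUT/POSTROUTING */
	__this_cpu_write(nf_skb_duplicated, false);
}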
diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h
new file mode 100644
index 000000000000..5d7cf36d4766
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_zones_common.h
@@ -0,0 +1,23 @@
1#ifndef _NF_CONNTRACK_ZONES_COMMON_H
2#define _NF_CONNTRACK_ZONES_COMMON_H
3
4#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
5
6#define NF_CT_DEFAULT_ZONE_ID 0
7
8#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
9#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
10
11#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
12
13#define NF_CT_FLAG_MARK 1
14
15struct nf_conntrack_zone {
16 u16 id;
17 u8 flags;
18 u8 dir;
19};
20
21extern const struct nf_conntrack_zone nf_ct_zone_dflt;
22
23#endif /* _NF_CONNTRACK_ZONES_COMMON_H */
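The shared zone type carries an id, flags and a direction mask. A plausible definition of the exported default zone, consistent with the constants above (the real initializer lives in the conntrack core and may differ):

const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};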
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 6ec975748742..80ca889b164e 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -2,6 +2,7 @@
2#define _NFNL_ACCT_H_ 2#define _NFNL_ACCT_H_
3 3
4#include <uapi/linux/netfilter/nfnetlink_acct.h> 4#include <uapi/linux/netfilter/nfnetlink_acct.h>
5#include <net/net_namespace.h>
5 6
6enum { 7enum {
7 NFACCT_NO_QUOTA = -1, 8 NFACCT_NO_QUOTA = -1,
@@ -11,7 +12,7 @@ enum {
11 12
12struct nf_acct; 13struct nf_acct;
13 14
14struct nf_acct *nfnl_acct_find_get(const char *filter_name); 15struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
15void nfnl_acct_put(struct nf_acct *acct); 16void nfnl_acct_put(struct nf_acct *acct);
16void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); 17void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
17extern int nfnl_acct_overquota(const struct sk_buff *skb, 18extern int nfnl_acct_overquota(const struct sk_buff *skb,
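nfnl_acct_find_get() now resolves the named counter inside a specific namespace. A short sketch of the lookup/update/put cycle using only the functions declared here; the counter name and wrapper are invented:

static int my_account(struct net *net, struct sk_buff *skb)
{
	struct nf_acct *acct;

	acct = nfnl_acct_find_get(net, "my-counter");
	if (!acct)
		return -ENOENT;

	nfnl_acct_update(skb, acct);
	nfnl_acct_put(acct);
	return 0;
}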
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 286098a5667f..b006b719183f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
3 3
4 4
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/static_key.h>
6#include <uapi/linux/netfilter/x_tables.h> 7#include <uapi/linux/netfilter/x_tables.h>
7 8
8/** 9/**
@@ -222,7 +223,6 @@ struct xt_table_info {
222 * @stacksize jumps (number of user chains) can possibly be made. 223 * @stacksize jumps (number of user chains) can possibly be made.
223 */ 224 */
224 unsigned int stacksize; 225 unsigned int stacksize;
225 unsigned int __percpu *stackptr;
226 void ***jumpstack; 226 void ***jumpstack;
227 227
228 unsigned char entries[0] __aligned(8); 228 unsigned char entries[0] __aligned(8);
@@ -281,6 +281,12 @@ void xt_free_table_info(struct xt_table_info *info);
281 */ 281 */
282DECLARE_PER_CPU(seqcount_t, xt_recseq); 282DECLARE_PER_CPU(seqcount_t, xt_recseq);
283 283
284/* xt_tee_enabled - true if x_tables needs to handle reentrancy
285 *
286 * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
287 */
288extern struct static_key xt_tee_enabled;
289
284/** 290/**
285 * xt_write_recseq_begin - start of a write section 291 * xt_write_recseq_begin - start of a write section
286 * 292 *
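xt_tee_enabled lets the x_tables traversal pay for re-entrancy handling only while at least one -j TEE rule is loaded. A hedged sketch of how a TEE target could maintain the key and how a hot path could test it (the real checkentry/destroy take x_tables parameter structs, elided here):

static int my_tee_checkentry(void)
{
	static_key_slow_inc(&xt_tee_enabled);
	return 0;
}

static void my_tee_destroy(void)
{
	static_key_slow_dec(&xt_tee_enabled);
}

static bool my_need_reentrancy_handling(void)
{
	/* hot path: false unless some TEE rule bumped the key */
	return static_key_false(&xt_tee_enabled);
}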
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 6d80fc686323..2437b8a5d7a9 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,9 +17,6 @@ enum nf_br_hook_priorities {
17 17
18#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 18#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
19 19
20#define BRNF_BRIDGED_DNAT 0x02
21#define BRNF_NF_BRIDGE_PREROUTING 0x08
22
23int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); 20int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
24 21
25static inline void br_drop_fake_rtable(struct sk_buff *skb) 22static inline void br_drop_fake_rtable(struct sk_buff *skb)
@@ -63,8 +60,17 @@ nf_bridge_get_physoutdev(const struct sk_buff *skb)
63{ 60{
64 return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL; 61 return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
65} 62}
63
64static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
65{
66 return skb->nf_bridge && skb->nf_bridge->in_prerouting;
67}
66#else 68#else
67#define br_drop_fake_rtable(skb) do { } while (0) 69#define br_drop_fake_rtable(skb) do { } while (0)
70static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
71{
72 return false;
73}
68#endif /* CONFIG_BRIDGE_NETFILTER */ 74#endif /* CONFIG_BRIDGE_NETFILTER */
69 75
70#endif 76#endif
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 8b7d28f3aada..771574677e83 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,15 +9,6 @@
9 9
10#include <uapi/linux/netfilter_ipv6.h> 10#include <uapi/linux/netfilter_ipv6.h>
11 11
12
13#ifdef CONFIG_NETFILTER
14int ip6_route_me_harder(struct sk_buff *skb);
15__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
16 unsigned int dataoff, u_int8_t protocol);
17
18int ipv6_netfilter_init(void);
19void ipv6_netfilter_fini(void);
20
21/* 12/*
22 * Hook functions for ipv6 to allow xt_* modules to be built-in even 13 * Hook functions for ipv6 to allow xt_* modules to be built-in even
23 * if IPv6 is a module. 14 * if IPv6 is a module.
@@ -30,6 +21,14 @@ struct nf_ipv6_ops {
30 int (*output)(struct sock *, struct sk_buff *)); 21 int (*output)(struct sock *, struct sk_buff *));
31}; 22};
32 23
24#ifdef CONFIG_NETFILTER
25int ip6_route_me_harder(struct sk_buff *skb);
26__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
27 unsigned int dataoff, u_int8_t protocol);
28
29int ipv6_netfilter_init(void);
30void ipv6_netfilter_fini(void);
31
33extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; 32extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
34static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) 33static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
35{ 34{
@@ -39,6 +38,7 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
39#else /* CONFIG_NETFILTER */ 38#else /* CONFIG_NETFILTER */
40static inline int ipv6_netfilter_init(void) { return 0; } 39static inline int ipv6_netfilter_init(void) { return 0; }
41static inline void ipv6_netfilter_fini(void) { return; } 40static inline void ipv6_netfilter_fini(void) { return; }
41static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; }
42#endif /* CONFIG_NETFILTER */ 42#endif /* CONFIG_NETFILTER */
43 43
44#endif /*__LINUX_IP6_NETFILTER_H*/ 44#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9120edb650a0..639e9b8b0e4d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -68,8 +68,17 @@ extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
68extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); 68extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
69extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); 69extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
70extern int netlink_has_listeners(struct sock *sk, unsigned int group); 70extern int netlink_has_listeners(struct sock *sk, unsigned int group);
71extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size, 71
72 u32 dst_portid, gfp_t gfp_mask); 72extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
73 unsigned int ldiff, u32 dst_portid,
74 gfp_t gfp_mask);
75static inline struct sk_buff *
76netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
77 gfp_t gfp_mask)
78{
79 return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
80}
81
73extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); 82extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
74extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, 83extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
75 __u32 group, gfp_t allocation); 84 __u32 group, gfp_t allocation);
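__netlink_alloc_skb() grows an ldiff argument while the old entry point survives as a trivial wrapper passing ldiff == 0, so existing callers are unchanged. Illustrative only; the helper name is invented:

static struct sk_buff *my_alloc_reply(struct sock *ssk, unsigned int size,
				      u32 dst_portid)
{
	/* same as __netlink_alloc_skb(ssk, size, 0, dst_portid, GFP_KERNEL) */
	return netlink_alloc_skb(ssk, size, dst_portid, GFP_KERNEL);
}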
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b8e72aad919c..00121f298269 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -547,6 +547,24 @@ enum pnfs_notify_deviceid_type4 {
547 NOTIFY_DEVICEID4_DELETE = 1 << 2, 547 NOTIFY_DEVICEID4_DELETE = 1 << 2,
548}; 548};
549 549
550enum pnfs_block_volume_type {
551 PNFS_BLOCK_VOLUME_SIMPLE = 0,
552 PNFS_BLOCK_VOLUME_SLICE = 1,
553 PNFS_BLOCK_VOLUME_CONCAT = 2,
554 PNFS_BLOCK_VOLUME_STRIPE = 3,
555};
556
557enum pnfs_block_extent_state {
558 PNFS_BLOCK_READWRITE_DATA = 0,
559 PNFS_BLOCK_READ_DATA = 1,
560 PNFS_BLOCK_INVALID_DATA = 2,
561 PNFS_BLOCK_NONE_DATA = 3,
562};
563
564/* on the wire size of a block layout extent */
565#define PNFS_BLOCK_EXTENT_SIZE \
566 (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
567
550#define NFL4_UFLG_MASK 0x0000003F 568#define NFL4_UFLG_MASK 0x0000003F
551#define NFL4_UFLG_DENSE 0x00000001 569#define NFL4_UFLG_DENSE 0x00000001
552#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002 570#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 874b77228fb9..c0e961474a52 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -353,7 +353,6 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
353extern void nfs_access_set_mask(struct nfs_access_entry *, u32); 353extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
354extern int nfs_permission(struct inode *, int); 354extern int nfs_permission(struct inode *, int);
355extern int nfs_open(struct inode *, struct file *); 355extern int nfs_open(struct inode *, struct file *);
356extern int nfs_release(struct inode *, struct file *);
357extern int nfs_attribute_timeout(struct inode *inode); 356extern int nfs_attribute_timeout(struct inode *inode);
358extern int nfs_attribute_cache_expired(struct inode *inode); 357extern int nfs_attribute_cache_expired(struct inode *inode);
359extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); 358extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
@@ -371,6 +370,7 @@ extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struc
371extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode); 370extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
372extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); 371extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
373extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); 372extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
373extern void nfs_file_clear_open_context(struct file *flip);
374extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); 374extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
375extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); 375extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
376extern u64 nfs_compat_user_ino64(u64 fileid); 376extern u64 nfs_compat_user_ino64(u64 fileid);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 20bc8e51b161..570a7df2775b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -173,6 +173,11 @@ struct nfs_server {
173 set of attributes supported 173 set of attributes supported
174 on this filesystem excluding 174 on this filesystem excluding
175 the label support bit. */ 175 the label support bit. */
176 u32 exclcreat_bitmask[3];
177 /* V4 bitmask representing the
178 set of attributes supported
179 on this filesystem for the
180 exclusive create. */
176 u32 cache_consistency_bitmask[3]; 181 u32 cache_consistency_bitmask[3];
177 /* V4 bitmask representing the subset 182 /* V4 bitmask representing the subset
178 of change attribute, size, ctime 183 of change attribute, size, ctime
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7bbe50504211..52faf7e96c65 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -379,7 +379,7 @@ struct nfs_openargs {
379 struct stateowner_id id; 379 struct stateowner_id id;
380 union { 380 union {
381 struct { 381 struct {
382 struct iattr * attrs; /* UNCHECKED, GUARDED */ 382 struct iattr * attrs; /* UNCHECKED, GUARDED, EXCLUSIVE4_1 */
383 nfs4_verifier verifier; /* EXCLUSIVE */ 383 nfs4_verifier verifier; /* EXCLUSIVE */
384 }; 384 };
385 nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ 385 nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
@@ -389,7 +389,7 @@ struct nfs_openargs {
389 const struct nfs_server *server; /* Needed for ID mapping */ 389 const struct nfs_server *server; /* Needed for ID mapping */
390 const u32 * bitmask; 390 const u32 * bitmask;
391 const u32 * open_bitmap; 391 const u32 * open_bitmap;
392 __u32 claim; 392 enum open_claim_type4 claim;
393 enum createmode4 createmode; 393 enum createmode4 createmode;
394 const struct nfs4_label *label; 394 const struct nfs4_label *label;
395}; 395};
@@ -406,8 +406,8 @@ struct nfs_openres {
406 const struct nfs_server *server; 406 const struct nfs_server *server;
407 fmode_t delegation_type; 407 fmode_t delegation_type;
408 nfs4_stateid delegation; 408 nfs4_stateid delegation;
409 unsigned long pagemod_limit;
409 __u32 do_recall; 410 __u32 do_recall;
410 __u64 maxsize;
411 __u32 attrset[NFS4_BITMAP_SIZE]; 411 __u32 attrset[NFS4_BITMAP_SIZE];
412 struct nfs4_string *owner; 412 struct nfs4_string *owner;
413 struct nfs4_string *group_owner; 413 struct nfs4_string *group_owner;
@@ -1057,11 +1057,13 @@ struct nfs4_statfs_res {
1057struct nfs4_server_caps_arg { 1057struct nfs4_server_caps_arg {
1058 struct nfs4_sequence_args seq_args; 1058 struct nfs4_sequence_args seq_args;
1059 struct nfs_fh *fhandle; 1059 struct nfs_fh *fhandle;
1060 const u32 * bitmask;
1060}; 1061};
1061 1062
1062struct nfs4_server_caps_res { 1063struct nfs4_server_caps_res {
1063 struct nfs4_sequence_res seq_res; 1064 struct nfs4_sequence_res seq_res;
1064 u32 attr_bitmask[3]; 1065 u32 attr_bitmask[3];
1066 u32 exclcreat_bitmask[3];
1065 u32 acl_bitmask; 1067 u32 acl_bitmask;
1066 u32 has_links; 1068 u32 has_links;
1067 u32 has_symlinks; 1069 u32 has_symlinks;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index f94da0e65dea..78488e099ce7 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -27,9 +27,7 @@ static inline void touch_nmi_watchdog(void)
27#if defined(CONFIG_HARDLOCKUP_DETECTOR) 27#if defined(CONFIG_HARDLOCKUP_DETECTOR)
28extern void hardlockup_detector_disable(void); 28extern void hardlockup_detector_disable(void);
29#else 29#else
30static inline void hardlockup_detector_disable(void) 30static inline void hardlockup_detector_disable(void) {}
31{
32}
33#endif 31#endif
34 32
35/* 33/*
@@ -49,6 +47,12 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
49 arch_trigger_all_cpu_backtrace(false); 47 arch_trigger_all_cpu_backtrace(false);
50 return true; 48 return true;
51} 49}
50
51/* generic implementation */
52void nmi_trigger_all_cpu_backtrace(bool include_self,
53 void (*raise)(cpumask_t *mask));
54bool nmi_cpu_backtrace(struct pt_regs *regs);
55
52#else 56#else
53static inline bool trigger_all_cpu_backtrace(void) 57static inline bool trigger_all_cpu_backtrace(void)
54{ 58{
@@ -80,6 +84,17 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
80 void __user *, size_t *, loff_t *); 84 void __user *, size_t *, loff_t *);
81extern int proc_watchdog_cpumask(struct ctl_table *, int, 85extern int proc_watchdog_cpumask(struct ctl_table *, int,
82 void __user *, size_t *, loff_t *); 86 void __user *, size_t *, loff_t *);
87extern int lockup_detector_suspend(void);
88extern void lockup_detector_resume(void);
89#else
90static inline int lockup_detector_suspend(void)
91{
92 return 0;
93}
94
95static inline void lockup_detector_resume(void)
96{
97}
83#endif 98#endif
84 99
85#ifdef CONFIG_HAVE_ACPI_APEI_NMI 100#ifdef CONFIG_HAVE_ACPI_APEI_NMI
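nmi_trigger_all_cpu_backtrace() factors the common "signal every CPU and collect each backtrace" logic out of the architectures, which now only supply a raise callback and call nmi_cpu_backtrace() from their NMI handler. A hedged sketch; the IPI helper is a placeholder:

static void my_raise_backtrace_nmi(cpumask_t *mask)
{
	/* arch-specific: send the backtrace NMI/IPI to the CPUs in *mask */
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, my_raise_backtrace_nmi);
}

/* in the arch NMI handler: nmi_cpu_backtrace(regs) returns true if this
 * CPU was asked for (and has now printed) its backtrace. */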
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c0d94ed8ce9a..b5812c395351 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -28,18 +28,32 @@ struct nvme_bar {
28 __u32 cc; /* Controller Configuration */ 28 __u32 cc; /* Controller Configuration */
29 __u32 rsvd1; /* Reserved */ 29 __u32 rsvd1; /* Reserved */
30 __u32 csts; /* Controller Status */ 30 __u32 csts; /* Controller Status */
31 __u32 rsvd2; /* Reserved */ 31 __u32 nssr; /* Subsystem Reset */
32 __u32 aqa; /* Admin Queue Attributes */ 32 __u32 aqa; /* Admin Queue Attributes */
33 __u64 asq; /* Admin SQ Base Address */ 33 __u64 asq; /* Admin SQ Base Address */
34 __u64 acq; /* Admin CQ Base Address */ 34 __u64 acq; /* Admin CQ Base Address */
35 __u32 cmbloc; /* Controller Memory Buffer Location */
36 __u32 cmbsz; /* Controller Memory Buffer Size */
35}; 37};
36 38
37#define NVME_CAP_MQES(cap) ((cap) & 0xffff) 39#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
38#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) 40#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
39#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) 41#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
42#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
40#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) 43#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
41#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) 44#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
42 45
46#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
47#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
48#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
49#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
50
51#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
52#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
53#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
54#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
55#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
56
43enum { 57enum {
44 NVME_CC_ENABLE = 1 << 0, 58 NVME_CC_ENABLE = 1 << 0,
45 NVME_CC_CSS_NVM = 0 << 4, 59 NVME_CC_CSS_NVM = 0 << 4,
@@ -55,6 +69,7 @@ enum {
55 NVME_CC_IOCQES = 4 << 20, 69 NVME_CC_IOCQES = 4 << 20,
56 NVME_CSTS_RDY = 1 << 0, 70 NVME_CSTS_RDY = 1 << 0,
57 NVME_CSTS_CFS = 1 << 1, 71 NVME_CSTS_CFS = 1 << 1,
72 NVME_CSTS_NSSRO = 1 << 4,
58 NVME_CSTS_SHST_NORMAL = 0 << 2, 73 NVME_CSTS_SHST_NORMAL = 0 << 2,
59 NVME_CSTS_SHST_OCCUR = 1 << 2, 74 NVME_CSTS_SHST_OCCUR = 1 << 2,
60 NVME_CSTS_SHST_CMPLT = 2 << 2, 75 NVME_CSTS_SHST_CMPLT = 2 << 2,
@@ -97,9 +112,14 @@ struct nvme_dev {
97 char serial[20]; 112 char serial[20];
98 char model[40]; 113 char model[40];
99 char firmware_rev[8]; 114 char firmware_rev[8];
115 bool subsystem;
100 u32 max_hw_sectors; 116 u32 max_hw_sectors;
101 u32 stripe_size; 117 u32 stripe_size;
102 u32 page_size; 118 u32 page_size;
119 void __iomem *cmb;
120 dma_addr_t cmb_dma_addr;
121 u64 cmb_size;
122 u32 cmbsz;
103 u16 oncs; 123 u16 oncs;
104 u16 abort_limit; 124 u16 abort_limit;
105 u8 event_limit; 125 u8 event_limit;
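The new registers and macros describe the optional Controller Memory Buffer. A hedged sketch of decoding its size and offset, following the NVMe 1.2 register layout these macros appear to track (SZU selects a 4KB << (4 * SZU) granule and CMBLOC.OFST is expressed in those units; dev->bar is assumed to be the mapped register block):

static u64 my_cmb_bytes(struct nvme_dev *dev, u64 *offset)
{
	u32 cmbloc = readl(&dev->bar->cmbloc);
	u32 cmbsz  = readl(&dev->bar->cmbsz);
	u64 szu;

	if (!NVME_CMB_SZ(cmbsz))
		return 0;			/* no CMB advertised */

	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(cmbsz));
	*offset = szu * NVME_CMB_OFST(cmbloc);	/* into BAR NVME_CMB_BIR() */
	return szu * NVME_CMB_SZ(cmbsz);
}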
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
new file mode 100644
index 000000000000..9bb77d3ed6e0
--- /dev/null
+++ b/include/linux/nvmem-consumer.h
@@ -0,0 +1,157 @@
1/*
2 * nvmem framework consumer.
3 *
4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#ifndef _LINUX_NVMEM_CONSUMER_H
13#define _LINUX_NVMEM_CONSUMER_H
14
15struct device;
16struct device_node;
17/* consumer cookie */
18struct nvmem_cell;
19struct nvmem_device;
20
21struct nvmem_cell_info {
22 const char *name;
23 unsigned int offset;
24 unsigned int bytes;
25 unsigned int bit_offset;
26 unsigned int nbits;
27};
28
29#if IS_ENABLED(CONFIG_NVMEM)
30
31/* Cell based interface */
32struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
33struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
34void nvmem_cell_put(struct nvmem_cell *cell);
35void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
36void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
37int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
38
39/* direct nvmem device read/write interface */
40struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
41struct nvmem_device *devm_nvmem_device_get(struct device *dev,
42 const char *name);
43void nvmem_device_put(struct nvmem_device *nvmem);
44void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem);
45int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
46 size_t bytes, void *buf);
47int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset,
48 size_t bytes, void *buf);
49ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
50 struct nvmem_cell_info *info, void *buf);
51int nvmem_device_cell_write(struct nvmem_device *nvmem,
52 struct nvmem_cell_info *info, void *buf);
53
54#else
55
56static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
57 const char *name)
58{
59 return ERR_PTR(-ENOSYS);
60}
61
62static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
63 const char *name)
64{
65 return ERR_PTR(-ENOSYS);
66}
67
68static inline void devm_nvmem_cell_put(struct device *dev,
69 struct nvmem_cell *cell)
70{
71
72}
73static inline void nvmem_cell_put(struct nvmem_cell *cell)
74{
75}
76
77static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
78{
79 return ERR_PTR(-ENOSYS);
80}
81
82static inline int nvmem_cell_write(struct nvmem_cell *cell,
83 const char *buf, size_t len)
84{
85 return -ENOSYS;
86}
87
88static inline struct nvmem_device *nvmem_device_get(struct device *dev,
89 const char *name)
90{
91 return ERR_PTR(-ENOSYS);
92}
93
94static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
95 const char *name)
96{
97 return ERR_PTR(-ENOSYS);
98}
99
100static inline void nvmem_device_put(struct nvmem_device *nvmem)
101{
102}
103
104static inline void devm_nvmem_device_put(struct device *dev,
105 struct nvmem_device *nvmem)
106{
107}
108
109static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
110 struct nvmem_cell_info *info,
111 void *buf)
112{
113 return -ENOSYS;
114}
115
116static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
117 struct nvmem_cell_info *info,
118 void *buf)
119{
120 return -ENOSYS;
121}
122
123static inline int nvmem_device_read(struct nvmem_device *nvmem,
124 unsigned int offset, size_t bytes,
125 void *buf)
126{
127 return -ENOSYS;
128}
129
130static inline int nvmem_device_write(struct nvmem_device *nvmem,
131 unsigned int offset, size_t bytes,
132 void *buf)
133{
134 return -ENOSYS;
135}
136#endif /* CONFIG_NVMEM */
137
138#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
139struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
140 const char *name);
141struct nvmem_device *of_nvmem_device_get(struct device_node *np,
142 const char *name);
143#else
144static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
145 const char *name)
146{
147 return ERR_PTR(-ENOSYS);
148}
149
150static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
151 const char *name)
152{
153 return ERR_PTR(-ENOSYS);
154}
155#endif /* CONFIG_NVMEM && CONFIG_OF */
156
157#endif /* ifndef _LINUX_NVMEM_CONSUMER_H */
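A minimal consumer sketch: look up a cell by name, read it, and release everything. The cell name "mac-address" and the wrapper are illustrative; error handling follows the ERR_PTR convention used by the stubs above, and the read buffer is freed by the caller:

static int my_read_mac(struct device *dev, u8 *mac /* 6 bytes */)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len >= 6)
		memcpy(mac, buf, 6);
	kfree(buf);
	return len >= 6 ? 0 : -EINVAL;
}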
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
new file mode 100644
index 000000000000..0b68caff1b3c
--- /dev/null
+++ b/include/linux/nvmem-provider.h
@@ -0,0 +1,47 @@
1/*
2 * nvmem framework provider.
3 *
4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#ifndef _LINUX_NVMEM_PROVIDER_H
13#define _LINUX_NVMEM_PROVIDER_H
14
15struct nvmem_device;
16struct nvmem_cell_info;
17
18struct nvmem_config {
19 struct device *dev;
20 const char *name;
21 int id;
22 struct module *owner;
23 const struct nvmem_cell_info *cells;
24 int ncells;
25 bool read_only;
26};
27
28#if IS_ENABLED(CONFIG_NVMEM)
29
30struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
31int nvmem_unregister(struct nvmem_device *nvmem);
32
33#else
34
35static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
36{
37 return ERR_PTR(-ENOSYS);
38}
39
40static inline int nvmem_unregister(struct nvmem_device *nvmem)
41{
42 return -ENOSYS;
43}
44
45#endif /* CONFIG_NVMEM */
46
47#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
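Registering a provider is a matter of filling nvmem_config and calling nvmem_register(); in this initial version the core is assumed to read the underlying storage through the device's regmap, so only identification fields appear in the config. A hedged sketch from a platform driver's probe (names invented):

static int my_efuse_probe(struct platform_device *pdev)
{
	struct nvmem_config config = {
		.dev		= &pdev->dev,
		.name		= "my-efuse",
		.owner		= THIS_MODULE,
		.read_only	= true,
	};
	struct nvmem_device *nvmem;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	platform_set_drvdata(pdev, nvmem);
	return 0;
}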
diff --git a/include/linux/of.h b/include/linux/of.h
index edc068d19c79..2194b8ca41f9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -136,7 +136,8 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
136 136
137static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) 137static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
138{ 138{
139 return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; 139 return is_of_node(fwnode) ?
140 container_of(fwnode, struct device_node, fwnode) : NULL;
140} 141}
141 142
142static inline bool of_have_populated_dt(void) 143static inline bool of_have_populated_dt(void)
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 69dbe312b11b..f3191828f037 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -54,7 +54,7 @@ extern int of_mm_gpiochip_add(struct device_node *np,
54 struct of_mm_gpio_chip *mm_gc); 54 struct of_mm_gpio_chip *mm_gc);
55extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); 55extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
56 56
57extern void of_gpiochip_add(struct gpio_chip *gc); 57extern int of_gpiochip_add(struct gpio_chip *gc);
58extern void of_gpiochip_remove(struct gpio_chip *gc); 58extern void of_gpiochip_remove(struct gpio_chip *gc);
59extern int of_gpio_simple_xlate(struct gpio_chip *gc, 59extern int of_gpio_simple_xlate(struct gpio_chip *gc,
60 const struct of_phandle_args *gpiospec, 60 const struct of_phandle_args *gpiospec,
@@ -76,7 +76,7 @@ static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
76 return -ENOSYS; 76 return -ENOSYS;
77} 77}
78 78
79static inline void of_gpiochip_add(struct gpio_chip *gc) { } 79static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
80static inline void of_gpiochip_remove(struct gpio_chip *gc) { } 80static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
81 81
82#endif /* CONFIG_OF_GPIO */ 82#endif /* CONFIG_OF_GPIO */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index d884929a7747..4bcbd586a672 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -74,6 +74,7 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
74 */ 74 */
75extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); 75extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
76extern struct device_node *of_irq_find_parent(struct device_node *child); 76extern struct device_node *of_irq_find_parent(struct device_node *child);
77extern void of_msi_configure(struct device *dev, struct device_node *np);
77 78
78#else /* !CONFIG_OF */ 79#else /* !CONFIG_OF */
79static inline unsigned int irq_of_parse_and_map(struct device_node *dev, 80static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 611a691145c4..956a1006aefc 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -72,6 +72,9 @@ extern int of_platform_populate(struct device_node *root,
72 const struct of_device_id *matches, 72 const struct of_device_id *matches,
73 const struct of_dev_auxdata *lookup, 73 const struct of_dev_auxdata *lookup,
74 struct device *parent); 74 struct device *parent);
75extern int of_platform_default_populate(struct device_node *root,
76 const struct of_dev_auxdata *lookup,
77 struct device *parent);
75extern void of_platform_depopulate(struct device *parent); 78extern void of_platform_depopulate(struct device *parent);
76#else 79#else
77static inline int of_platform_populate(struct device_node *root, 80static inline int of_platform_populate(struct device_node *root,
@@ -81,6 +84,12 @@ static inline int of_platform_populate(struct device_node *root,
81{ 84{
82 return -ENODEV; 85 return -ENODEV;
83} 86}
87static inline int of_platform_default_populate(struct device_node *root,
88 const struct of_dev_auxdata *lookup,
89 struct device *parent)
90{
91 return -ENODEV;
92}
84static inline void of_platform_depopulate(struct device *parent) { } 93static inline void of_platform_depopulate(struct device *parent) { }
85#endif 94#endif
86 95
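of_platform_default_populate() is a convenience wrapper that populates using the default bus match table. A one-line sketch of the typical board/arch call; passing NULL for the root is assumed to fall back to "/" exactly as of_platform_populate() does:

static int __init my_board_init(void)
{
	return of_platform_default_populate(NULL, NULL, NULL);
}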
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index c2bbf672b84e..d2fa9ca42e9a 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -41,7 +41,7 @@ enum OID {
41 OID_signed_data, /* 1.2.840.113549.1.7.2 */ 41 OID_signed_data, /* 1.2.840.113549.1.7.2 */
42 /* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */ 42 /* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */
43 OID_email_address, /* 1.2.840.113549.1.9.1 */ 43 OID_email_address, /* 1.2.840.113549.1.9.1 */
44 OID_content_type, /* 1.2.840.113549.1.9.3 */ 44 OID_contentType, /* 1.2.840.113549.1.9.3 */
45 OID_messageDigest, /* 1.2.840.113549.1.9.4 */ 45 OID_messageDigest, /* 1.2.840.113549.1.9.4 */
46 OID_signingTime, /* 1.2.840.113549.1.9.5 */ 46 OID_signingTime, /* 1.2.840.113549.1.9.5 */
47 OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ 47 OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
@@ -54,6 +54,8 @@ enum OID {
54 54
55 /* Microsoft Authenticode & Software Publishing */ 55 /* Microsoft Authenticode & Software Publishing */
56 OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ 56 OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
57 OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */
58 OID_msSpOpusInfo, /* 1.3.6.1.4.1.311.2.1.12 */
57 OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */ 59 OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */
58 OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ 60 OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
59 OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ 61 OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
@@ -61,6 +63,9 @@ enum OID {
61 OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ 63 OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
62 OID_sha1, /* 1.3.14.3.2.26 */ 64 OID_sha1, /* 1.3.14.3.2.26 */
63 OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ 65 OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
66 OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
67 OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
68 OID_sha224, /* 2.16.840.1.101.3.4.2.4 */
64 69
65 /* Distinguished Name attribute IDs [RFC 2256] */ 70 /* Distinguished Name attribute IDs [RFC 2256] */
66 OID_commonName, /* 2.5.4.3 */ 71 OID_commonName, /* 2.5.4.3 */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7deecb7bca5e..03e6257321f0 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -13,6 +13,27 @@ struct mem_cgroup;
13struct task_struct; 13struct task_struct;
14 14
15/* 15/*
16 * Details of the page allocation that triggered the oom killer that are used to
17 * determine what should be killed.
18 */
19struct oom_control {
20 /* Used to determine cpuset */
21 struct zonelist *zonelist;
22
23 /* Used to determine mempolicy */
24 nodemask_t *nodemask;
25
26 /* Used to determine cpuset and node locality requirement */
27 const gfp_t gfp_mask;
28
29 /*
30 * order == -1 means the oom kill is required by sysrq, otherwise only
31 * for display purposes.
32 */
33 const int order;
34};
35
36/*
16 * Types of limitations to the nodes from which allocations may occur 37 * Types of limitations to the nodes from which allocations may occur
17 */ 38 */
18enum oom_constraint { 39enum oom_constraint {
@@ -57,21 +78,18 @@ extern unsigned long oom_badness(struct task_struct *p,
57 78
58extern int oom_kills_count(void); 79extern int oom_kills_count(void);
59extern void note_oom_kill(void); 80extern void note_oom_kill(void);
60extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, 81extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
61 unsigned int points, unsigned long totalpages, 82 unsigned int points, unsigned long totalpages,
62 struct mem_cgroup *memcg, nodemask_t *nodemask, 83 struct mem_cgroup *memcg, const char *message);
63 const char *message);
64 84
65extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, 85extern void check_panic_on_oom(struct oom_control *oc,
66 int order, const nodemask_t *nodemask, 86 enum oom_constraint constraint,
67 struct mem_cgroup *memcg); 87 struct mem_cgroup *memcg);
68 88
69extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task, 89extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
70 unsigned long totalpages, const nodemask_t *nodemask, 90 struct task_struct *task, unsigned long totalpages);
71 bool force_kill);
72 91
73extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, 92extern bool out_of_memory(struct oom_control *oc);
74 int order, nodemask_t *mask, bool force_kill);
75 93
76extern void exit_oom_victim(void); 94extern void exit_oom_victim(void);
77 95
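The former long argument lists are folded into struct oom_control: the caller builds one descriptor and hands it to out_of_memory() and friends. A hedged sketch of the caller side (the wrapper is invented; field values are whatever the allocation context provides):

static bool my_try_oom(struct zonelist *zonelist, nodemask_t *nodemask,
		       gfp_t gfp_mask, int order)
{
	struct oom_control oc = {
		.zonelist	= zonelist,
		.nodemask	= nodemask,
		.gfp_mask	= gfp_mask,
		.order		= order,
	};

	return out_of_memory(&oc);
}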
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 41c93844fb1d..416509e26d6d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -109,6 +109,10 @@ enum pageflags {
109#ifdef CONFIG_TRANSPARENT_HUGEPAGE 109#ifdef CONFIG_TRANSPARENT_HUGEPAGE
110 PG_compound_lock, 110 PG_compound_lock,
111#endif 111#endif
112#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
113 PG_young,
114 PG_idle,
115#endif
112 __NR_PAGEFLAGS, 116 __NR_PAGEFLAGS,
113 117
114 /* Filesystems */ 118 /* Filesystems */
@@ -289,6 +293,13 @@ PAGEFLAG_FALSE(HWPoison)
289#define __PG_HWPOISON 0 293#define __PG_HWPOISON 0
290#endif 294#endif
291 295
296#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
297TESTPAGEFLAG(Young, young)
298SETPAGEFLAG(Young, young)
299TESTCLEARFLAG(Young, young)
300PAGEFLAG(Idle, idle)
301#endif
302
292/* 303/*
293 * On an anonymous page mapped into a user virtual memory area, 304 * On an anonymous page mapped into a user virtual memory area,
294 * page->mapping points to its anon_vma, not to a struct address_space; 305 * page->mapping points to its anon_vma, not to a struct address_space;
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 2dc1e1697b45..047d64706f2a 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -65,11 +65,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
65int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, 65int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
66 bool skip_hwpoisoned_pages); 66 bool skip_hwpoisoned_pages);
67 67
68/*
69 * Internal functions. Changes pageblock's migrate type.
70 */
71int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
72void unset_migratetype_isolate(struct page *page, unsigned migratetype);
73struct page *alloc_migrate_target(struct page *page, unsigned long private, 68struct page *alloc_migrate_target(struct page *page, unsigned long private,
74 int **resultp); 69 int **resultp);
75 70
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index c42981cd99aa..17f118a82854 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -26,6 +26,10 @@ enum page_ext_flags {
26 PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ 26 PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
27 PAGE_EXT_DEBUG_GUARD, 27 PAGE_EXT_DEBUG_GUARD,
28 PAGE_EXT_OWNER, 28 PAGE_EXT_OWNER,
29#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
30 PAGE_EXT_YOUNG,
31 PAGE_EXT_IDLE,
32#endif
29}; 33};
30 34
31/* 35/*
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
new file mode 100644
index 000000000000..bf268fa92c5b
--- /dev/null
+++ b/include/linux/page_idle.h
@@ -0,0 +1,110 @@
1#ifndef _LINUX_MM_PAGE_IDLE_H
2#define _LINUX_MM_PAGE_IDLE_H
3
4#include <linux/bitops.h>
5#include <linux/page-flags.h>
6#include <linux/page_ext.h>
7
8#ifdef CONFIG_IDLE_PAGE_TRACKING
9
10#ifdef CONFIG_64BIT
11static inline bool page_is_young(struct page *page)
12{
13 return PageYoung(page);
14}
15
16static inline void set_page_young(struct page *page)
17{
18 SetPageYoung(page);
19}
20
21static inline bool test_and_clear_page_young(struct page *page)
22{
23 return TestClearPageYoung(page);
24}
25
26static inline bool page_is_idle(struct page *page)
27{
28 return PageIdle(page);
29}
30
31static inline void set_page_idle(struct page *page)
32{
33 SetPageIdle(page);
34}
35
36static inline void clear_page_idle(struct page *page)
37{
38 ClearPageIdle(page);
39}
40#else /* !CONFIG_64BIT */
41/*
42 * If there is not enough space to store Idle and Young bits in page flags, use
43 * page ext flags instead.
44 */
45extern struct page_ext_operations page_idle_ops;
46
47static inline bool page_is_young(struct page *page)
48{
49 return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
50}
51
52static inline void set_page_young(struct page *page)
53{
54 set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
55}
56
57static inline bool test_and_clear_page_young(struct page *page)
58{
59 return test_and_clear_bit(PAGE_EXT_YOUNG,
60 &lookup_page_ext(page)->flags);
61}
62
63static inline bool page_is_idle(struct page *page)
64{
65 return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
66}
67
68static inline void set_page_idle(struct page *page)
69{
70 set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
71}
72
73static inline void clear_page_idle(struct page *page)
74{
75 clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
76}
77#endif /* CONFIG_64BIT */
78
79#else /* !CONFIG_IDLE_PAGE_TRACKING */
80
81static inline bool page_is_young(struct page *page)
82{
83 return false;
84}
85
86static inline void set_page_young(struct page *page)
87{
88}
89
90static inline bool test_and_clear_page_young(struct page *page)
91{
92 return false;
93}
94
95static inline bool page_is_idle(struct page *page)
96{
97 return false;
98}
99
100static inline void set_page_idle(struct page *page)
101{
102}
103
104static inline void clear_page_idle(struct page *page)
105{
106}
107
108#endif /* CONFIG_IDLE_PAGE_TRACKING */
109
110#endif /* _LINUX_MM_PAGE_IDLE_H */
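The accessors hide where the Young/Idle bits actually live (page flags on 64-bit, page_ext otherwise). A hedged sketch of the intended usage pattern from an idle-tracking scanner; the rmap side that records accesses into the young bit is omitted, and the helpers are invented:

static void my_arm_idle(struct page *page)
{
	set_page_idle(page);
	/* drop any stale access record so the next pass starts clean */
	test_and_clear_page_young(page);
}

static bool my_was_untouched(struct page *page)
{
	/* still idle and never marked young => no access since my_arm_idle() */
	return page_is_idle(page) && !page_is_young(page);
}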
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 72031785fe1d..57e0b8250947 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -3,55 +3,6 @@
3 3
4#include <linux/pci.h> 4#include <linux/pci.h>
5 5
6/* Address Translation Service */
7struct pci_ats {
8 int pos; /* capability position */
9 int stu; /* Smallest Translation Unit */
10 int qdep; /* Invalidate Queue Depth */
11 int ref_cnt; /* Physical Function reference count */
12 unsigned int is_enabled:1; /* Enable bit is set */
13};
14
15#ifdef CONFIG_PCI_ATS
16
17int pci_enable_ats(struct pci_dev *dev, int ps);
18void pci_disable_ats(struct pci_dev *dev);
19int pci_ats_queue_depth(struct pci_dev *dev);
20
21/**
22 * pci_ats_enabled - query the ATS status
23 * @dev: the PCI device
24 *
25 * Returns 1 if ATS capability is enabled, or 0 if not.
26 */
27static inline int pci_ats_enabled(struct pci_dev *dev)
28{
29 return dev->ats && dev->ats->is_enabled;
30}
31
32#else /* CONFIG_PCI_ATS */
33
34static inline int pci_enable_ats(struct pci_dev *dev, int ps)
35{
36 return -ENODEV;
37}
38
39static inline void pci_disable_ats(struct pci_dev *dev)
40{
41}
42
43static inline int pci_ats_queue_depth(struct pci_dev *dev)
44{
45 return -ENODEV;
46}
47
48static inline int pci_ats_enabled(struct pci_dev *dev)
49{
50 return 0;
51}
52
53#endif /* CONFIG_PCI_ATS */
54
55#ifdef CONFIG_PCI_PRI 6#ifdef CONFIG_PCI_PRI
56 7
57int pci_enable_pri(struct pci_dev *pdev, u32 reqs); 8int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8a0321a8fb59..e90eb22de628 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -180,6 +180,8 @@ enum pci_dev_flags {
180 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6), 180 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
181 /* Do not use PM reset even if device advertises NoSoftRst- */ 181 /* Do not use PM reset even if device advertises NoSoftRst- */
182 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), 182 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
183 /* Get VPD from function 0 VPD */
184 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
183}; 185};
184 186
185enum pci_irq_reroute_variant { 187enum pci_irq_reroute_variant {
@@ -343,6 +345,7 @@ struct pci_dev {
343 unsigned int msi_enabled:1; 345 unsigned int msi_enabled:1;
344 unsigned int msix_enabled:1; 346 unsigned int msix_enabled:1;
345 unsigned int ari_enabled:1; /* ARI forwarding */ 347 unsigned int ari_enabled:1; /* ARI forwarding */
348 unsigned int ats_enabled:1; /* Address Translation Service */
346 unsigned int is_managed:1; 349 unsigned int is_managed:1;
347 unsigned int needs_freset:1; /* Dev requires fundamental reset */ 350 unsigned int needs_freset:1; /* Dev requires fundamental reset */
348 unsigned int state_saved:1; 351 unsigned int state_saved:1;
@@ -366,7 +369,6 @@ struct pci_dev {
366 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ 369 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
367 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ 370 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
368#ifdef CONFIG_PCI_MSI 371#ifdef CONFIG_PCI_MSI
369 struct list_head msi_list;
370 const struct attribute_group **msi_irq_groups; 372 const struct attribute_group **msi_irq_groups;
371#endif 373#endif
372 struct pci_vpd *vpd; 374 struct pci_vpd *vpd;
@@ -375,7 +377,9 @@ struct pci_dev {
375 struct pci_sriov *sriov; /* SR-IOV capability related */ 377 struct pci_sriov *sriov; /* SR-IOV capability related */
376 struct pci_dev *physfn; /* the PF this VF is associated with */ 378 struct pci_dev *physfn; /* the PF this VF is associated with */
377 }; 379 };
378 struct pci_ats *ats; /* Address Translation Service */ 380 u16 ats_cap; /* ATS Capability offset */
381 u8 ats_stu; /* ATS Smallest Translation Unit */
382 atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
379#endif 383#endif
380 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ 384 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
381 size_t romlen; /* Length of ROM if it's not from the BAR */ 385 size_t romlen; /* Length of ROM if it's not from the BAR */
@@ -446,7 +450,8 @@ struct pci_bus {
446 struct list_head children; /* list of child buses */ 450 struct list_head children; /* list of child buses */
447 struct list_head devices; /* list of devices on this bus */ 451 struct list_head devices; /* list of devices on this bus */
448 struct pci_dev *self; /* bridge device as seen by parent */ 452 struct pci_dev *self; /* bridge device as seen by parent */
449 struct list_head slots; /* list of slots on this bus */ 453 struct list_head slots; /* list of slots on this bus;
454 protected by pci_slot_mutex */
450 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; 455 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
451 struct list_head resources; /* address space routed to this bus */ 456 struct list_head resources; /* address space routed to this bus */
452 struct resource busn_res; /* bus numbers routed to this bus */ 457 struct resource busn_res; /* bus numbers routed to this bus */
@@ -738,10 +743,11 @@ struct pci_driver {
738void pcie_bus_configure_settings(struct pci_bus *bus); 743void pcie_bus_configure_settings(struct pci_bus *bus);
739 744
740enum pcie_bus_config_types { 745enum pcie_bus_config_types {
741 PCIE_BUS_TUNE_OFF, 746 PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */
742 PCIE_BUS_SAFE, 747 PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */
743 PCIE_BUS_PERFORMANCE, 748 PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */
744 PCIE_BUS_PEER2PEER, 749 PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */
750 PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */
745}; 751};
746 752
747extern enum pcie_bus_config_types pcie_bus_config; 753extern enum pcie_bus_config_types pcie_bus_config;
@@ -787,6 +793,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
787int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); 793int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
788int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); 794int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
789void pci_bus_release_busn_res(struct pci_bus *b); 795void pci_bus_release_busn_res(struct pci_bus *b);
796struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
797 struct pci_ops *ops, void *sysdata,
798 struct list_head *resources,
799 struct msi_controller *msi);
790struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, 800struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
791 struct pci_ops *ops, void *sysdata, 801 struct pci_ops *ops, void *sysdata,
792 struct list_head *resources); 802 struct list_head *resources);
@@ -797,6 +807,11 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
797 const char *name, 807 const char *name,
798 struct hotplug_slot *hotplug); 808 struct hotplug_slot *hotplug);
799void pci_destroy_slot(struct pci_slot *slot); 809void pci_destroy_slot(struct pci_slot *slot);
810#ifdef CONFIG_SYSFS
811void pci_dev_assign_slot(struct pci_dev *dev);
812#else
813static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
814#endif
800int pci_scan_slot(struct pci_bus *bus, int devfn); 815int pci_scan_slot(struct pci_bus *bus, int devfn);
801struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); 816struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
802void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); 817void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -963,6 +978,23 @@ static inline int pci_is_managed(struct pci_dev *pdev)
963 return pdev->is_managed; 978 return pdev->is_managed;
964} 979}
965 980
981static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
982{
983 pdev->irq = irq;
984 pdev->irq_managed = 1;
985}
986
987static inline void pci_reset_managed_irq(struct pci_dev *pdev)
988{
989 pdev->irq = 0;
990 pdev->irq_managed = 0;
991}
992
993static inline bool pci_has_managed_irq(struct pci_dev *pdev)
994{
995 return pdev->irq_managed && pdev->irq > 0;
996}
997
966void pci_disable_device(struct pci_dev *dev); 998void pci_disable_device(struct pci_dev *dev);
967 999
968extern unsigned int pcibios_max_latency; 1000extern unsigned int pcibios_max_latency;
@@ -1195,6 +1227,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1195 dma_pool_create(name, &pdev->dev, size, align, allocation) 1227 dma_pool_create(name, &pdev->dev, size, align, allocation)
1196#define pci_pool_destroy(pool) dma_pool_destroy(pool) 1228#define pci_pool_destroy(pool) dma_pool_destroy(pool)
1197#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) 1229#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
1230#define pci_pool_zalloc(pool, flags, handle) \
1231 dma_pool_zalloc(pool, flags, handle)
1198#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) 1232#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1199 1233
1200struct msix_entry { 1234struct msix_entry {
@@ -1202,6 +1236,7 @@ struct msix_entry {
1202 u16 entry; /* driver uses to specify entry, OS writes */ 1236 u16 entry; /* driver uses to specify entry, OS writes */
1203}; 1237};
1204 1238
1239void pci_msi_setup_pci_dev(struct pci_dev *dev);
1205 1240
1206#ifdef CONFIG_PCI_MSI 1241#ifdef CONFIG_PCI_MSI
1207int pci_msi_vec_count(struct pci_dev *dev); 1242int pci_msi_vec_count(struct pci_dev *dev);
@@ -1294,6 +1329,19 @@ int ht_create_irq(struct pci_dev *dev, int idx);
1294void ht_destroy_irq(unsigned int irq); 1329void ht_destroy_irq(unsigned int irq);
1295#endif /* CONFIG_HT_IRQ */ 1330#endif /* CONFIG_HT_IRQ */
1296 1331
1332#ifdef CONFIG_PCI_ATS
1333/* Address Translation Service */
1334void pci_ats_init(struct pci_dev *dev);
1335int pci_enable_ats(struct pci_dev *dev, int ps);
1336void pci_disable_ats(struct pci_dev *dev);
1337int pci_ats_queue_depth(struct pci_dev *dev);
1338#else
1339static inline void pci_ats_init(struct pci_dev *d) { }
1340static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
1341static inline void pci_disable_ats(struct pci_dev *d) { }
1342static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
1343#endif
1344
1297void pci_cfg_access_lock(struct pci_dev *dev); 1345void pci_cfg_access_lock(struct pci_dev *dev);
1298bool pci_cfg_access_trylock(struct pci_dev *dev); 1346bool pci_cfg_access_trylock(struct pci_dev *dev);
1299void pci_cfg_access_unlock(struct pci_dev *dev); 1347void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1645,6 +1693,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
1645int pcibios_add_device(struct pci_dev *dev); 1693int pcibios_add_device(struct pci_dev *dev);
1646void pcibios_release_device(struct pci_dev *dev); 1694void pcibios_release_device(struct pci_dev *dev);
1647void pcibios_penalize_isa_irq(int irq, int active); 1695void pcibios_penalize_isa_irq(int irq, int active);
1696int pcibios_alloc_irq(struct pci_dev *dev);
1697void pcibios_free_irq(struct pci_dev *dev);
1648 1698
1649#ifdef CONFIG_HIBERNATE_CALLBACKS 1699#ifdef CONFIG_HIBERNATE_CALLBACKS
1650extern struct dev_pm_ops pcibios_pm_ops; 1700extern struct dev_pm_ops pcibios_pm_ops;
@@ -1661,6 +1711,7 @@ static inline void pci_mmcfg_late_init(void) { }
1661int pci_ext_cfg_avail(void); 1711int pci_ext_cfg_avail(void);
1662 1712
1663void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 1713void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
1714void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
1664 1715
1665#ifdef CONFIG_PCI_IOV 1716#ifdef CONFIG_PCI_IOV
1666int pci_iov_virtfn_bus(struct pci_dev *dev, int id); 1717int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
@@ -1842,10 +1893,12 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
1842/* PCI <-> OF binding helpers */ 1893/* PCI <-> OF binding helpers */
1843#ifdef CONFIG_OF 1894#ifdef CONFIG_OF
1844struct device_node; 1895struct device_node;
1896struct irq_domain;
1845void pci_set_of_node(struct pci_dev *dev); 1897void pci_set_of_node(struct pci_dev *dev);
1846void pci_release_of_node(struct pci_dev *dev); 1898void pci_release_of_node(struct pci_dev *dev);
1847void pci_set_bus_of_node(struct pci_bus *bus); 1899void pci_set_bus_of_node(struct pci_bus *bus);
1848void pci_release_bus_of_node(struct pci_bus *bus); 1900void pci_release_bus_of_node(struct pci_bus *bus);
1901struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
1849 1902
1850/* Arch may override this (weak) */ 1903/* Arch may override this (weak) */
1851struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); 1904struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -1868,6 +1921,8 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
1868static inline void pci_release_bus_of_node(struct pci_bus *bus) { } 1921static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
1869static inline struct device_node * 1922static inline struct device_node *
1870pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } 1923pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
1924static inline struct irq_domain *
1925pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
1871#endif /* CONFIG_OF */ 1926#endif /* CONFIG_OF */
1872 1927
1873#ifdef CONFIG_EEH 1928#ifdef CONFIG_EEH
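
A minimal sketch of how a driver might exercise the new pci_pool_zalloc() wrapper added above; the descriptor layout and the foo_* names are illustrative, not from any in-tree driver.

#include <linux/pci.h>
#include <linux/dmapool.h>

struct foo_desc {			/* hypothetical DMA descriptor */
	__le64 addr;
	__le32 len;
	__le32 flags;
};

static int foo_setup_desc(struct pci_dev *pdev)
{
	struct pci_pool *pool;
	struct foo_desc *desc;
	dma_addr_t dma;

	pool = pci_pool_create("foo-desc", pdev, sizeof(*desc), 8, 0);
	if (!pool)
		return -ENOMEM;

	/* New helper: returns zeroed memory, no open-coded memset() needed */
	desc = pci_pool_zalloc(pool, GFP_KERNEL, &dma);
	if (!desc) {
		pci_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the device ... */

	pci_pool_free(pool, desc, dma);
	pci_pool_destroy(pool);
	return 0;
}
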
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fcff8f865341..d9ba49cedc5d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2332,6 +2332,15 @@
2332 2332
2333#define PCI_VENDOR_ID_CAVIUM 0x177d 2333#define PCI_VENDOR_ID_CAVIUM 0x177d
2334 2334
2335#define PCI_VENDOR_ID_TECHWELL 0x1797
2336#define PCI_DEVICE_ID_TECHWELL_6800 0x6800
2337#define PCI_DEVICE_ID_TECHWELL_6801 0x6801
2338#define PCI_DEVICE_ID_TECHWELL_6804 0x6804
2339#define PCI_DEVICE_ID_TECHWELL_6816_1 0x6810
2340#define PCI_DEVICE_ID_TECHWELL_6816_2 0x6811
2341#define PCI_DEVICE_ID_TECHWELL_6816_3 0x6812
2342#define PCI_DEVICE_ID_TECHWELL_6816_4 0x6813
2343
2335#define PCI_VENDOR_ID_BELKIN 0x1799 2344#define PCI_VENDOR_ID_BELKIN 0x1799
2336#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f 2345#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
2337 2346
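
For illustration, a hedged example of a PCI match table built from the new Techwell IDs; the table name is a placeholder and the selection of devices is arbitrary.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id tw_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6800) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6801) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6804) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_6816_1) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, tw_pci_tbl);
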
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57f3a1c550dc..8f16299ca068 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -488,10 +488,8 @@ do { \
488#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) 488#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
489 489
490/* 490/*
491 * Operations with implied preemption protection. These operations can be 491 * Operations with implied preemption/interrupt protection. These
492 * used without worrying about preemption. Note that interrupts may still 492 * operations can be used without worrying about preemption or interrupt.
493 * occur while an operation is in progress and if the interrupt modifies
494 * the variable too then RMW actions may not be reliable.
495 */ 493 */
496#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) 494#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
497#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) 495#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 3e88c9a7d57f..834c4e52cb2d 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -16,6 +16,7 @@ struct percpu_rw_semaphore {
16}; 16};
17 17
18extern void percpu_down_read(struct percpu_rw_semaphore *); 18extern void percpu_down_read(struct percpu_rw_semaphore *);
19extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
19extern void percpu_up_read(struct percpu_rw_semaphore *); 20extern void percpu_up_read(struct percpu_rw_semaphore *);
20 21
21extern void percpu_down_write(struct percpu_rw_semaphore *); 22extern void percpu_down_write(struct percpu_rw_semaphore *);
@@ -31,4 +32,23 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
31 __percpu_init_rwsem(brw, #brw, &rwsem_key); \ 32 __percpu_init_rwsem(brw, #brw, &rwsem_key); \
32}) 33})
33 34
35
36#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
37
38static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
39 bool read, unsigned long ip)
40{
41 lock_release(&sem->rw_sem.dep_map, 1, ip);
42#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
43 if (!read)
44 sem->rw_sem.owner = NULL;
45#endif
46}
47
48static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
49 bool read, unsigned long ip)
50{
51 lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
52}
53
34#endif 54#endif
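
A sketch, assuming a semaphore already initialised with percpu_init_rwsem(), of the new reader-side trylock; the function and variable names are made up.

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore state_sem;	/* percpu_init_rwsem() at init time */

static bool fast_path_read(void)
{
	/* Non-blocking variant of percpu_down_read(); non-zero on success */
	if (!percpu_down_read_trylock(&state_sem))
		return false;

	/* ... read the protected state ... */

	percpu_up_read(&state_sem);
	return true;
}
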
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
new file mode 100644
index 000000000000..bfa673bb822d
--- /dev/null
+++ b/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
1/*
2 * linux/arch/arm/include/asm/pmu.h
3 *
4 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef __ARM_PMU_H__
13#define __ARM_PMU_H__
14
15#include <linux/interrupt.h>
16#include <linux/perf_event.h>
17
18#include <asm/cputype.h>
19
20/*
21 * struct arm_pmu_platdata - ARM PMU platform data
22 *
23 * @handle_irq: an optional handler which will be called from the
24 * interrupt and passed the address of the low level handler,
25 * and can be used to implement any platform specific handling
26 * before or after calling it.
27 */
28struct arm_pmu_platdata {
29 irqreturn_t (*handle_irq)(int irq, void *dev,
30 irq_handler_t pmu_handler);
31};
32
33#ifdef CONFIG_ARM_PMU
34
35/*
36 * The ARMv7 CPU PMU supports up to 32 event counters.
37 */
38#define ARMPMU_MAX_HWEVENTS 32
39
40#define HW_OP_UNSUPPORTED 0xFFFF
41#define C(_x) PERF_COUNT_HW_CACHE_##_x
42#define CACHE_OP_UNSUPPORTED 0xFFFF
43
44#define PERF_MAP_ALL_UNSUPPORTED \
45 [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
46
47#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
48[0 ... C(MAX) - 1] = { \
49 [0 ... C(OP_MAX) - 1] = { \
50 [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
51 }, \
52}
53
54/* The events for a given PMU register set. */
55struct pmu_hw_events {
56 /*
57 * The events that are active on the PMU for the given index.
58 */
59 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
60
61 /*
62 * A 1 bit for an index indicates that the counter is being used for
63 * an event. A 0 means that the counter can be used.
64 */
65 DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
66
67 /*
68 * Hardware lock to serialize accesses to PMU registers. Needed for the
69 * read/modify/write sequences.
70 */
71 raw_spinlock_t pmu_lock;
72
73 /*
74 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
75 * already have to allocate this struct per cpu.
76 */
77 struct arm_pmu *percpu_pmu;
78};
79
80struct arm_pmu {
81 struct pmu pmu;
82 cpumask_t active_irqs;
83 cpumask_t supported_cpus;
84 int *irq_affinity;
85 char *name;
86 irqreturn_t (*handle_irq)(int irq_num, void *dev);
87 void (*enable)(struct perf_event *event);
88 void (*disable)(struct perf_event *event);
89 int (*get_event_idx)(struct pmu_hw_events *hw_events,
90 struct perf_event *event);
91 void (*clear_event_idx)(struct pmu_hw_events *hw_events,
92 struct perf_event *event);
93 int (*set_event_filter)(struct hw_perf_event *evt,
94 struct perf_event_attr *attr);
95 u32 (*read_counter)(struct perf_event *event);
96 void (*write_counter)(struct perf_event *event, u32 val);
97 void (*start)(struct arm_pmu *);
98 void (*stop)(struct arm_pmu *);
99 void (*reset)(void *);
100 int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
101 void (*free_irq)(struct arm_pmu *);
102 int (*map_event)(struct perf_event *event);
103 int num_events;
104 atomic_t active_events;
105 struct mutex reserve_mutex;
106 u64 max_period;
107 struct platform_device *plat_device;
108 struct pmu_hw_events __percpu *hw_events;
109 struct notifier_block hotplug_nb;
110};
111
112#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
113
114int armpmu_register(struct arm_pmu *armpmu, int type);
115
116u64 armpmu_event_update(struct perf_event *event);
117
118int armpmu_event_set_period(struct perf_event *event);
119
120int armpmu_map_event(struct perf_event *event,
121 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
122 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
123 [PERF_COUNT_HW_CACHE_OP_MAX]
124 [PERF_COUNT_HW_CACHE_RESULT_MAX],
125 u32 raw_event_mask);
126
127struct pmu_probe_info {
128 unsigned int cpuid;
129 unsigned int mask;
130 int (*init)(struct arm_pmu *);
131};
132
133#define PMU_PROBE(_cpuid, _mask, _fn) \
134{ \
135 .cpuid = (_cpuid), \
136 .mask = (_mask), \
137 .init = (_fn), \
138}
139
140#define ARM_PMU_PROBE(_cpuid, _fn) \
141 PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
142
143#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
144
145#define XSCALE_PMU_PROBE(_version, _fn) \
146 PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
147
148int arm_pmu_device_probe(struct platform_device *pdev,
149 const struct of_device_id *of_table,
150 const struct pmu_probe_info *probe_table);
151
152#endif /* CONFIG_ARM_PMU */
153
154#endif /* __ARM_PMU_H__ */
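
A hedged sketch of how an ARM PMU driver might plug into these probe helpers; my_a9_pmu_init(), the OF table and the platform glue are all hypothetical, only the macros and arm_pmu_device_probe() come from this header.

#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static int my_a9_pmu_init(struct arm_pmu *pmu)
{
	pmu->name = "my_armv7_a9";
	/* fill in handle_irq/enable/disable/map_event/... here */
	return 0;
}

static const struct of_device_id my_pmu_of_match[] = {
	{ .compatible = "arm,cortex-a9-pmu", .data = my_a9_pmu_init },
	{ /* sentinel */ },
};

/* CPUID-based fallback when the PMU is not described in the device tree */
static const struct pmu_probe_info my_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, my_a9_pmu_init),
	{ /* sentinel */ },
};

static int my_pmu_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, my_pmu_of_match,
				    my_pmu_probe_table);
}
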
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2027809433b3..092a0e8a479a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -641,6 +641,8 @@ extern int perf_event_init_task(struct task_struct *child);
641extern void perf_event_exit_task(struct task_struct *child); 641extern void perf_event_exit_task(struct task_struct *child);
642extern void perf_event_free_task(struct task_struct *task); 642extern void perf_event_free_task(struct task_struct *task);
643extern void perf_event_delayed_put(struct task_struct *task); 643extern void perf_event_delayed_put(struct task_struct *task);
644extern struct perf_event *perf_event_get(unsigned int fd);
645extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
644extern void perf_event_print_debug(void); 646extern void perf_event_print_debug(void);
645extern void perf_pmu_disable(struct pmu *pmu); 647extern void perf_pmu_disable(struct pmu *pmu);
646extern void perf_pmu_enable(struct pmu *pmu); 648extern void perf_pmu_enable(struct pmu *pmu);
@@ -659,6 +661,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
659 void *context); 661 void *context);
660extern void perf_pmu_migrate_context(struct pmu *pmu, 662extern void perf_pmu_migrate_context(struct pmu *pmu,
661 int src_cpu, int dst_cpu); 663 int src_cpu, int dst_cpu);
664extern u64 perf_event_read_local(struct perf_event *event);
662extern u64 perf_event_read_value(struct perf_event *event, 665extern u64 perf_event_read_value(struct perf_event *event,
663 u64 *enabled, u64 *running); 666 u64 *enabled, u64 *running);
664 667
@@ -979,6 +982,12 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
979static inline void perf_event_exit_task(struct task_struct *child) { } 982static inline void perf_event_exit_task(struct task_struct *child) { }
980static inline void perf_event_free_task(struct task_struct *task) { } 983static inline void perf_event_free_task(struct task_struct *task) { }
981static inline void perf_event_delayed_put(struct task_struct *task) { } 984static inline void perf_event_delayed_put(struct task_struct *task) { }
985static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
986static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
987{
988 return ERR_PTR(-EINVAL);
989}
990static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
982static inline void perf_event_print_debug(void) { } 991static inline void perf_event_print_debug(void) { }
983static inline int perf_event_task_disable(void) { return -EINVAL; } 992static inline int perf_event_task_disable(void) { return -EINVAL; }
984static inline int perf_event_task_enable(void) { return -EINVAL; } 993static inline int perf_event_task_enable(void) { return -EINVAL; }
@@ -1011,6 +1020,7 @@ static inline void perf_event_enable(struct perf_event *event) { }
1011static inline void perf_event_disable(struct perf_event *event) { } 1020static inline void perf_event_disable(struct perf_event *event) { }
1012static inline int __perf_event_disable(void *info) { return -1; } 1021static inline int __perf_event_disable(void *info) { return -1; }
1013static inline void perf_event_task_tick(void) { } 1022static inline void perf_event_task_tick(void) { }
1023static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
1014#endif 1024#endif
1015 1025
1016#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL) 1026#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
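
A hedged sketch of the new in-kernel accessors; the helper name is made up, and the fd is assumed to reference an event bound to the current task or CPU, as perf_event_read_local() requires.

#include <linux/perf_event.h>
#include <linux/err.h>

static u64 sample_counter(unsigned int fd)
{
	struct perf_event *event;
	u64 count;

	event = perf_event_get(fd);		/* takes a reference on success */
	if (IS_ERR(event))
		return 0;

	/* No IPI, no sleeping: usable from restricted contexts */
	count = perf_event_read_local(event);

	perf_event_release_kernel(event);	/* drop the reference */
	return count;
}
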
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a26c3f84b8dd..962387a192f1 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -330,6 +330,7 @@ struct phy_c45_device_ids {
330 * c45_ids: 802.3-c45 Device Identifers if is_c45. 330 * c45_ids: 802.3-c45 Device Identifers if is_c45.
331 * is_c45: Set to true if this phy uses clause 45 addressing. 331 * is_c45: Set to true if this phy uses clause 45 addressing.
332 * is_internal: Set to true if this phy is internal to a MAC. 332 * is_internal: Set to true if this phy is internal to a MAC.
333 * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
333 * has_fixups: Set to true if this phy has fixups/quirks. 334 * has_fixups: Set to true if this phy has fixups/quirks.
334 * suspended: Set to true if this phy has been suspended successfully. 335 * suspended: Set to true if this phy has been suspended successfully.
335 * state: state of the PHY for management purposes 336 * state: state of the PHY for management purposes
@@ -368,6 +369,7 @@ struct phy_device {
368 struct phy_c45_device_ids c45_ids; 369 struct phy_c45_device_ids c45_ids;
369 bool is_c45; 370 bool is_c45;
370 bool is_internal; 371 bool is_internal;
372 bool is_pseudo_fixed_link;
371 bool has_fixups; 373 bool has_fixups;
372 bool suspended; 374 bool suspended;
373 375
@@ -424,6 +426,8 @@ struct phy_device {
424 426
425 struct net_device *attached_dev; 427 struct net_device *attached_dev;
426 428
429 u8 mdix;
430
427 void (*adjust_link)(struct net_device *dev); 431 void (*adjust_link)(struct net_device *dev);
428}; 432};
429#define to_phy_device(d) container_of(d, struct phy_device, dev) 433#define to_phy_device(d) container_of(d, struct phy_device, dev)
@@ -686,6 +690,16 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
686{ 690{
687 return phydev->interface >= PHY_INTERFACE_MODE_RGMII && 691 return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
688 phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID; 692 phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
693};
694
695/*
696 * phy_is_pseudo_fixed_link - Convenience function for testing if this
697 * PHY is the CPU port facing side of an Ethernet switch, or similar.
698 * @phydev: the phy_device struct
699 */
700static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
701{
702 return phydev->is_pseudo_fixed_link;
689} 703}
690 704
691/** 705/**
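
A minimal, hypothetical MAC-driver snippet showing the intended use of the new flag; the surrounding driver context is assumed.

#include <linux/phy.h>

static void my_mac_setup_link(struct phy_device *phydev)
{
	if (phy_is_pseudo_fixed_link(phydev)) {
		/* Port faces a switch: link parameters are fixed, skip polling */
		return;
	}

	/* Real PHY: start the state machine and honour autonegotiation */
	phy_start(phydev);
}
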
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index fe5732d53eda..2400d2ea4f34 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,9 +13,11 @@ struct device_node;
13 13
14#if IS_ENABLED(CONFIG_FIXED_PHY) 14#if IS_ENABLED(CONFIG_FIXED_PHY)
15extern int fixed_phy_add(unsigned int irq, int phy_id, 15extern int fixed_phy_add(unsigned int irq, int phy_id,
16 struct fixed_phy_status *status); 16 struct fixed_phy_status *status,
17 int link_gpio);
17extern struct phy_device *fixed_phy_register(unsigned int irq, 18extern struct phy_device *fixed_phy_register(unsigned int irq,
18 struct fixed_phy_status *status, 19 struct fixed_phy_status *status,
20 int link_gpio,
19 struct device_node *np); 21 struct device_node *np);
20extern void fixed_phy_del(int phy_addr); 22extern void fixed_phy_del(int phy_addr);
21extern int fixed_phy_set_link_update(struct phy_device *phydev, 23extern int fixed_phy_set_link_update(struct phy_device *phydev,
@@ -26,12 +28,14 @@ extern int fixed_phy_update_state(struct phy_device *phydev,
26 const struct fixed_phy_status *changed); 28 const struct fixed_phy_status *changed);
27#else 29#else
28static inline int fixed_phy_add(unsigned int irq, int phy_id, 30static inline int fixed_phy_add(unsigned int irq, int phy_id,
29 struct fixed_phy_status *status) 31 struct fixed_phy_status *status,
32 int link_gpio)
30{ 33{
31 return -ENODEV; 34 return -ENODEV;
32} 35}
33static inline struct phy_device *fixed_phy_register(unsigned int irq, 36static inline struct phy_device *fixed_phy_register(unsigned int irq,
34 struct fixed_phy_status *status, 37 struct fixed_phy_status *status,
38 int gpio_link,
35 struct device_node *np) 39 struct device_node *np)
36{ 40{
37 return ERR_PTR(-ENODEV); 41 return ERR_PTR(-ENODEV);
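
A sketch of the widened registration call; the status values are illustrative, and passing -1 for link_gpio keeps the old "no GPIO link sensing" behaviour.

#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

static struct phy_device *register_switch_port_phy(struct device_node *np)
{
	struct fixed_phy_status status = {
		.link	= 1,
		.speed	= 1000,
		.duplex	= 1,
	};

	/* irq = PHY_POLL (no interrupt), link_gpio = -1 (no GPIO) */
	return fixed_phy_register(PHY_POLL, &status, -1, np);
}
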
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 4b452c6a2f7b..527a85c61924 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -46,18 +46,6 @@ struct at91_cf_data {
46#define AT91_IDE_SWAP_A0_A2 0x02 46#define AT91_IDE_SWAP_A0_A2 0x02
47}; 47};
48 48
49 /* USB Host */
50#define AT91_MAX_USBH_PORTS 3
51struct at91_usbh_data {
52 int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */
53 int overcurrent_pin[AT91_MAX_USBH_PORTS];
54 u8 ports; /* number of ports on root hub */
55 u8 overcurrent_supported;
56 u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
57 u8 overcurrent_status[AT91_MAX_USBH_PORTS];
58 u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
59};
60
61 /* NAND / SmartMedia */ 49 /* NAND / SmartMedia */
62struct atmel_nand_data { 50struct atmel_nand_data {
63 int enable_pin; /* chip enable */ 51 int enable_pin; /* chip enable */
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h
index 02bf6ea31701..695035a8d7fb 100644
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ b/include/linux/platform_data/atmel_mxt_ts.h
@@ -10,16 +10,22 @@
10 * option) any later version. 10 * option) any later version.
11 */ 11 */
12 12
13#ifndef __LINUX_ATMEL_MXT_TS_H 13#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
14#define __LINUX_ATMEL_MXT_TS_H 14#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17 17
18enum mxt_suspend_mode {
19 MXT_SUSPEND_DEEP_SLEEP = 0,
20 MXT_SUSPEND_T9_CTRL = 1,
21};
22
18/* The platform data for the Atmel maXTouch touchscreen driver */ 23/* The platform data for the Atmel maXTouch touchscreen driver */
19struct mxt_platform_data { 24struct mxt_platform_data {
20 unsigned long irqflags; 25 unsigned long irqflags;
21 u8 t19_num_keys; 26 u8 t19_num_keys;
22 const unsigned int *t19_keymap; 27 const unsigned int *t19_keymap;
28 enum mxt_suspend_mode suspend_mode;
23}; 29};
24 30
25#endif /* __LINUX_ATMEL_MXT_TS_H */ 31#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
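
A hedged board-file sketch using the new suspend_mode field; the key codes, IRQ flags and names are placeholders.

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_data/atmel_mxt_ts.h>

static const unsigned int my_t19_keys[] = { KEY_BACK, KEY_HOME, KEY_MENU };

static const struct mxt_platform_data my_mxt_pdata = {
	.irqflags	= IRQF_TRIGGER_FALLING,
	.t19_num_keys	= ARRAY_SIZE(my_t19_keys),
	.t19_keymap	= my_t19_keys,
	.suspend_mode	= MXT_SUSPEND_DEEP_SLEEP,	/* default behaviour */
};
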
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
index 97baf831e071..3af0da1f3be5 100644
--- a/include/linux/platform_data/clk-ux500.h
+++ b/include/linux/platform_data/clk-ux500.h
@@ -10,14 +10,8 @@
10#ifndef __CLK_UX500_H 10#ifndef __CLK_UX500_H
11#define __CLK_UX500_H 11#define __CLK_UX500_H
12 12
13void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, 13void u8500_clk_init(void);
14 u32 clkrst5_base, u32 clkrst6_base); 14void u9540_clk_init(void);
15 15void u8540_clk_init(void);
16void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
17 u32 clkrst5_base, u32 clkrst6_base);
18void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
19 u32 clkrst5_base, u32 clkrst6_base);
20void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
21 u32 clkrst5_base, u32 clkrst6_base);
22 16
23#endif /* __CLK_UX500_H */ 17#endif /* __CLK_UX500_H */
diff --git a/include/linux/platform_data/gpio-em.h b/include/linux/platform_data/gpio-em.h
deleted file mode 100644
index 7c5a519d2dcd..000000000000
--- a/include/linux/platform_data/gpio-em.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __GPIO_EM_H__
2#define __GPIO_EM_H__
3
4struct gpio_em_config {
5 unsigned int gpio_base;
6 unsigned int irq_base;
7 unsigned int number_of_pins;
8 const char *pctl_name;
9};
10
11#endif /* __GPIO_EM_H__ */
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
new file mode 100644
index 000000000000..c68712aadf43
--- /dev/null
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -0,0 +1,44 @@
1/*
2 * I2C multiplexer using a single register
3 *
4 * Copyright 2015 Freescale Semiconductor
5 * York Sun <yorksun@freescale.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
14#define __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
15
16/**
17 * struct i2c_mux_reg_platform_data - Platform-dependent data for i2c-mux-reg
18 * @parent: Parent I2C bus adapter number
19 * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
20 * @values: Array of value for each channel
21 * @n_values: Number of multiplexer channels
22 * @little_endian: Indicating if the register is in little endian
23 * @write_only: Reading the register is not allowed by hardware
24 * @classes: Optional I2C auto-detection classes
25 * @idle: Value to write to mux when idle
26 * @idle_in_use: indicate if idle value is in use
27 * @reg: Virtual address of the register to switch channel
28 * @reg_size: register size in bytes
29 */
30struct i2c_mux_reg_platform_data {
31 int parent;
32 int base_nr;
33 const unsigned int *values;
34 int n_values;
35 bool little_endian;
36 bool write_only;
37 const unsigned int *classes;
38 u32 idle;
39 bool idle_in_use;
40 void __iomem *reg;
41 resource_size_t reg_size;
42};
43
44#endif /* __LINUX_PLATFORM_DATA_I2C_MUX_REG_H */
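
A hedged example of board code describing a four-channel register mux with the new platform data; the parent bus number and channel values are made up, and reg is left NULL on the assumption that the driver maps the register from a memory resource.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-mux-reg.h>

static const unsigned int my_mux_values[] = { 0, 1, 2, 3 };

static struct i2c_mux_reg_platform_data my_mux_pdata = {
	.parent		= 1,			/* adapter number of the parent bus */
	.base_nr	= 0,			/* dynamic child bus numbers */
	.values		= my_mux_values,
	.n_values	= ARRAY_SIZE(my_mux_values),
	.idle		= 0,
	.idle_in_use	= true,
	.reg		= NULL,			/* taken from a mem resource instead */
	.reg_size	= 1,			/* one-byte select register */
};

static struct platform_device my_mux_dev = {
	.name	= "i2c-mux-reg",
	.id	= -1,
	.dev	= {
		.platform_data = &my_mux_pdata,
	},
};
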
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
new file mode 100644
index 000000000000..f16542c77ff7
--- /dev/null
+++ b/include/linux/platform_data/itco_wdt.h
@@ -0,0 +1,19 @@
1/*
2 * Platform data for the Intel TCO Watchdog
3 */
4
5#ifndef _ITCO_WDT_H_
6#define _ITCO_WDT_H_
7
8/* Watchdog resources */
9#define ICH_RES_IO_TCO 0
10#define ICH_RES_IO_SMI 1
11#define ICH_RES_MEM_OFF 2
12#define ICH_RES_MEM_GCS_PMC 0
13
14struct itco_wdt_platform_data {
15 char name[32];
16 unsigned int version;
17};
18
19#endif /* _ITCO_WDT_H_ */
diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h
index 6a9fed57f346..eb8a6860e816 100644
--- a/include/linux/platform_data/leds-kirkwood-ns2.h
+++ b/include/linux/platform_data/leds-kirkwood-ns2.h
@@ -9,11 +9,25 @@
9#ifndef __LEDS_KIRKWOOD_NS2_H 9#ifndef __LEDS_KIRKWOOD_NS2_H
10#define __LEDS_KIRKWOOD_NS2_H 10#define __LEDS_KIRKWOOD_NS2_H
11 11
12enum ns2_led_modes {
13 NS_V2_LED_OFF,
14 NS_V2_LED_ON,
15 NS_V2_LED_SATA,
16};
17
18struct ns2_led_modval {
19 enum ns2_led_modes mode;
20 int cmd_level;
21 int slow_level;
22};
23
12struct ns2_led { 24struct ns2_led {
13 const char *name; 25 const char *name;
14 const char *default_trigger; 26 const char *default_trigger;
15 unsigned cmd; 27 unsigned cmd;
16 unsigned slow; 28 unsigned slow;
29 int num_modes;
30 struct ns2_led_modval *modval;
17}; 31};
18 32
19struct ns2_led_platform_data { 33struct ns2_led_platform_data {
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 9c7fd1efe495..1b2ba24e4e03 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,7 +136,6 @@ struct lp855x_rom_data {
136 Only valid when mode is PWM_BASED. 136 Only valid when mode is PWM_BASED.
137 * @size_program : total size of lp855x_rom_data 137 * @size_program : total size of lp855x_rom_data
138 * @rom_data : list of new eeprom/eprom registers 138 * @rom_data : list of new eeprom/eprom registers
139 * @supply : regulator that supplies 3V input
140 */ 139 */
141struct lp855x_platform_data { 140struct lp855x_platform_data {
142 const char *name; 141 const char *name;
@@ -145,7 +144,6 @@ struct lp855x_platform_data {
145 unsigned int period_ns; 144 unsigned int period_ns;
146 int size_program; 145 int size_program;
147 struct lp855x_rom_data *rom_data; 146 struct lp855x_rom_data *rom_data;
148 struct regulator *supply;
149}; 147};
150 148
151#endif 149#endif
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index e1571efa3f2b..95ccab3f454a 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -45,5 +45,6 @@ struct esdhc_platform_data {
45 int max_bus_width; 45 int max_bus_width;
46 bool support_vsel; 46 bool support_vsel;
47 unsigned int delay_line; 47 unsigned int delay_line;
48 unsigned int tuning_step; /* The delay cell steps in tuning procedure */
48}; 49};
49#endif /* __ASM_ARCH_IMX_ESDHC_H */ 50#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
index 7bae83b7c396..646af6f8b838 100644
--- a/include/linux/input/pixcir_ts.h
+++ b/include/linux/platform_data/pixcir_i2c_ts.h
@@ -57,7 +57,6 @@ struct pixcir_i2c_chip_data {
57struct pixcir_ts_platform_data { 57struct pixcir_ts_platform_data {
58 int x_max; 58 int x_max;
59 int y_max; 59 int y_max;
60 int gpio_attb; /* GPIO connected to ATTB line */
61 struct pixcir_i2c_chip_data chip; 60 struct pixcir_i2c_chip_data chip;
62}; 61};
63 62
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
index 8dc2fa47a2aa..f4edcb03c40c 100644
--- a/include/linux/platform_data/spi-davinci.h
+++ b/include/linux/platform_data/spi-davinci.h
@@ -49,6 +49,7 @@ struct davinci_spi_platform_data {
49 u8 num_chipselect; 49 u8 num_chipselect;
50 u8 intr_line; 50 u8 intr_line;
51 u8 *chip_sel; 51 u8 *chip_sel;
52 u8 prescaler_limit;
52 bool cshold_bug; 53 bool cshold_bug;
53 enum dma_event_q dma_event_q; 54 enum dma_event_q dma_event_q;
54}; 55};
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
new file mode 100644
index 000000000000..54b04483976c
--- /dev/null
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -0,0 +1,20 @@
1/*
2 * MTK SPI bus driver definitions
3 *
4 * Copyright (c) 2015 MediaTek Inc.
5 * Author: Leilk Liu <leilk.liu@mediatek.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H
13#define ____LINUX_PLATFORM_DATA_SPI_MTK_H
14
15/* Board specific platform_data */
16struct mtk_chip_config {
17 u32 tx_mlsb;
18 u32 rx_mlsb;
19};
20#endif
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
deleted file mode 100644
index d9d400a297bd..000000000000
--- a/include/linux/platform_data/st_nci.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Driver include for ST NCI NFC chip family.
3 *
4 * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _ST_NCI_H_
20#define _ST_NCI_H_
21
22#define ST_NCI_DRIVER_NAME "st_nci"
23
24struct st_nci_nfc_platform_data {
25 unsigned int gpio_reset;
26 unsigned int irq_polarity;
27};
28
29#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/video-ep93xx.h b/include/linux/platform_data/video-ep93xx.h
index 92fc2b2232e7..699ac4109366 100644
--- a/include/linux/platform_data/video-ep93xx.h
+++ b/include/linux/platform_data/video-ep93xx.h
@@ -2,11 +2,8 @@
2#define __VIDEO_EP93XX_H 2#define __VIDEO_EP93XX_H
3 3
4struct platform_device; 4struct platform_device;
5struct fb_videomode;
6struct fb_info; 5struct fb_info;
7 6
8#define EP93XXFB_USE_MODEDB 0
9
10/* VideoAttributes flags */ 7/* VideoAttributes flags */
11#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0) 8#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0)
12#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1) 9#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1)
@@ -38,12 +35,7 @@ struct fb_info;
38 EP93XXFB_PIXEL_DATA_ENABLE) 35 EP93XXFB_PIXEL_DATA_ENABLE)
39 36
40struct ep93xxfb_mach_info { 37struct ep93xxfb_mach_info {
41 unsigned int num_modes;
42 const struct fb_videomode *modes;
43 const struct fb_videomode *default_mode;
44 int bpp;
45 unsigned int flags; 38 unsigned int flags;
46
47 int (*setup)(struct platform_device *pdev); 39 int (*setup)(struct platform_device *pdev);
48 void (*teardown)(struct platform_device *pdev); 40 void (*teardown)(struct platform_device *pdev);
49 void (*blank)(int blank_mode, struct fb_info *info); 41 void (*blank)(int blank_mode, struct fb_info *info);
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
index 0472ab2f6ede..7bdece8ef33e 100644
--- a/include/linux/platform_data/zforce_ts.h
+++ b/include/linux/platform_data/zforce_ts.h
@@ -16,9 +16,6 @@
16#define _LINUX_INPUT_ZFORCE_TS_H 16#define _LINUX_INPUT_ZFORCE_TS_H
17 17
18struct zforce_ts_platdata { 18struct zforce_ts_platdata {
19 int gpio_int;
20 int gpio_rst;
21
22 unsigned int x_max; 19 unsigned int x_max;
23 unsigned int y_max; 20 unsigned int y_max;
24}; 21};
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 681ccb053f72..b1cf7e797892 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -22,9 +22,6 @@
22 22
23enum gpd_status { 23enum gpd_status {
24 GPD_STATE_ACTIVE = 0, /* PM domain is active */ 24 GPD_STATE_ACTIVE = 0, /* PM domain is active */
25 GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
26 GPD_STATE_BUSY, /* Something is happening to the PM domain */
27 GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
28 GPD_STATE_POWER_OFF, /* PM domain is off */ 25 GPD_STATE_POWER_OFF, /* PM domain is off */
29}; 26};
30 27
@@ -59,9 +56,6 @@ struct generic_pm_domain {
59 unsigned int in_progress; /* Number of devices being suspended now */ 56 unsigned int in_progress; /* Number of devices being suspended now */
60 atomic_t sd_count; /* Number of subdomains with power "on" */ 57 atomic_t sd_count; /* Number of subdomains with power "on" */
61 enum gpd_status status; /* Current state of the domain */ 58 enum gpd_status status; /* Current state of the domain */
62 wait_queue_head_t status_wait_queue;
63 struct task_struct *poweroff_task; /* Powering off task */
64 unsigned int resume_count; /* Number of devices being resumed */
65 unsigned int device_count; /* Number of devices */ 59 unsigned int device_count; /* Number of devices */
66 unsigned int suspended_count; /* System suspend device counter */ 60 unsigned int suspended_count; /* System suspend device counter */
67 unsigned int prepared_count; /* Suspend counter of prepared devices */ 61 unsigned int prepared_count; /* Suspend counter of prepared devices */
@@ -113,7 +107,6 @@ struct generic_pm_domain_data {
113 struct pm_domain_data base; 107 struct pm_domain_data base;
114 struct gpd_timing_data td; 108 struct gpd_timing_data td;
115 struct notifier_block nb; 109 struct notifier_block nb;
116 int need_restore;
117}; 110};
118 111
119#ifdef CONFIG_PM_GENERIC_DOMAINS 112#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -228,8 +221,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
228 return -ENOSYS; 221 return -ENOSYS;
229} 222}
230static inline void pm_genpd_poweroff_unused(void) {} 223static inline void pm_genpd_poweroff_unused(void) {}
231#define simple_qos_governor NULL
232#define pm_domain_always_on_gov NULL
233#endif 224#endif
234 225
235static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, 226static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cec2d4540914..cab7ba55bedb 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -30,7 +30,10 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
30 30
31unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); 31unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
32 32
33bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
34
33int dev_pm_opp_get_opp_count(struct device *dev); 35int dev_pm_opp_get_opp_count(struct device *dev);
36unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
34 37
35struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 38struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
36 unsigned long freq, 39 unsigned long freq,
@@ -62,11 +65,21 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
62 return 0; 65 return 0;
63} 66}
64 67
68static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
69{
70 return false;
71}
72
65static inline int dev_pm_opp_get_opp_count(struct device *dev) 73static inline int dev_pm_opp_get_opp_count(struct device *dev)
66{ 74{
67 return 0; 75 return 0;
68} 76}
69 77
78static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
79{
80 return 0;
81}
82
70static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 83static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
71 unsigned long freq, bool available) 84 unsigned long freq, bool available)
72{ 85{
@@ -115,6 +128,10 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
115#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) 128#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
116int of_init_opp_table(struct device *dev); 129int of_init_opp_table(struct device *dev);
117void of_free_opp_table(struct device *dev); 130void of_free_opp_table(struct device *dev);
131int of_cpumask_init_opp_table(cpumask_var_t cpumask);
132void of_cpumask_free_opp_table(cpumask_var_t cpumask);
133int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
134int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
118#else 135#else
119static inline int of_init_opp_table(struct device *dev) 136static inline int of_init_opp_table(struct device *dev)
120{ 137{
@@ -124,6 +141,25 @@ static inline int of_init_opp_table(struct device *dev)
124static inline void of_free_opp_table(struct device *dev) 141static inline void of_free_opp_table(struct device *dev)
125{ 142{
126} 143}
144
145static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask)
146{
147 return -ENOSYS;
148}
149
150static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask)
151{
152}
153
154static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
155{
156 return -ENOSYS;
157}
158
159static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
160{
161 return -ENOSYS;
162}
127#endif 163#endif
128 164
129#endif /* __LINUX_OPP_H__ */ 165#endif /* __LINUX_OPP_H__ */
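
A hedged sketch of the new cpumask helpers as a cpufreq driver might call them during init; the call order is an assumption, error unwinding is abbreviated, and the cpu_dev lookup is assumed to have happened already.

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

static int init_shared_opp_table(struct device *cpu_dev, cpumask_var_t mask)
{
	int ret;

	/* CPUs sharing this CPU's OPP table, as described in the device tree */
	ret = of_get_cpus_sharing_opps(cpu_dev, mask);
	if (ret)
		return ret;

	/* Parse operating-points for every CPU in the mask */
	ret = of_cpumask_init_opp_table(mask);
	if (ret)
		return ret;

	/* Record that the table is shared between those CPUs */
	ret = set_cpus_sharing_opps(cpu_dev, mask);
	if (ret)
		of_cpumask_free_opp_table(mask);

	return ret;
}
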
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 7b3ae0cffc05..0f65d36c2a75 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -161,6 +161,8 @@ void dev_pm_qos_hide_flags(struct device *dev);
161int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); 161int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
162s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); 162s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
163int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); 163int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
164int dev_pm_qos_expose_latency_tolerance(struct device *dev);
165void dev_pm_qos_hide_latency_tolerance(struct device *dev);
164 166
165static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) 167static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
166{ 168{
@@ -229,6 +231,9 @@ static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
229 { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } 231 { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
230static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) 232static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
231 { return 0; } 233 { return 0; }
234static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
235 { return 0; }
236static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
232 237
233static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } 238static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
234static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } 239static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 30e84d48bfea..3bdbb4189780 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -98,11 +98,6 @@ static inline bool pm_runtime_status_suspended(struct device *dev)
98 return dev->power.runtime_status == RPM_SUSPENDED; 98 return dev->power.runtime_status == RPM_SUSPENDED;
99} 99}
100 100
101static inline bool pm_runtime_suspended_if_enabled(struct device *dev)
102{
103 return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1;
104}
105
106static inline bool pm_runtime_enabled(struct device *dev) 101static inline bool pm_runtime_enabled(struct device *dev)
107{ 102{
108 return !dev->power.disable_depth; 103 return !dev->power.disable_depth;
@@ -164,7 +159,6 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {}
164static inline bool pm_runtime_suspended(struct device *dev) { return false; } 159static inline bool pm_runtime_suspended(struct device *dev) { return false; }
165static inline bool pm_runtime_active(struct device *dev) { return true; } 160static inline bool pm_runtime_active(struct device *dev) { return true; }
166static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } 161static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
167static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; }
168static inline bool pm_runtime_enabled(struct device *dev) { return false; } 162static inline bool pm_runtime_enabled(struct device *dev) { return false; }
169 163
170static inline void pm_runtime_no_callbacks(struct device *dev) {} 164static inline void pm_runtime_no_callbacks(struct device *dev) {}
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index d2114045a6c4..85f810b33917 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -14,28 +14,42 @@
14#define __PMEM_H__ 14#define __PMEM_H__
15 15
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/uio.h>
17 18
18#ifdef CONFIG_ARCH_HAS_PMEM_API 19#ifdef CONFIG_ARCH_HAS_PMEM_API
19#include <asm/cacheflush.h> 20#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
21#include <asm/pmem.h>
20#else 22#else
23#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
24/*
25 * These are simply here to enable compilation, all call sites gate
26 * calling these symbols with arch_has_pmem_api() and redirect to the
27 * implementation in asm/pmem.h.
28 */
29static inline bool __arch_has_wmb_pmem(void)
30{
31 return false;
32}
33
21static inline void arch_wmb_pmem(void) 34static inline void arch_wmb_pmem(void)
22{ 35{
23 BUG(); 36 BUG();
24} 37}
25 38
26static inline bool __arch_has_wmb_pmem(void) 39static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
40 size_t n)
27{ 41{
28 return false; 42 BUG();
29} 43}
30 44
31static inline void __pmem *arch_memremap_pmem(resource_size_t offset, 45static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
32 unsigned long size) 46 struct iov_iter *i)
33{ 47{
34 return NULL; 48 BUG();
49 return 0;
35} 50}
36 51
37static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, 52static inline void arch_clear_pmem(void __pmem *addr, size_t size)
38 size_t n)
39{ 53{
40 BUG(); 54 BUG();
41} 55}
@@ -43,18 +57,22 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
43 57
44/* 58/*
45 * Architectures that define ARCH_HAS_PMEM_API must provide 59 * Architectures that define ARCH_HAS_PMEM_API must provide
46 * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(), 60 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
47 * arch_wmb_pmem(), and __arch_has_wmb_pmem(). 61 * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
48 */ 62 */
49
50static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) 63static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
51{ 64{
52 memcpy(dst, (void __force const *) src, size); 65 memcpy(dst, (void __force const *) src, size);
53} 66}
54 67
55static inline void memunmap_pmem(void __pmem *addr) 68static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
69{
70 devm_memunmap(dev, (void __force *) addr);
71}
72
73static inline bool arch_has_pmem_api(void)
56{ 74{
57 iounmap((void __force __iomem *) addr); 75 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
58} 76}
59 77
60/** 78/**
@@ -68,14 +86,7 @@ static inline void memunmap_pmem(void __pmem *addr)
68 */ 86 */
69static inline bool arch_has_wmb_pmem(void) 87static inline bool arch_has_wmb_pmem(void)
70{ 88{
71 if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)) 89 return arch_has_pmem_api() && __arch_has_wmb_pmem();
72 return __arch_has_wmb_pmem();
73 return false;
74}
75
76static inline bool arch_has_pmem_api(void)
77{
78 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
79} 90}
80 91
81/* 92/*
@@ -85,16 +96,24 @@ static inline bool arch_has_pmem_api(void)
85 * default_memremap_pmem + default_memcpy_to_pmem is sufficient for 96 * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
86 * making data durable relative to i/o completion. 97 * making data durable relative to i/o completion.
87 */ 98 */
88static void default_memcpy_to_pmem(void __pmem *dst, const void *src, 99static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
89 size_t size) 100 size_t size)
90{ 101{
91 memcpy((void __force *) dst, src, size); 102 memcpy((void __force *) dst, src, size);
92} 103}
93 104
94static void __pmem *default_memremap_pmem(resource_size_t offset, 105static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
95 unsigned long size) 106 size_t bytes, struct iov_iter *i)
107{
108 return copy_from_iter_nocache((void __force *)addr, bytes, i);
109}
110
111static inline void default_clear_pmem(void __pmem *addr, size_t size)
96{ 112{
97 return (void __pmem __force *)ioremap_wt(offset, size); 113 if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
114 clear_page((void __force *)addr);
115 else
116 memset((void __force *)addr, 0, size);
98} 117}
99 118
100/** 119/**
@@ -109,12 +128,11 @@ static void __pmem *default_memremap_pmem(resource_size_t offset,
109 * wmb_pmem() arrange for the data to be written through the 128 * wmb_pmem() arrange for the data to be written through the
110 * cache to persistent media. 129 * cache to persistent media.
111 */ 130 */
112static inline void __pmem *memremap_pmem(resource_size_t offset, 131static inline void __pmem *memremap_pmem(struct device *dev,
113 unsigned long size) 132 resource_size_t offset, unsigned long size)
114{ 133{
115 if (arch_has_pmem_api()) 134 return (void __pmem *) devm_memremap(dev, offset, size,
116 return arch_memremap_pmem(offset, size); 135 ARCH_MEMREMAP_PMEM);
117 return default_memremap_pmem(offset, size);
118} 136}
119 137
120/** 138/**
@@ -146,7 +164,42 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
146 */ 164 */
147static inline void wmb_pmem(void) 165static inline void wmb_pmem(void)
148{ 166{
149 if (arch_has_pmem_api()) 167 if (arch_has_wmb_pmem())
150 arch_wmb_pmem(); 168 arch_wmb_pmem();
169 else
170 wmb();
171}
172
173/**
174 * copy_from_iter_pmem - copy data from an iterator to PMEM
175 * @addr: PMEM destination address
176 * @bytes: number of bytes to copy
177 * @i: iterator with source data
178 *
179 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
180 * This function requires explicit ordering with a wmb_pmem() call.
181 */
182static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
183 struct iov_iter *i)
184{
185 if (arch_has_pmem_api())
186 return arch_copy_from_iter_pmem(addr, bytes, i);
187 return default_copy_from_iter_pmem(addr, bytes, i);
188}
189
190/**
191 * clear_pmem - zero a PMEM memory range
192 * @addr: virtual start address
193 * @size: number of bytes to zero
194 *
195 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
196 * This function requires explicit ordering with a wmb_pmem() call.
197 */
198static inline void clear_pmem(void __pmem *addr, size_t size)
199{
200 if (arch_has_pmem_api())
201 arch_clear_pmem(addr, size);
202 else
203 default_clear_pmem(addr, size);
151} 204}
152#endif /* __PMEM_H__ */ 205#endif /* __PMEM_H__ */
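
A minimal sketch of the devm-based flow after this rework; the resource base and label layout are placeholders.

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/pmem.h>

static int write_label(struct device *dev, resource_size_t base,
		       const void *label, size_t len)
{
	void __pmem *pmem;

	/* Mapping is devm-managed and uses ARCH_MEMREMAP_PMEM semantics */
	pmem = memremap_pmem(dev, base, PAGE_SIZE);
	if (!pmem)
		return -ENOMEM;

	clear_pmem(pmem, PAGE_SIZE);		/* zero the block first */
	memcpy_to_pmem(pmem, label, len);	/* stage the new label */
	wmb_pmem();				/* make the writes durable */

	memunmap_pmem(dev, pmem);
	return 0;
}
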
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 2110a81c5e2a..317e16de09e5 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -19,8 +19,8 @@
19 * under normal circumstances, used to verify that nobody uses 19 * under normal circumstances, used to verify that nobody uses
20 * non-initialized list entries. 20 * non-initialized list entries.
21 */ 21 */
22#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) 22#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
23#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) 23#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
24 24
25/********** include/linux/timer.h **********/ 25/********** include/linux/timer.h **********/
26/* 26/*
@@ -69,10 +69,6 @@
69#define ATM_POISON_FREE 0x12 69#define ATM_POISON_FREE 0x12
70#define ATM_POISON 0xdeadbeef 70#define ATM_POISON 0xdeadbeef
71 71
72/********** net/ **********/
73#define NEIGHBOR_DEAD 0xdeadbeef
74#define NETFILTER_LINK_POISON 0xdead57ac
75
76/********** kernel/mutexes **********/ 72/********** kernel/mutexes **********/
77#define MUTEX_DEBUG_INIT 0x11 73#define MUTEX_DEBUG_INIT 0x11
78#define MUTEX_DEBUG_FREE 0x22 74#define MUTEX_DEBUG_FREE 0x22
@@ -83,7 +79,4 @@
83/********** security/ **********/ 79/********** security/ **********/
84#define KEY_DESTROY 0xbd 80#define KEY_DESTROY 0xbd
85 81
86/********** sound/oss/ **********/
87#define OSS_POISON_FREE 0xAB
88
89#endif 82#endif
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 84991f185173..bea8dd8ff5e0 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -84,13 +84,21 @@
84 */ 84 */
85#define in_nmi() (preempt_count() & NMI_MASK) 85#define in_nmi() (preempt_count() & NMI_MASK)
86 86
87/*
88 * The preempt_count offset after preempt_disable();
89 */
87#if defined(CONFIG_PREEMPT_COUNT) 90#if defined(CONFIG_PREEMPT_COUNT)
88# define PREEMPT_DISABLE_OFFSET 1 91# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
89#else 92#else
90# define PREEMPT_DISABLE_OFFSET 0 93# define PREEMPT_DISABLE_OFFSET 0
91#endif 94#endif
92 95
93/* 96/*
97 * The preempt_count offset after spin_lock()
98 */
99#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
100
101/*
94 * The preempt_count offset needed for things like: 102 * The preempt_count offset needed for things like:
95 * 103 *
96 * spin_lock_bh() 104 * spin_lock_bh()
@@ -103,7 +111,7 @@
103 * 111 *
104 * Work as expected. 112 * Work as expected.
105 */ 113 */
106#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET) 114#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
107 115
108/* 116/*
109 * Are we running in atomic context? WARNING: this macro cannot 117 * Are we running in atomic context? WARNING: this macro cannot
@@ -124,7 +132,8 @@
124#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) 132#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
125extern void preempt_count_add(int val); 133extern void preempt_count_add(int val);
126extern void preempt_count_sub(int val); 134extern void preempt_count_sub(int val);
127#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); }) 135#define preempt_count_dec_and_test() \
136 ({ preempt_count_sub(1); should_resched(0); })
128#else 137#else
129#define preempt_count_add(val) __preempt_count_add(val) 138#define preempt_count_add(val) __preempt_count_add(val)
130#define preempt_count_sub(val) __preempt_count_sub(val) 139#define preempt_count_sub(val) __preempt_count_sub(val)
@@ -184,7 +193,7 @@ do { \
184 193
185#define preempt_check_resched() \ 194#define preempt_check_resched() \
186do { \ 195do { \
187 if (should_resched()) \ 196 if (should_resched(0)) \
188 __preempt_schedule(); \ 197 __preempt_schedule(); \
189} while (0) 198} while (0)
190 199
diff --git a/include/linux/printk.h b/include/linux/printk.h
index a6298b27ac99..9729565c25ff 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -404,10 +404,10 @@ do { \
404 static DEFINE_RATELIMIT_STATE(_rs, \ 404 static DEFINE_RATELIMIT_STATE(_rs, \
405 DEFAULT_RATELIMIT_INTERVAL, \ 405 DEFAULT_RATELIMIT_INTERVAL, \
406 DEFAULT_RATELIMIT_BURST); \ 406 DEFAULT_RATELIMIT_BURST); \
407 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ 407 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
408 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ 408 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
409 __ratelimit(&_rs)) \ 409 __ratelimit(&_rs)) \
410 __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ 410 __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
411} while (0) 411} while (0)
412#elif defined(DEBUG) 412#elif defined(DEBUG)
413#define pr_debug_ratelimited(fmt, ...) \ 413#define pr_debug_ratelimited(fmt, ...) \
@@ -456,11 +456,17 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
456 groupsize, buf, len, ascii) \ 456 groupsize, buf, len, ascii) \
457 dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ 457 dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
458 groupsize, buf, len, ascii) 458 groupsize, buf, len, ascii)
459#else 459#elif defined(DEBUG)
460#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ 460#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
461 groupsize, buf, len, ascii) \ 461 groupsize, buf, len, ascii) \
462 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ 462 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
463 groupsize, buf, len, ascii) 463 groupsize, buf, len, ascii)
464#endif /* defined(CONFIG_DYNAMIC_DEBUG) */ 464#else
465static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
466 int rowsize, int groupsize,
467 const void *buf, size_t len, bool ascii)
468{
469}
470#endif
465 471
466#endif 472#endif
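
With the new inline stub, callers can use print_hex_dump_debug() unconditionally; a brief sketch (the helper and buffer are invented for illustration):

#include <linux/printk.h>

static void demo_dump_frame(const void *buf, size_t len)
{
	/*
	 * Compiles down to the empty stub unless DEBUG or
	 * CONFIG_DYNAMIC_DEBUG is in effect, so the call site no longer
	 * needs its own #ifdef guard.
	 */
	print_hex_dump_debug("frame: ", DUMP_PREFIX_OFFSET, 16, 1,
			     buf, len, true);
}
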
diff --git a/include/linux/property.h b/include/linux/property.h
index 76ebde9c11d4..a59c6ee566c2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -166,4 +166,8 @@ void device_add_property_set(struct device *dev, struct property_set *pset);
166 166
167bool device_dma_is_coherent(struct device *dev); 167bool device_dma_is_coherent(struct device *dev);
168 168
169int device_get_phy_mode(struct device *dev);
170
171void *device_get_mac_address(struct device *dev, char *addr, int alen);
172
169#endif /* _LINUX_PROPERTY_H_ */ 173#endif /* _LINUX_PROPERTY_H_ */
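
A hedged sketch of how a network driver probe might use the two new helpers; the function, the net_device handling and the random-MAC fallback are invented for illustration:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/property.h>

static int demo_get_phy_and_mac(struct device *dev, struct net_device *ndev)
{
	char addr[ETH_ALEN];
	int phy_mode;

	phy_mode = device_get_phy_mode(dev);	/* PHY_INTERFACE_MODE_* or -errno */
	if (phy_mode < 0)
		return phy_mode;

	if (device_get_mac_address(dev, addr, ETH_ALEN))
		ether_addr_copy(ndev->dev_addr, addr);	/* firmware-provided MAC */
	else
		eth_hw_addr_random(ndev);		/* assumed fallback policy */

	return 0;
}
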
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 00e8e8fa7358..5440f64d2942 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -33,7 +33,7 @@ struct prop_global {
33/* 33/*
34 * global proportion descriptor 34 * global proportion descriptor
35 * 35 *
36 * this is needed to consitently flip prop_global structures. 36 * this is needed to consistently flip prop_global structures.
37 */ 37 */
38struct prop_descriptor { 38struct prop_descriptor {
39 int index; 39 int index;
diff --git a/include/linux/psci.h b/include/linux/psci.h
new file mode 100644
index 000000000000..a682fcc91c33
--- /dev/null
+++ b/include/linux/psci.h
@@ -0,0 +1,52 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2015 ARM Limited
12 */
13
14#ifndef __LINUX_PSCI_H
15#define __LINUX_PSCI_H
16
17#include <linux/init.h>
18#include <linux/types.h>
19
20#define PSCI_POWER_STATE_TYPE_STANDBY 0
21#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
22
23bool psci_tos_resident_on(int cpu);
24
25struct psci_operations {
26 int (*cpu_suspend)(u32 state, unsigned long entry_point);
27 int (*cpu_off)(u32 state);
28 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
29 int (*migrate)(unsigned long cpuid);
30 int (*affinity_info)(unsigned long target_affinity,
31 unsigned long lowest_affinity_level);
32 int (*migrate_info_type)(void);
33};
34
35extern struct psci_operations psci_ops;
36
37#if defined(CONFIG_ARM_PSCI_FW)
38int __init psci_dt_init(void);
39#else
40static inline int psci_dt_init(void) { return 0; }
41#endif
42
43#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
44int __init psci_acpi_init(void);
45bool __init acpi_psci_present(void);
46bool __init acpi_psci_use_hvc(void);
47#else
48static inline int psci_acpi_init(void) { return 0; }
49static inline bool acpi_psci_present(void) { return false; }
50#endif
51
52#endif /* __LINUX_PSCI_H */
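
A sketch of how an architecture SMP backend might use psci_ops to bring up a secondary CPU; the entry point is an invented symbol, and a real caller would translate the logical CPU number to its hardware ID (e.g. via cpu_logical_map()) rather than passing it through directly:

#include <linux/errno.h>
#include <linux/psci.h>

static int demo_cpu_boot(unsigned long hw_cpu_id, unsigned long entry_point)
{
	if (!psci_ops.cpu_on)
		return -EOPNOTSUPP;

	/* Ask the PSCI firmware to start the CPU at the given entry point. */
	return psci_ops.cpu_on(hw_cpu_id, entry_point);
}
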
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 987a73a40ef8..061265f92876 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -34,6 +34,7 @@
34#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP) 34#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
35 35
36#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT) 36#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
37#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
37 38
38/* single stepping state bits (used on ARM and PA-RISC) */ 39/* single stepping state bits (used on ARM and PA-RISC) */
39#define PT_SINGLESTEP_BIT 31 40#define PT_SINGLESTEP_BIT 31
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 36262d08a9da..d681f6875aef 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -79,26 +79,43 @@ enum {
79 PWMF_EXPORTED = 1 << 2, 79 PWMF_EXPORTED = 1 << 2,
80}; 80};
81 81
82/**
83 * struct pwm_device - PWM channel object
84 * @label: name of the PWM device
85 * @flags: flags associated with the PWM device
86 * @hwpwm: per-chip relative index of the PWM device
87 * @pwm: global index of the PWM device
88 * @chip: PWM chip providing this PWM device
89 * @chip_data: chip-private data associated with the PWM device
90 * @period: period of the PWM signal (in nanoseconds)
91 * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
92 * @polarity: polarity of the PWM signal
93 */
82struct pwm_device { 94struct pwm_device {
83 const char *label; 95 const char *label;
84 unsigned long flags; 96 unsigned long flags;
85 unsigned int hwpwm; 97 unsigned int hwpwm;
86 unsigned int pwm; 98 unsigned int pwm;
87 struct pwm_chip *chip; 99 struct pwm_chip *chip;
88 void *chip_data; 100 void *chip_data;
89 101
90 unsigned int period; /* in nanoseconds */ 102 unsigned int period;
91 unsigned int duty_cycle; /* in nanoseconds */ 103 unsigned int duty_cycle;
92 enum pwm_polarity polarity; 104 enum pwm_polarity polarity;
93}; 105};
94 106
107static inline bool pwm_is_enabled(const struct pwm_device *pwm)
108{
109 return test_bit(PWMF_ENABLED, &pwm->flags);
110}
111
95static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) 112static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
96{ 113{
97 if (pwm) 114 if (pwm)
98 pwm->period = period; 115 pwm->period = period;
99} 116}
100 117
101static inline unsigned int pwm_get_period(struct pwm_device *pwm) 118static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
102{ 119{
103 return pwm ? pwm->period : 0; 120 return pwm ? pwm->period : 0;
104} 121}
@@ -109,7 +126,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
109 pwm->duty_cycle = duty; 126 pwm->duty_cycle = duty;
110} 127}
111 128
112static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm) 129static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
113{ 130{
114 return pwm ? pwm->duty_cycle : 0; 131 return pwm ? pwm->duty_cycle : 0;
115} 132}
@@ -119,6 +136,11 @@ static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
119 */ 136 */
120int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity); 137int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
121 138
139static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
140{
141 return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
142}
143
122/** 144/**
123 * struct pwm_ops - PWM controller operations 145 * struct pwm_ops - PWM controller operations
124 * @request: optional hook for requesting a PWM 146 * @request: optional hook for requesting a PWM
@@ -131,25 +153,18 @@ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
131 * @owner: helps prevent removal of modules exporting active PWMs 153 * @owner: helps prevent removal of modules exporting active PWMs
132 */ 154 */
133struct pwm_ops { 155struct pwm_ops {
134 int (*request)(struct pwm_chip *chip, 156 int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
135 struct pwm_device *pwm); 157 void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
136 void (*free)(struct pwm_chip *chip, 158 int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
137 struct pwm_device *pwm); 159 int duty_ns, int period_ns);
138 int (*config)(struct pwm_chip *chip, 160 int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
139 struct pwm_device *pwm, 161 enum pwm_polarity polarity);
140 int duty_ns, int period_ns); 162 int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
141 int (*set_polarity)(struct pwm_chip *chip, 163 void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
142 struct pwm_device *pwm,
143 enum pwm_polarity polarity);
144 int (*enable)(struct pwm_chip *chip,
145 struct pwm_device *pwm);
146 void (*disable)(struct pwm_chip *chip,
147 struct pwm_device *pwm);
148#ifdef CONFIG_DEBUG_FS 164#ifdef CONFIG_DEBUG_FS
149 void (*dbg_show)(struct pwm_chip *chip, 165 void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
150 struct seq_file *s);
151#endif 166#endif
152 struct module *owner; 167 struct module *owner;
153}; 168};
154 169
155/** 170/**
@@ -160,22 +175,24 @@ struct pwm_ops {
160 * @base: number of first PWM controlled by this chip 175 * @base: number of first PWM controlled by this chip
161 * @npwm: number of PWMs controlled by this chip 176 * @npwm: number of PWMs controlled by this chip
162 * @pwms: array of PWM devices allocated by the framework 177 * @pwms: array of PWM devices allocated by the framework
178 * @of_xlate: request a PWM device given a device tree PWM specifier
179 * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
163 * @can_sleep: must be true if the .config(), .enable() or .disable() 180 * @can_sleep: must be true if the .config(), .enable() or .disable()
164 * operations may sleep 181 * operations may sleep
165 */ 182 */
166struct pwm_chip { 183struct pwm_chip {
167 struct device *dev; 184 struct device *dev;
168 struct list_head list; 185 struct list_head list;
169 const struct pwm_ops *ops; 186 const struct pwm_ops *ops;
170 int base; 187 int base;
171 unsigned int npwm; 188 unsigned int npwm;
172 189
173 struct pwm_device *pwms; 190 struct pwm_device *pwms;
174 191
175 struct pwm_device * (*of_xlate)(struct pwm_chip *pc, 192 struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
176 const struct of_phandle_args *args); 193 const struct of_phandle_args *args);
177 unsigned int of_pwm_n_cells; 194 unsigned int of_pwm_n_cells;
178 bool can_sleep; 195 bool can_sleep;
179}; 196};
180 197
181#if IS_ENABLED(CONFIG_PWM) 198#if IS_ENABLED(CONFIG_PWM)
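
A sketch combining the new pwm_is_enabled() and pwm_get_polarity() helpers with the now const-qualified getters; the dump helper itself is invented for illustration:

#include <linux/printk.h>
#include <linux/pwm.h>

static void demo_dump_pwm(const struct pwm_device *pwm)
{
	pr_info("pwm: period=%uns duty=%uns polarity=%s state=%s\n",
		pwm_get_period(pwm), pwm_get_duty_cycle(pwm),
		pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED ?
			"inversed" : "normal",
		pwm_is_enabled(pwm) ? "enabled" : "disabled");
}
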
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 0485bab061fd..92273776bce6 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@ enum pxa_ssp_type {
197 QUARK_X1000_SSP, 197 QUARK_X1000_SSP,
198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ 198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
199 LPSS_BYT_SSP, 199 LPSS_BYT_SSP,
200 LPSS_SPT_SSP,
200}; 201};
201 202
202struct ssp_device { 203struct ssp_device {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 77ca6601ff25..7a57c28eb5e7 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -43,7 +43,7 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number);
43void inode_sub_rsv_space(struct inode *inode, qsize_t number); 43void inode_sub_rsv_space(struct inode *inode, qsize_t number);
44void inode_reclaim_rsv_space(struct inode *inode, qsize_t number); 44void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
45 45
46void dquot_initialize(struct inode *inode); 46int dquot_initialize(struct inode *inode);
47void dquot_drop(struct inode *inode); 47void dquot_drop(struct inode *inode);
48struct dquot *dqget(struct super_block *sb, struct kqid qid); 48struct dquot *dqget(struct super_block *sb, struct kqid qid);
49static inline struct dquot *dqgrab(struct dquot *dquot) 49static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -200,8 +200,9 @@ static inline int sb_has_quota_active(struct super_block *sb, int type)
200 return 0; 200 return 0;
201} 201}
202 202
203static inline void dquot_initialize(struct inode *inode) 203static inline int dquot_initialize(struct inode *inode)
204{ 204{
205 return 0;
205} 206}
206 207
207static inline void dquot_drop(struct inode *inode) 208static inline void dquot_drop(struct inode *inode)
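
Since dquot_initialize() can now fail, call sites need to check its result; a hedged sketch of the updated pattern (the surrounding operation is invented):

#include <linux/quotaops.h>

static int demo_prepare_inode(struct inode *inode)
{
	int err;

	err = dquot_initialize(inode);	/* may now return e.g. -ENOMEM */
	if (err)
		return err;

	/* ... proceed with the quota-affecting operation ... */
	return 0;
}
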
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4cf5f51b4c9c..ff476515f716 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -226,6 +226,37 @@ struct rcu_synchronize {
226}; 226};
227void wakeme_after_rcu(struct rcu_head *head); 227void wakeme_after_rcu(struct rcu_head *head);
228 228
229void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
230 struct rcu_synchronize *rs_array);
231
232#define _wait_rcu_gp(checktiny, ...) \
233do { \
234 call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
235 const int __n = ARRAY_SIZE(__crcu_array); \
236 struct rcu_synchronize __rs_array[__n]; \
237 \
238 __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
239} while (0)
240
241#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
242
243/**
244 * synchronize_rcu_mult - Wait concurrently for multiple grace periods
245 * @...: List of call_rcu() functions for the flavors to wait on.
246 *
247 * This macro waits concurrently for multiple flavors of RCU grace periods.
248 * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
 249 * on concurrent RCU and RCU-bh grace periods. Waiting on a given SRCU
250 * domain requires you to write a wrapper function for that SRCU domain's
251 * call_srcu() function, supplying the corresponding srcu_struct.
252 *
253 * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
254 * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
255 * is automatically a grace period.
256 */
257#define synchronize_rcu_mult(...) \
258 _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
259
229/** 260/**
230 * call_rcu_tasks() - Queue an RCU for invocation task-based grace period 261 * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
231 * @head: structure to be used for queueing the RCU updates. 262 * @head: structure to be used for queueing the RCU updates.
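
A usage sketch mirroring the example in the kerneldoc above: wait concurrently for a normal RCU and an RCU-bh grace period. The caller is invented for illustration:

#include <linux/rcupdate.h>

static void demo_wait_for_readers(void)
{
	/* Returns once both an RCU and an RCU-bh grace period have elapsed. */
	synchronize_rcu_mult(call_rcu, call_rcu_bh);
}
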
@@ -309,7 +340,7 @@ static inline void rcu_sysrq_end(void)
309} 340}
310#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ 341#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
311 342
312#ifdef CONFIG_RCU_USER_QS 343#ifdef CONFIG_NO_HZ_FULL
313void rcu_user_enter(void); 344void rcu_user_enter(void);
314void rcu_user_exit(void); 345void rcu_user_exit(void);
315#else 346#else
@@ -317,7 +348,7 @@ static inline void rcu_user_enter(void) { }
317static inline void rcu_user_exit(void) { } 348static inline void rcu_user_exit(void) { }
318static inline void rcu_user_hooks_switch(struct task_struct *prev, 349static inline void rcu_user_hooks_switch(struct task_struct *prev,
319 struct task_struct *next) { } 350 struct task_struct *next) { }
320#endif /* CONFIG_RCU_USER_QS */ 351#endif /* CONFIG_NO_HZ_FULL */
321 352
322#ifdef CONFIG_RCU_NOCB_CPU 353#ifdef CONFIG_RCU_NOCB_CPU
323void rcu_init_nohz(void); 354void rcu_init_nohz(void);
@@ -392,10 +423,6 @@ bool __rcu_is_watching(void);
392 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 423 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
393 */ 424 */
394 425
395typedef void call_rcu_func_t(struct rcu_head *head,
396 void (*func)(struct rcu_head *head));
397void wait_rcu_gp(call_rcu_func_t crf);
398
399#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) 426#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
400#include <linux/rcutree.h> 427#include <linux/rcutree.h>
401#elif defined(CONFIG_TINY_RCU) 428#elif defined(CONFIG_TINY_RCU)
@@ -469,46 +496,10 @@ int rcu_read_lock_bh_held(void);
469 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an 496 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
470 * RCU-sched read-side critical section. In absence of 497 * RCU-sched read-side critical section. In absence of
471 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side 498 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
472 * critical section unless it can prove otherwise. Note that disabling 499 * critical section unless it can prove otherwise.
473 * of preemption (including disabling irqs) counts as an RCU-sched
474 * read-side critical section. This is useful for debug checks in functions
475 * that required that they be called within an RCU-sched read-side
476 * critical section.
477 *
478 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
479 * and while lockdep is disabled.
480 *
481 * Note that if the CPU is in the idle loop from an RCU point of
482 * view (ie: that we are in the section between rcu_idle_enter() and
483 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
484 * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
485 * that are in such a section, considering these as in extended quiescent
486 * state, so such a CPU is effectively never in an RCU read-side critical
487 * section regardless of what RCU primitives it invokes. This state of
488 * affairs is required --- we need to keep an RCU-free window in idle
489 * where the CPU may possibly enter into low power mode. This way we can
490 * notice an extended quiescent state to other CPUs that started a grace
491 * period. Otherwise we would delay any grace period as long as we run in
492 * the idle task.
493 *
494 * Similarly, we avoid claiming an SRCU read lock held if the current
495 * CPU is offline.
496 */ 500 */
497#ifdef CONFIG_PREEMPT_COUNT 501#ifdef CONFIG_PREEMPT_COUNT
498static inline int rcu_read_lock_sched_held(void) 502int rcu_read_lock_sched_held(void);
499{
500 int lockdep_opinion = 0;
501
502 if (!debug_lockdep_rcu_enabled())
503 return 1;
504 if (!rcu_is_watching())
505 return 0;
506 if (!rcu_lockdep_current_cpu_online())
507 return 0;
508 if (debug_locks)
509 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
510 return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
511}
512#else /* #ifdef CONFIG_PREEMPT_COUNT */ 503#else /* #ifdef CONFIG_PREEMPT_COUNT */
513static inline int rcu_read_lock_sched_held(void) 504static inline int rcu_read_lock_sched_held(void)
514{ 505{
@@ -545,6 +536,11 @@ static inline int rcu_read_lock_sched_held(void)
545 536
546#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 537#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
547 538
539/* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */
540static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
541{
542}
543
548#ifdef CONFIG_PROVE_RCU 544#ifdef CONFIG_PROVE_RCU
549 545
550/** 546/**
@@ -555,17 +551,32 @@ static inline int rcu_read_lock_sched_held(void)
555#define rcu_lockdep_assert(c, s) \ 551#define rcu_lockdep_assert(c, s) \
556 do { \ 552 do { \
557 static bool __section(.data.unlikely) __warned; \ 553 static bool __section(.data.unlikely) __warned; \
554 deprecate_rcu_lockdep_assert(); \
558 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ 555 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
559 __warned = true; \ 556 __warned = true; \
560 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ 557 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
561 } \ 558 } \
562 } while (0) 559 } while (0)
563 560
561/**
562 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
563 * @c: condition to check
564 * @s: informative message
565 */
566#define RCU_LOCKDEP_WARN(c, s) \
567 do { \
568 static bool __section(.data.unlikely) __warned; \
569 if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
570 __warned = true; \
571 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
572 } \
573 } while (0)
574
564#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) 575#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
565static inline void rcu_preempt_sleep_check(void) 576static inline void rcu_preempt_sleep_check(void)
566{ 577{
567 rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), 578 RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
568 "Illegal context switch in RCU read-side critical section"); 579 "Illegal context switch in RCU read-side critical section");
569} 580}
570#else /* #ifdef CONFIG_PROVE_RCU */ 581#else /* #ifdef CONFIG_PROVE_RCU */
571static inline void rcu_preempt_sleep_check(void) 582static inline void rcu_preempt_sleep_check(void)
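
A sketch of converting an assertion to the new macro; note the inverted sense: rcu_lockdep_assert() fired when its condition was false, whereas RCU_LOCKDEP_WARN() fires when its condition is true. The function is invented for illustration:

#include <linux/rcupdate.h>

static void demo_needs_rcu_read_lock(void)
{
	/* Complain if we were called outside an RCU read-side critical section. */
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "demo_needs_rcu_read_lock() called without rcu_read_lock()");
}
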
@@ -576,15 +587,16 @@ static inline void rcu_preempt_sleep_check(void)
576#define rcu_sleep_check() \ 587#define rcu_sleep_check() \
577 do { \ 588 do { \
578 rcu_preempt_sleep_check(); \ 589 rcu_preempt_sleep_check(); \
579 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \ 590 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
580 "Illegal context switch in RCU-bh read-side critical section"); \ 591 "Illegal context switch in RCU-bh read-side critical section"); \
581 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \ 592 RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
582 "Illegal context switch in RCU-sched read-side critical section"); \ 593 "Illegal context switch in RCU-sched read-side critical section"); \
583 } while (0) 594 } while (0)
584 595
585#else /* #ifdef CONFIG_PROVE_RCU */ 596#else /* #ifdef CONFIG_PROVE_RCU */
586 597
587#define rcu_lockdep_assert(c, s) do { } while (0) 598#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
599#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
588#define rcu_sleep_check() do { } while (0) 600#define rcu_sleep_check() do { } while (0)
589 601
590#endif /* #else #ifdef CONFIG_PROVE_RCU */ 602#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -615,13 +627,13 @@ static inline void rcu_preempt_sleep_check(void)
615({ \ 627({ \
616 /* Dependency order vs. p above. */ \ 628 /* Dependency order vs. p above. */ \
617 typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ 629 typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
618 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ 630 RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
619 rcu_dereference_sparse(p, space); \ 631 rcu_dereference_sparse(p, space); \
620 ((typeof(*p) __force __kernel *)(________p1)); \ 632 ((typeof(*p) __force __kernel *)(________p1)); \
621}) 633})
622#define __rcu_dereference_protected(p, c, space) \ 634#define __rcu_dereference_protected(p, c, space) \
623({ \ 635({ \
624 rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \ 636 RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
625 rcu_dereference_sparse(p, space); \ 637 rcu_dereference_sparse(p, space); \
626 ((typeof(*p) __force __kernel *)(p)); \ 638 ((typeof(*p) __force __kernel *)(p)); \
627}) 639})
@@ -845,8 +857,8 @@ static inline void rcu_read_lock(void)
845 __rcu_read_lock(); 857 __rcu_read_lock();
846 __acquire(RCU); 858 __acquire(RCU);
847 rcu_lock_acquire(&rcu_lock_map); 859 rcu_lock_acquire(&rcu_lock_map);
848 rcu_lockdep_assert(rcu_is_watching(), 860 RCU_LOCKDEP_WARN(!rcu_is_watching(),
849 "rcu_read_lock() used illegally while idle"); 861 "rcu_read_lock() used illegally while idle");
850} 862}
851 863
852/* 864/*
@@ -896,8 +908,8 @@ static inline void rcu_read_lock(void)
896 */ 908 */
897static inline void rcu_read_unlock(void) 909static inline void rcu_read_unlock(void)
898{ 910{
899 rcu_lockdep_assert(rcu_is_watching(), 911 RCU_LOCKDEP_WARN(!rcu_is_watching(),
900 "rcu_read_unlock() used illegally while idle"); 912 "rcu_read_unlock() used illegally while idle");
901 __release(RCU); 913 __release(RCU);
902 __rcu_read_unlock(); 914 __rcu_read_unlock();
903 rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ 915 rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
@@ -925,8 +937,8 @@ static inline void rcu_read_lock_bh(void)
925 local_bh_disable(); 937 local_bh_disable();
926 __acquire(RCU_BH); 938 __acquire(RCU_BH);
927 rcu_lock_acquire(&rcu_bh_lock_map); 939 rcu_lock_acquire(&rcu_bh_lock_map);
928 rcu_lockdep_assert(rcu_is_watching(), 940 RCU_LOCKDEP_WARN(!rcu_is_watching(),
929 "rcu_read_lock_bh() used illegally while idle"); 941 "rcu_read_lock_bh() used illegally while idle");
930} 942}
931 943
932/* 944/*
@@ -936,8 +948,8 @@ static inline void rcu_read_lock_bh(void)
936 */ 948 */
937static inline void rcu_read_unlock_bh(void) 949static inline void rcu_read_unlock_bh(void)
938{ 950{
939 rcu_lockdep_assert(rcu_is_watching(), 951 RCU_LOCKDEP_WARN(!rcu_is_watching(),
940 "rcu_read_unlock_bh() used illegally while idle"); 952 "rcu_read_unlock_bh() used illegally while idle");
941 rcu_lock_release(&rcu_bh_lock_map); 953 rcu_lock_release(&rcu_bh_lock_map);
942 __release(RCU_BH); 954 __release(RCU_BH);
943 local_bh_enable(); 955 local_bh_enable();
@@ -961,8 +973,8 @@ static inline void rcu_read_lock_sched(void)
961 preempt_disable(); 973 preempt_disable();
962 __acquire(RCU_SCHED); 974 __acquire(RCU_SCHED);
963 rcu_lock_acquire(&rcu_sched_lock_map); 975 rcu_lock_acquire(&rcu_sched_lock_map);
964 rcu_lockdep_assert(rcu_is_watching(), 976 RCU_LOCKDEP_WARN(!rcu_is_watching(),
965 "rcu_read_lock_sched() used illegally while idle"); 977 "rcu_read_lock_sched() used illegally while idle");
966} 978}
967 979
968/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ 980/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -979,8 +991,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
979 */ 991 */
980static inline void rcu_read_unlock_sched(void) 992static inline void rcu_read_unlock_sched(void)
981{ 993{
982 rcu_lockdep_assert(rcu_is_watching(), 994 RCU_LOCKDEP_WARN(!rcu_is_watching(),
983 "rcu_read_unlock_sched() used illegally while idle"); 995 "rcu_read_unlock_sched() used illegally while idle");
984 rcu_lock_release(&rcu_sched_lock_map); 996 rcu_lock_release(&rcu_sched_lock_map);
985 __release(RCU_SCHED); 997 __release(RCU_SCHED);
986 preempt_enable(); 998 preempt_enable();
@@ -1031,7 +1043,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
1031#define RCU_INIT_POINTER(p, v) \ 1043#define RCU_INIT_POINTER(p, v) \
1032 do { \ 1044 do { \
1033 rcu_dereference_sparse(p, __rcu); \ 1045 rcu_dereference_sparse(p, __rcu); \
1034 p = RCU_INITIALIZER(v); \ 1046 WRITE_ONCE(p, RCU_INITIALIZER(v)); \
1035 } while (0) 1047 } while (0)
1036 1048
1037/** 1049/**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 3df6c1ec4e25..ff968b7af3a4 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -37,6 +37,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
37 might_sleep(); 37 might_sleep();
38} 38}
39 39
40static inline unsigned long get_state_synchronize_sched(void)
41{
42 return 0;
43}
44
45static inline void cond_synchronize_sched(unsigned long oldstate)
46{
47 might_sleep();
48}
49
40static inline void rcu_barrier_bh(void) 50static inline void rcu_barrier_bh(void)
41{ 51{
42 wait_rcu_gp(call_rcu_bh); 52 wait_rcu_gp(call_rcu_bh);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 456879143f89..5abec82f325e 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -76,6 +76,8 @@ void rcu_barrier_bh(void);
76void rcu_barrier_sched(void); 76void rcu_barrier_sched(void);
77unsigned long get_state_synchronize_rcu(void); 77unsigned long get_state_synchronize_rcu(void);
78void cond_synchronize_rcu(unsigned long oldstate); 78void cond_synchronize_rcu(unsigned long oldstate);
79unsigned long get_state_synchronize_sched(void);
80void cond_synchronize_sched(unsigned long oldstate);
79 81
80extern unsigned long rcutorture_testseq; 82extern unsigned long rcutorture_testseq;
81extern unsigned long rcutorture_vernum; 83extern unsigned long rcutorture_vernum;
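
These mirror the existing polled grace-period helpers, now for the RCU-sched flavor; a brief sketch of the intended pattern (the caller is invented):

#include <linux/rcupdate.h>

static void demo_polled_sched_gp(void)
{
	unsigned long snap = get_state_synchronize_sched();

	/* ... unrelated work that may overlap a grace period ... */

	/*
	 * Blocks only if a full RCU-sched grace period has not already
	 * elapsed since the snapshot; otherwise returns immediately.
	 */
	cond_synchronize_sched(snap);
}
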
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 59c55ea0f0b5..8fc0bfd8edc4 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -17,6 +17,7 @@
17#include <linux/rbtree.h> 17#include <linux/rbtree.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/bug.h> 19#include <linux/bug.h>
20#include <linux/lockdep.h>
20 21
21struct module; 22struct module;
22struct device; 23struct device;
@@ -50,6 +51,20 @@ struct reg_default {
50 unsigned int def; 51 unsigned int def;
51}; 52};
52 53
54/**
55 * Register/value pairs for sequences of writes with an optional delay in
56 * microseconds to be applied after each write.
57 *
58 * @reg: Register address.
59 * @def: Register value.
60 * @delay_us: Delay to be applied after the register write in microseconds
61 */
62struct reg_sequence {
63 unsigned int reg;
64 unsigned int def;
65 unsigned int delay_us;
66};
67
53#ifdef CONFIG_REGMAP 68#ifdef CONFIG_REGMAP
54 69
55enum regmap_endian { 70enum regmap_endian {
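
A hedged sketch of a write sequence expressed with the new struct reg_sequence and applied through regmap_multi_reg_write(), whose prototype is updated later in this patch; the register addresses, values and delay are invented:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence demo_init_seq[] = {
	{ .reg = 0x00, .def = 0x01 },			/* soft reset */
	{ .reg = 0x01, .def = 0x80, .delay_us = 100 },	/* enable, then settle */
	{ .reg = 0x02, .def = 0x3f },
};

static int demo_apply_init(struct regmap *map)
{
	return regmap_multi_reg_write(map, demo_init_seq,
				      ARRAY_SIZE(demo_init_seq));
}
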
@@ -296,8 +311,12 @@ typedef void (*regmap_hw_free_context)(void *context);
296 * if not implemented on a given device. 311 * if not implemented on a given device.
297 * @async_write: Write operation which completes asynchronously, optional and 312 * @async_write: Write operation which completes asynchronously, optional and
298 * must serialise with respect to non-async I/O. 313 * must serialise with respect to non-async I/O.
314 * @reg_write: Write a single register value to the given register address. This
315 * write operation has to complete when returning from the function.
299 * @read: Read operation. Data is returned in the buffer used to transmit 316 * @read: Read operation. Data is returned in the buffer used to transmit
300 * data. 317 * data.
318 * @reg_read: Read a single register value from a given register address.
319 * @free_context: Free context.
301 * @async_alloc: Allocate a regmap_async() structure. 320 * @async_alloc: Allocate a regmap_async() structure.
302 * @read_flag_mask: Mask to be set in the top byte of the register when doing 321 * @read_flag_mask: Mask to be set in the top byte of the register when doing
303 * a read. 322 * a read.
@@ -307,7 +326,8 @@ typedef void (*regmap_hw_free_context)(void *context);
307 * @val_format_endian_default: Default endianness for formatted register 326 * @val_format_endian_default: Default endianness for formatted register
308 * values. Used when the regmap_config specifies DEFAULT. If this is 327 * values. Used when the regmap_config specifies DEFAULT. If this is
309 * DEFAULT, BIG is assumed. 328 * DEFAULT, BIG is assumed.
310 * @async_size: Size of struct used for async work. 329 * @max_raw_read: Max raw read size that can be used on the bus.
330 * @max_raw_write: Max raw write size that can be used on the bus.
311 */ 331 */
312struct regmap_bus { 332struct regmap_bus {
313 bool fast_io; 333 bool fast_io;
@@ -322,47 +342,186 @@ struct regmap_bus {
322 u8 read_flag_mask; 342 u8 read_flag_mask;
323 enum regmap_endian reg_format_endian_default; 343 enum regmap_endian reg_format_endian_default;
324 enum regmap_endian val_format_endian_default; 344 enum regmap_endian val_format_endian_default;
345 size_t max_raw_read;
346 size_t max_raw_write;
325}; 347};
326 348
327struct regmap *regmap_init(struct device *dev, 349/*
328 const struct regmap_bus *bus, 350 * __regmap_init functions.
329 void *bus_context, 351 *
330 const struct regmap_config *config); 352 * These functions take a lock key and name parameter, and should not be called
353 * directly. Instead, use the regmap_init macros that generate a key and name
354 * for each call.
355 */
356struct regmap *__regmap_init(struct device *dev,
357 const struct regmap_bus *bus,
358 void *bus_context,
359 const struct regmap_config *config,
360 struct lock_class_key *lock_key,
361 const char *lock_name);
362struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
363 const struct regmap_config *config,
364 struct lock_class_key *lock_key,
365 const char *lock_name);
366struct regmap *__regmap_init_spi(struct spi_device *dev,
367 const struct regmap_config *config,
368 struct lock_class_key *lock_key,
369 const char *lock_name);
370struct regmap *__regmap_init_spmi_base(struct spmi_device *dev,
371 const struct regmap_config *config,
372 struct lock_class_key *lock_key,
373 const char *lock_name);
374struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev,
375 const struct regmap_config *config,
376 struct lock_class_key *lock_key,
377 const char *lock_name);
378struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
379 void __iomem *regs,
380 const struct regmap_config *config,
381 struct lock_class_key *lock_key,
382 const char *lock_name);
383struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
384 const struct regmap_config *config,
385 struct lock_class_key *lock_key,
386 const char *lock_name);
387
388struct regmap *__devm_regmap_init(struct device *dev,
389 const struct regmap_bus *bus,
390 void *bus_context,
391 const struct regmap_config *config,
392 struct lock_class_key *lock_key,
393 const char *lock_name);
394struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
395 const struct regmap_config *config,
396 struct lock_class_key *lock_key,
397 const char *lock_name);
398struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
399 const struct regmap_config *config,
400 struct lock_class_key *lock_key,
401 const char *lock_name);
402struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev,
403 const struct regmap_config *config,
404 struct lock_class_key *lock_key,
405 const char *lock_name);
406struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev,
407 const struct regmap_config *config,
408 struct lock_class_key *lock_key,
409 const char *lock_name);
410struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
411 const char *clk_id,
412 void __iomem *regs,
413 const struct regmap_config *config,
414 struct lock_class_key *lock_key,
415 const char *lock_name);
416struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
417 const struct regmap_config *config,
418 struct lock_class_key *lock_key,
419 const char *lock_name);
420
421/*
422 * Wrapper for regmap_init macros to include a unique lockdep key and name
423 * for each call. No-op if CONFIG_LOCKDEP is not set.
424 *
425 * @fn: Real function to call (in the form __[*_]regmap_init[_*])
426 * @name: Config variable name (#config in the calling macro)
427 **/
428#ifdef CONFIG_LOCKDEP
429#define __regmap_lockdep_wrapper(fn, name, ...) \
430( \
431 ({ \
432 static struct lock_class_key _key; \
433 fn(__VA_ARGS__, &_key, \
434 KBUILD_BASENAME ":" \
435 __stringify(__LINE__) ":" \
436 "(" name ")->lock"); \
437 }) \
438)
439#else
440#define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL)
441#endif
442
443/**
444 * regmap_init(): Initialise register map
445 *
446 * @dev: Device that will be interacted with
447 * @bus: Bus-specific callbacks to use with device
448 * @bus_context: Data passed to bus-specific callbacks
449 * @config: Configuration for register map
450 *
451 * The return value will be an ERR_PTR() on error or a valid pointer to
452 * a struct regmap. This function should generally not be called
453 * directly, it should be called by bus-specific init functions.
454 */
455#define regmap_init(dev, bus, bus_context, config) \
456 __regmap_lockdep_wrapper(__regmap_init, #config, \
457 dev, bus, bus_context, config)
331int regmap_attach_dev(struct device *dev, struct regmap *map, 458int regmap_attach_dev(struct device *dev, struct regmap *map,
332 const struct regmap_config *config); 459 const struct regmap_config *config);
333struct regmap *regmap_init_i2c(struct i2c_client *i2c,
334 const struct regmap_config *config);
335struct regmap *regmap_init_spi(struct spi_device *dev,
336 const struct regmap_config *config);
337struct regmap *regmap_init_spmi_base(struct spmi_device *dev,
338 const struct regmap_config *config);
339struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
340 const struct regmap_config *config);
341struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
342 void __iomem *regs,
343 const struct regmap_config *config);
344struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
345 const struct regmap_config *config);
346
347struct regmap *devm_regmap_init(struct device *dev,
348 const struct regmap_bus *bus,
349 void *bus_context,
350 const struct regmap_config *config);
351struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
352 const struct regmap_config *config);
353struct regmap *devm_regmap_init_spi(struct spi_device *dev,
354 const struct regmap_config *config);
355struct regmap *devm_regmap_init_spmi_base(struct spmi_device *dev,
356 const struct regmap_config *config);
357struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
358 const struct regmap_config *config);
359struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
360 void __iomem *regs,
361 const struct regmap_config *config);
362struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
363 const struct regmap_config *config);
364 460
365bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); 461/**
462 * regmap_init_i2c(): Initialise register map
463 *
464 * @i2c: Device that will be interacted with
465 * @config: Configuration for register map
466 *
467 * The return value will be an ERR_PTR() on error or a valid pointer to
468 * a struct regmap.
469 */
470#define regmap_init_i2c(i2c, config) \
471 __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \
472 i2c, config)
473
474/**
475 * regmap_init_spi(): Initialise register map
476 *
477 * @spi: Device that will be interacted with
478 * @config: Configuration for register map
479 *
480 * The return value will be an ERR_PTR() on error or a valid pointer to
481 * a struct regmap.
482 */
483#define regmap_init_spi(dev, config) \
484 __regmap_lockdep_wrapper(__regmap_init_spi, #config, \
485 dev, config)
486
487/**
488 * regmap_init_spmi_base(): Create regmap for the Base register space
489 * @sdev: SPMI device that will be interacted with
490 * @config: Configuration for register map
491 *
492 * The return value will be an ERR_PTR() on error or a valid pointer to
493 * a struct regmap.
494 */
495#define regmap_init_spmi_base(dev, config) \
496 __regmap_lockdep_wrapper(__regmap_init_spmi_base, #config, \
497 dev, config)
498
499/**
500 * regmap_init_spmi_ext(): Create regmap for Ext register space
501 * @sdev: Device that will be interacted with
502 * @config: Configuration for register map
503 *
504 * The return value will be an ERR_PTR() on error or a valid pointer to
505 * a struct regmap.
506 */
507#define regmap_init_spmi_ext(dev, config) \
508 __regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config, \
509 dev, config)
510
511/**
512 * regmap_init_mmio_clk(): Initialise register map with register clock
513 *
514 * @dev: Device that will be interacted with
515 * @clk_id: register clock consumer ID
516 * @regs: Pointer to memory-mapped IO region
517 * @config: Configuration for register map
518 *
519 * The return value will be an ERR_PTR() on error or a valid pointer to
520 * a struct regmap.
521 */
522#define regmap_init_mmio_clk(dev, clk_id, regs, config) \
523 __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \
524 dev, clk_id, regs, config)
366 525
367/** 526/**
368 * regmap_init_mmio(): Initialise register map 527 * regmap_init_mmio(): Initialise register map
@@ -374,12 +533,109 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
374 * The return value will be an ERR_PTR() on error or a valid pointer to 533 * The return value will be an ERR_PTR() on error or a valid pointer to
375 * a struct regmap. 534 * a struct regmap.
376 */ 535 */
377static inline struct regmap *regmap_init_mmio(struct device *dev, 536#define regmap_init_mmio(dev, regs, config) \
378 void __iomem *regs, 537 regmap_init_mmio_clk(dev, NULL, regs, config)
379 const struct regmap_config *config) 538
380{ 539/**
381 return regmap_init_mmio_clk(dev, NULL, regs, config); 540 * regmap_init_ac97(): Initialise AC'97 register map
382} 541 *
542 * @ac97: Device that will be interacted with
543 * @config: Configuration for register map
544 *
545 * The return value will be an ERR_PTR() on error or a valid pointer to
546 * a struct regmap.
547 */
548#define regmap_init_ac97(ac97, config) \
549 __regmap_lockdep_wrapper(__regmap_init_ac97, #config, \
550 ac97, config)
551bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
552
553/**
554 * devm_regmap_init(): Initialise managed register map
555 *
556 * @dev: Device that will be interacted with
557 * @bus: Bus-specific callbacks to use with device
558 * @bus_context: Data passed to bus-specific callbacks
559 * @config: Configuration for register map
560 *
561 * The return value will be an ERR_PTR() on error or a valid pointer
562 * to a struct regmap. This function should generally not be called
563 * directly, it should be called by bus-specific init functions. The
564 * map will be automatically freed by the device management code.
565 */
566#define devm_regmap_init(dev, bus, bus_context, config) \
567 __regmap_lockdep_wrapper(__devm_regmap_init, #config, \
568 dev, bus, bus_context, config)
569
570/**
571 * devm_regmap_init_i2c(): Initialise managed register map
572 *
573 * @i2c: Device that will be interacted with
574 * @config: Configuration for register map
575 *
576 * The return value will be an ERR_PTR() on error or a valid pointer
577 * to a struct regmap. The regmap will be automatically freed by the
578 * device management code.
579 */
580#define devm_regmap_init_i2c(i2c, config) \
581 __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \
582 i2c, config)
583
584/**
585 * devm_regmap_init_spi(): Initialise register map
586 *
587 * @spi: Device that will be interacted with
588 * @config: Configuration for register map
589 *
590 * The return value will be an ERR_PTR() on error or a valid pointer
591 * to a struct regmap. The map will be automatically freed by the
592 * device management code.
593 */
594#define devm_regmap_init_spi(dev, config) \
595 __regmap_lockdep_wrapper(__devm_regmap_init_spi, #config, \
596 dev, config)
597
598/**
599 * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
600 * @sdev: SPMI device that will be interacted with
601 * @config: Configuration for register map
602 *
603 * The return value will be an ERR_PTR() on error or a valid pointer
604 * to a struct regmap. The regmap will be automatically freed by the
605 * device management code.
606 */
607#define devm_regmap_init_spmi_base(dev, config) \
608 __regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config, \
609 dev, config)
610
611/**
612 * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
613 * @sdev: SPMI device that will be interacted with
614 * @config: Configuration for register map
615 *
616 * The return value will be an ERR_PTR() on error or a valid pointer
617 * to a struct regmap. The regmap will be automatically freed by the
618 * device management code.
619 */
620#define devm_regmap_init_spmi_ext(dev, config) \
621 __regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config, \
622 dev, config)
623
624/**
625 * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
626 *
627 * @dev: Device that will be interacted with
628 * @clk_id: register clock consumer ID
629 * @regs: Pointer to memory-mapped IO region
630 * @config: Configuration for register map
631 *
632 * The return value will be an ERR_PTR() on error or a valid pointer
633 * to a struct regmap. The regmap will be automatically freed by the
634 * device management code.
635 */
636#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \
637 __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \
638 dev, clk_id, regs, config)
383 639
384/** 640/**
385 * devm_regmap_init_mmio(): Initialise managed register map 641 * devm_regmap_init_mmio(): Initialise managed register map
@@ -392,12 +648,22 @@ static inline struct regmap *regmap_init_mmio(struct device *dev,
392 * to a struct regmap. The regmap will be automatically freed by the 648 * to a struct regmap. The regmap will be automatically freed by the
393 * device management code. 649 * device management code.
394 */ 650 */
395static inline struct regmap *devm_regmap_init_mmio(struct device *dev, 651#define devm_regmap_init_mmio(dev, regs, config) \
396 void __iomem *regs, 652 devm_regmap_init_mmio_clk(dev, NULL, regs, config)
397 const struct regmap_config *config) 653
398{ 654/**
399 return devm_regmap_init_mmio_clk(dev, NULL, regs, config); 655 * devm_regmap_init_ac97(): Initialise AC'97 register map
400} 656 *
657 * @ac97: Device that will be interacted with
658 * @config: Configuration for register map
659 *
660 * The return value will be an ERR_PTR() on error or a valid pointer
661 * to a struct regmap. The regmap will be automatically freed by the
662 * device management code.
663 */
664#define devm_regmap_init_ac97(ac97, config) \
665 __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
666 ac97, config)
401 667
402void regmap_exit(struct regmap *map); 668void regmap_exit(struct regmap *map);
403int regmap_reinit_cache(struct regmap *map, 669int regmap_reinit_cache(struct regmap *map,
@@ -410,10 +676,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
410 const void *val, size_t val_len); 676 const void *val, size_t val_len);
411int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 677int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
412 size_t val_count); 678 size_t val_count);
413int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs, 679int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
414 int num_regs); 680 int num_regs);
415int regmap_multi_reg_write_bypassed(struct regmap *map, 681int regmap_multi_reg_write_bypassed(struct regmap *map,
416 const struct reg_default *regs, 682 const struct reg_sequence *regs,
417 int num_regs); 683 int num_regs);
418int regmap_raw_write_async(struct regmap *map, unsigned int reg, 684int regmap_raw_write_async(struct regmap *map, unsigned int reg,
419 const void *val, size_t val_len); 685 const void *val, size_t val_len);
@@ -424,6 +690,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
424 size_t val_count); 690 size_t val_count);
425int regmap_update_bits(struct regmap *map, unsigned int reg, 691int regmap_update_bits(struct regmap *map, unsigned int reg,
426 unsigned int mask, unsigned int val); 692 unsigned int mask, unsigned int val);
693int regmap_write_bits(struct regmap *map, unsigned int reg,
694 unsigned int mask, unsigned int val);
427int regmap_update_bits_async(struct regmap *map, unsigned int reg, 695int regmap_update_bits_async(struct regmap *map, unsigned int reg,
428 unsigned int mask, unsigned int val); 696 unsigned int mask, unsigned int val);
429int regmap_update_bits_check(struct regmap *map, unsigned int reg, 697int regmap_update_bits_check(struct regmap *map, unsigned int reg,
@@ -437,6 +705,8 @@ int regmap_get_max_register(struct regmap *map);
437int regmap_get_reg_stride(struct regmap *map); 705int regmap_get_reg_stride(struct regmap *map);
438int regmap_async_complete(struct regmap *map); 706int regmap_async_complete(struct regmap *map);
439bool regmap_can_raw_write(struct regmap *map); 707bool regmap_can_raw_write(struct regmap *map);
708size_t regmap_get_raw_read_max(struct regmap *map);
709size_t regmap_get_raw_write_max(struct regmap *map);
440 710
441int regcache_sync(struct regmap *map); 711int regcache_sync(struct regmap *map);
442int regcache_sync_region(struct regmap *map, unsigned int min, 712int regcache_sync_region(struct regmap *map, unsigned int min,
@@ -450,7 +720,7 @@ void regcache_mark_dirty(struct regmap *map);
450bool regmap_check_range_table(struct regmap *map, unsigned int reg, 720bool regmap_check_range_table(struct regmap *map, unsigned int reg,
451 const struct regmap_access_table *table); 721 const struct regmap_access_table *table);
452 722
453int regmap_register_patch(struct regmap *map, const struct reg_default *regs, 723int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
454 int num_regs); 724 int num_regs);
455int regmap_parse_val(struct regmap *map, const void *buf, 725int regmap_parse_val(struct regmap *map, const void *buf,
456 unsigned int *val); 726 unsigned int *val);
@@ -503,6 +773,8 @@ int regmap_field_update_bits(struct regmap_field *field,
503 773
504int regmap_fields_write(struct regmap_field *field, unsigned int id, 774int regmap_fields_write(struct regmap_field *field, unsigned int id,
505 unsigned int val); 775 unsigned int val);
776int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
777 unsigned int val);
506int regmap_fields_read(struct regmap_field *field, unsigned int id, 778int regmap_fields_read(struct regmap_field *field, unsigned int id,
507 unsigned int *val); 779 unsigned int *val);
508int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, 780int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
@@ -645,6 +917,13 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
645 return -EINVAL; 917 return -EINVAL;
646} 918}
647 919
920static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
921 unsigned int mask, unsigned int val)
922{
923 WARN_ONCE(1, "regmap API is disabled");
924 return -EINVAL;
925}
926
648static inline int regmap_update_bits_async(struct regmap *map, 927static inline int regmap_update_bits_async(struct regmap *map,
649 unsigned int reg, 928 unsigned int reg,
650 unsigned int mask, unsigned int val) 929 unsigned int mask, unsigned int val)
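
A sketch of a consumer after this change: devm_regmap_init_i2c() is now a macro, so each call site transparently gets its own lockdep class key while the calling convention stays the same. The config values and probe helper are invented for illustration:

#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config demo_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7f,
};

static int demo_probe(struct i2c_client *i2c)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(i2c, &demo_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_write(map, 0x00, 0x01);	/* invented wake-up write */
}
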
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f8a689ed62a5..9e0e76992be0 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -550,8 +550,24 @@ static inline int regulator_count_voltages(struct regulator *regulator)
550{ 550{
551 return 0; 551 return 0;
552} 552}
553
554static inline int regulator_list_voltage(struct regulator *regulator, unsigned selector)
555{
556 return -EINVAL;
557}
558
553#endif 559#endif
554 560
561static inline int regulator_set_voltage_triplet(struct regulator *regulator,
562 int min_uV, int target_uV,
563 int max_uV)
564{
565 if (regulator_set_voltage(regulator, target_uV, max_uV) == 0)
566 return 0;
567
568 return regulator_set_voltage(regulator, min_uV, max_uV);
569}
570
555static inline int regulator_set_voltage_tol(struct regulator *regulator, 571static inline int regulator_set_voltage_tol(struct regulator *regulator,
556 int new_uV, int tol_uV) 572 int new_uV, int tol_uV)
557{ 573{
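
A sketch of the intended use of the new regulator_set_voltage_triplet() helper: request a preferred voltage but fall back to the wider supported range. The microvolt values are invented:

#include <linux/regulator/consumer.h>

static int demo_set_core_voltage(struct regulator *reg)
{
	/* Prefer 1.10 V, but accept anything between 1.05 V and 1.20 V. */
	return regulator_set_voltage_triplet(reg, 1050000, 1100000, 1200000);
}
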
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5dd65acc2a69..a43a5ca1167b 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,16 +1,16 @@
1/* 1/*
2 * da9211.h - Regulator device driver for DA9211/DA9213 2 * da9211.h - Regulator device driver for DA9211/DA9213/DA9215
3 * Copyright (C) 2014 Dialog Semiconductor Ltd. 3 * Copyright (C) 2015 Dialog Semiconductor Ltd.
4 * 4 *
5 * This library is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public 6 * modify it under the terms of the GNU General Public License
7 * License as published by the Free Software Foundation; either 7 * as published by the Free Software Foundation; either version 2
8 * version 2 of the License, or (at your option) any later version. 8 * of the License, or (at your option) any later version.
9 * 9 *
10 * This library is distributed in the hope that it will be useful, 10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * Library General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#ifndef __LINUX_REGULATOR_DA9211_H 16#ifndef __LINUX_REGULATOR_DA9211_H
@@ -23,6 +23,7 @@
23enum da9211_chip_id { 23enum da9211_chip_id {
24 DA9211, 24 DA9211,
25 DA9213, 25 DA9213,
26 DA9215,
26}; 27};
27 28
28struct da9211_pdata { 29struct da9211_pdata {
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4db9fbe4889d..45932228cbf5 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -148,6 +148,7 @@ struct regulator_ops {
148 int (*get_current_limit) (struct regulator_dev *); 148 int (*get_current_limit) (struct regulator_dev *);
149 149
150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); 150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
151 int (*set_over_current_protection) (struct regulator_dev *);
151 152
152 /* enable/disable regulator */ 153 /* enable/disable regulator */
153 int (*enable) (struct regulator_dev *); 154 int (*enable) (struct regulator_dev *);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b11be1260129..a1067d0b3991 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -147,6 +147,7 @@ struct regulation_constraints {
147 unsigned ramp_disable:1; /* disable ramp delay */ 147 unsigned ramp_disable:1; /* disable ramp delay */
148 unsigned soft_start:1; /* ramp voltage slowly */ 148 unsigned soft_start:1; /* ramp voltage slowly */
149 unsigned pull_down:1; /* pull down resistor when regulator off */ 149 unsigned pull_down:1; /* pull down resistor when regulator off */
150 unsigned over_current_protection:1; /* auto disable on over current */
150}; 151};
151 152
152/** 153/**
diff --git a/include/linux/regulator/mt6311.h b/include/linux/regulator/mt6311.h
new file mode 100644
index 000000000000..8473259395b6
--- /dev/null
+++ b/include/linux/regulator/mt6311.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2015 MediaTek Inc.
3 * Author: Henry Chen <henryc.chen@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __LINUX_REGULATOR_MT6311_H
16#define __LINUX_REGULATOR_MT6311_H
17
18#define MT6311_MAX_REGULATORS 2
19
20enum {
21 MT6311_ID_VDVFS = 0,
22 MT6311_ID_VBIASN,
23};
24
25#define MT6311_E1_CID_CODE 0x10
26#define MT6311_E2_CID_CODE 0x20
27#define MT6311_E3_CID_CODE 0x30
28
29#endif /* __LINUX_REGULATOR_MT6311_H */
diff --git a/include/linux/reset.h b/include/linux/reset.h
index da5602bd77d7..7f65f9cff951 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -74,6 +74,20 @@ static inline int device_reset_optional(struct device *dev)
74 return -ENOSYS; 74 return -ENOSYS;
75} 75}
76 76
77static inline struct reset_control *__must_check reset_control_get(
78 struct device *dev, const char *id)
79{
80 WARN_ON(1);
81 return ERR_PTR(-EINVAL);
82}
83
84static inline struct reset_control *__must_check devm_reset_control_get(
85 struct device *dev, const char *id)
86{
87 WARN_ON(1);
88 return ERR_PTR(-EINVAL);
89}
90
77static inline struct reset_control *reset_control_get_optional( 91static inline struct reset_control *reset_control_get_optional(
78 struct device *dev, const char *id) 92 struct device *dev, const char *id)
79{ 93{
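A consumer-side sketch (not taken from this diff) showing why the new __must_check stubs matter: a driver that requires a reset line now gets a WARN and an error pointer when CONFIG_RESET_CONTROLLER is disabled, instead of carrying on with a bogus handle. The device and reset name are hypothetical.

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/reset.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct reset_control *rstc;

		/* Mandatory reset line: with the reset framework compiled
		 * out, this returns ERR_PTR(-EINVAL) and probe fails cleanly. */
		rstc = devm_reset_control_get(&pdev->dev, "core");
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		return reset_control_deassert(rstc);
	}
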
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c89c53a113a8..29446aeef36e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -89,6 +89,9 @@ enum ttu_flags {
89 TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ 89 TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
90 TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ 90 TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
91 TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */ 91 TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
92 TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
93 * and caller guarantees they will
94 * do a final flush if necessary */
92}; 95};
93 96
94#ifdef CONFIG_MMU 97#ifdef CONFIG_MMU
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 9b1ef0c820a7..556ec1ea2574 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -161,10 +161,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
161static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, 161static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
162 struct scatterlist *sgl) 162 struct scatterlist *sgl)
163{ 163{
164#ifndef CONFIG_ARCH_HAS_SG_CHAIN
165 BUG();
166#endif
167
168 /* 164 /*
169 * offset and length are unused for chain entry. Clear them. 165 * offset and length are unused for chain entry. Clear them.
170 */ 166 */
@@ -251,6 +247,11 @@ struct scatterlist *sg_next(struct scatterlist *);
251struct scatterlist *sg_last(struct scatterlist *s, unsigned int); 247struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
252void sg_init_table(struct scatterlist *, unsigned int); 248void sg_init_table(struct scatterlist *, unsigned int);
253void sg_init_one(struct scatterlist *, const void *, unsigned int); 249void sg_init_one(struct scatterlist *, const void *, unsigned int);
250int sg_split(struct scatterlist *in, const int in_mapped_nents,
251 const off_t skip, const int nb_splits,
252 const size_t *split_sizes,
253 struct scatterlist **out, int *out_mapped_nents,
254 gfp_t gfp_mask);
254 255
255typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); 256typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
256typedef void (sg_free_fn)(struct scatterlist *, unsigned int); 257typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
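A sketch (not taken from this diff) of the new sg_split() helper declared above: splitting an already mapped scatterlist into two pieces. The sizes and the caller-provided sgl/mapped_nents are hypothetical.

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int example_split(struct scatterlist *sgl, int mapped_nents)
	{
		size_t split_sizes[2] = { 4096, 8192 };
		struct scatterlist *out[2];
		int out_nents[2];
		int ret;

		/* sg_split() allocates the two output tables with gfp_mask. */
		ret = sg_split(sgl, mapped_nents, 0, 2, split_sizes,
			       out, out_nents, GFP_KERNEL);
		if (ret)
			return ret;

		/* ... hand out[0]/out[1] to separate consumers, then kfree() them ... */
		return 0;
	}
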
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04b5ada460b4..a4ab9daa387c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -530,39 +530,49 @@ struct cpu_itimer {
530}; 530};
531 531
532/** 532/**
533 * struct cputime - snapshot of system and user cputime 533 * struct prev_cputime - snapshot of system and user cputime
534 * @utime: time spent in user mode 534 * @utime: time spent in user mode
535 * @stime: time spent in system mode 535 * @stime: time spent in system mode
536 * @lock: protects the above two fields
536 * 537 *
537 * Gathers a generic snapshot of user and system time. 538 * Stores previous user/system time values such that we can guarantee
539 * monotonicity.
538 */ 540 */
539struct cputime { 541struct prev_cputime {
542#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
540 cputime_t utime; 543 cputime_t utime;
541 cputime_t stime; 544 cputime_t stime;
545 raw_spinlock_t lock;
546#endif
542}; 547};
543 548
549static inline void prev_cputime_init(struct prev_cputime *prev)
550{
551#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
552 prev->utime = prev->stime = 0;
553 raw_spin_lock_init(&prev->lock);
554#endif
555}
556
544/** 557/**
545 * struct task_cputime - collected CPU time counts 558 * struct task_cputime - collected CPU time counts
546 * @utime: time spent in user mode, in &cputime_t units 559 * @utime: time spent in user mode, in &cputime_t units
547 * @stime: time spent in kernel mode, in &cputime_t units 560 * @stime: time spent in kernel mode, in &cputime_t units
548 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds 561 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
549 * 562 *
550 * This is an extension of struct cputime that includes the total runtime 563 * This structure groups together three kinds of CPU time that are tracked for
551 * spent by the task from the scheduler point of view. 564 * threads and thread groups. Most things considering CPU time want to group
552 * 565 * these counts together and treat all three of them in parallel.
553 * As a result, this structure groups together three kinds of CPU time
554 * that are tracked for threads and thread groups. Most things considering
555 * CPU time want to group these counts together and treat all three
556 * of them in parallel.
557 */ 566 */
558struct task_cputime { 567struct task_cputime {
559 cputime_t utime; 568 cputime_t utime;
560 cputime_t stime; 569 cputime_t stime;
561 unsigned long long sum_exec_runtime; 570 unsigned long long sum_exec_runtime;
562}; 571};
572
563/* Alternate field names when used to cache expirations. */ 573/* Alternate field names when used to cache expirations. */
564#define prof_exp stime
565#define virt_exp utime 574#define virt_exp utime
575#define prof_exp stime
566#define sched_exp sum_exec_runtime 576#define sched_exp sum_exec_runtime
567 577
568#define INIT_CPUTIME \ 578#define INIT_CPUTIME \
@@ -715,9 +725,7 @@ struct signal_struct {
715 cputime_t utime, stime, cutime, cstime; 725 cputime_t utime, stime, cutime, cstime;
716 cputime_t gtime; 726 cputime_t gtime;
717 cputime_t cgtime; 727 cputime_t cgtime;
718#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 728 struct prev_cputime prev_cputime;
719 struct cputime prev_cputime;
720#endif
721 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 729 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
722 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; 730 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
723 unsigned long inblock, oublock, cinblock, coublock; 731 unsigned long inblock, oublock, cinblock, coublock;
@@ -1167,29 +1175,24 @@ struct load_weight {
1167 u32 inv_weight; 1175 u32 inv_weight;
1168}; 1176};
1169 1177
1178/*
1179 * The load_avg/util_avg accumulates an infinite geometric series.
1180 * 1) load_avg factors the amount of time that a sched_entity is
1181 * runnable on a rq into its weight. For cfs_rq, it is the aggregated
1182 * such weights of all runnable and blocked sched_entities.
1183 * 2) util_avg factors frequency scaling into the amount of time
1184 * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
1185 * For cfs_rq, it is the aggregated such times of all runnable and
1186 * blocked sched_entities.
1187 * The 64 bit load_sum can:
1188 * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
1189 * the highest weight (=88761) always runnable, we should not overflow
1190 * 2) for entity, support any load.weight always runnable
1191 */
1170struct sched_avg { 1192struct sched_avg {
1171 u64 last_runnable_update; 1193 u64 last_update_time, load_sum;
1172 s64 decay_count; 1194 u32 util_sum, period_contrib;
1173 /* 1195 unsigned long load_avg, util_avg;
1174 * utilization_avg_contrib describes the amount of time that a
1175 * sched_entity is running on a CPU. It is based on running_avg_sum
1176 * and is scaled in the range [0..SCHED_LOAD_SCALE].
1177 * load_avg_contrib described the amount of time that a sched_entity
1178 * is runnable on a rq. It is based on both runnable_avg_sum and the
1179 * weight of the task.
1180 */
1181 unsigned long load_avg_contrib, utilization_avg_contrib;
1182 /*
1183 * These sums represent an infinite geometric series and so are bound
1184 * above by 1024/(1-y). Thus we only need a u32 to store them for all
1185 * choices of y < 1-2^(-32)*1024.
1186 * running_avg_sum reflects the time that the sched_entity is
1187 * effectively running on the CPU.
1188 * runnable_avg_sum represents the amount of time a sched_entity is on
1189 * a runqueue which includes the running time that is monitored by
1190 * running_avg_sum.
1191 */
1192 u32 runnable_avg_sum, avg_period, running_avg_sum;
1193}; 1196};
1194 1197
1195#ifdef CONFIG_SCHEDSTATS 1198#ifdef CONFIG_SCHEDSTATS
@@ -1255,7 +1258,7 @@ struct sched_entity {
1255#endif 1258#endif
1256 1259
1257#ifdef CONFIG_SMP 1260#ifdef CONFIG_SMP
1258 /* Per-entity load-tracking */ 1261 /* Per entity load average tracking */
1259 struct sched_avg avg; 1262 struct sched_avg avg;
1260#endif 1263#endif
1261}; 1264};
@@ -1341,6 +1344,25 @@ enum perf_event_task_context {
1341 perf_nr_task_contexts, 1344 perf_nr_task_contexts,
1342}; 1345};
1343 1346
1347/* Track pages that require TLB flushes */
1348struct tlbflush_unmap_batch {
1349 /*
1350 * Each bit set is a CPU that potentially has a TLB entry for one of
1351 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
1352 */
1353 struct cpumask cpumask;
1354
1355 /* True if any bit in cpumask is set */
1356 bool flush_required;
1357
1358 /*
1359 * If true then the PTE was dirty when unmapped. The entry must be
1360 * flushed before IO is initiated or a stale TLB entry potentially
1361 * allows an update without redirtying the page.
1362 */
1363 bool writable;
1364};
1365
1344struct task_struct { 1366struct task_struct {
1345 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 1367 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
1346 void *stack; 1368 void *stack;
@@ -1351,9 +1373,9 @@ struct task_struct {
1351#ifdef CONFIG_SMP 1373#ifdef CONFIG_SMP
1352 struct llist_node wake_entry; 1374 struct llist_node wake_entry;
1353 int on_cpu; 1375 int on_cpu;
1354 struct task_struct *last_wakee; 1376 unsigned int wakee_flips;
1355 unsigned long wakee_flips;
1356 unsigned long wakee_flip_decay_ts; 1377 unsigned long wakee_flip_decay_ts;
1378 struct task_struct *last_wakee;
1357 1379
1358 int wake_cpu; 1380 int wake_cpu;
1359#endif 1381#endif
@@ -1481,9 +1503,7 @@ struct task_struct {
1481 1503
1482 cputime_t utime, stime, utimescaled, stimescaled; 1504 cputime_t utime, stime, utimescaled, stimescaled;
1483 cputime_t gtime; 1505 cputime_t gtime;
1484#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 1506 struct prev_cputime prev_cputime;
1485 struct cputime prev_cputime;
1486#endif
1487#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1507#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1488 seqlock_t vtime_seqlock; 1508 seqlock_t vtime_seqlock;
1489 unsigned long long vtime_snap; 1509 unsigned long long vtime_snap;
@@ -1699,6 +1719,10 @@ struct task_struct {
1699 unsigned long numa_pages_migrated; 1719 unsigned long numa_pages_migrated;
1700#endif /* CONFIG_NUMA_BALANCING */ 1720#endif /* CONFIG_NUMA_BALANCING */
1701 1721
1722#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1723 struct tlbflush_unmap_batch tlb_ubc;
1724#endif
1725
1702 struct rcu_head rcu; 1726 struct rcu_head rcu;
1703 1727
1704 /* 1728 /*
@@ -2214,13 +2238,6 @@ static inline void calc_load_enter_idle(void) { }
2214static inline void calc_load_exit_idle(void) { } 2238static inline void calc_load_exit_idle(void) { }
2215#endif /* CONFIG_NO_HZ_COMMON */ 2239#endif /* CONFIG_NO_HZ_COMMON */
2216 2240
2217#ifndef CONFIG_CPUMASK_OFFSTACK
2218static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2219{
2220 return set_cpus_allowed_ptr(p, &new_mask);
2221}
2222#endif
2223
2224/* 2241/*
2225 * Do not use outside of architecture code which knows its limitations. 2242 * Do not use outside of architecture code which knows its limitations.
2226 * 2243 *
@@ -2897,12 +2914,6 @@ extern int _cond_resched(void);
2897 2914
2898extern int __cond_resched_lock(spinlock_t *lock); 2915extern int __cond_resched_lock(spinlock_t *lock);
2899 2916
2900#ifdef CONFIG_PREEMPT_COUNT
2901#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2902#else
2903#define PREEMPT_LOCK_OFFSET 0
2904#endif
2905
2906#define cond_resched_lock(lock) ({ \ 2917#define cond_resched_lock(lock) ({ \
2907 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ 2918 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2908 __cond_resched_lock(lock); \ 2919 __cond_resched_lock(lock); \
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index a19ddacdac30..f4265039a94c 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -78,7 +78,7 @@ static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3)
78 78
79static inline int seccomp_mode(struct seccomp *s) 79static inline int seccomp_mode(struct seccomp *s)
80{ 80{
81 return 0; 81 return SECCOMP_MODE_DISABLED;
82} 82}
83#endif /* CONFIG_SECCOMP */ 83#endif /* CONFIG_SECCOMP */
84 84
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 912a7c482649..adeadbd6d7bf 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -122,6 +122,10 @@ int seq_write(struct seq_file *seq, const void *data, size_t len);
122__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...); 122__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
123__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args); 123__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
124 124
125void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
126 int rowsize, int groupsize, const void *buf, size_t len,
127 bool ascii);
128
125int seq_path(struct seq_file *, const struct path *, const char *); 129int seq_path(struct seq_file *, const struct path *, const char *);
126int seq_file_path(struct seq_file *, struct file *, const char *); 130int seq_file_path(struct seq_file *, struct file *, const char *);
127int seq_dentry(struct seq_file *, struct dentry *, const char *); 131int seq_dentry(struct seq_file *, struct dentry *, const char *);
@@ -149,6 +153,41 @@ static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
149#endif 153#endif
150} 154}
151 155
156/**
157 * seq_show_option - display a mount option with appropriate escapes.
158 * @m: the seq_file handle
159 * @name: the mount option name
160 * @value: the mount option name's value, can be NULL
161 */
162static inline void seq_show_option(struct seq_file *m, const char *name,
163 const char *value)
164{
165 seq_putc(m, ',');
166 seq_escape(m, name, ",= \t\n\\");
167 if (value) {
168 seq_putc(m, '=');
169 seq_escape(m, value, ", \t\n\\");
170 }
171}
172
173/**
174 * seq_show_option_n - display mount options with appropriate escapes
175 * where @value must be a specific length.
176 * @m: the seq_file handle
177 * @name: the mount option name
178 * @value: the mount option name's value, cannot be NULL
179 * @length: the length of @value to display
180 *
181 * This is a macro since this uses "length" to define the size of the
182 * stack buffer.
183 */
184#define seq_show_option_n(m, name, value, length) { \
185 char val_buf[length + 1]; \
186 strncpy(val_buf, value, length); \
187 val_buf[length] = '\0'; \
188 seq_show_option(m, name, val_buf); \
189}
190
152#define SEQ_START_TOKEN ((void *)1) 191#define SEQ_START_TOKEN ((void *)1)
153/* 192/*
154 * Helpers for iteration over list_head-s in seq_files 193 * Helpers for iteration over list_head-s in seq_files
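A sketch (not taken from this diff) of how a filesystem's ->show_options implementation might use the new escaping helpers; the example_sb_info structure and option names are hypothetical.

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	struct example_sb_info {			/* hypothetical mount state */
		const char *server_name;
		char domain[16];
	};

	static int example_show_options(struct seq_file *m, struct dentry *root)
	{
		struct example_sb_info *sbi = root->d_sb->s_fs_info;

		/* Emits ",server=<value>" with ',', '=' and whitespace escaped. */
		seq_show_option(m, "server", sbi->server_name);

		/* Fixed-length value that may not be NUL-terminated. */
		seq_show_option_n(m, "domain", sbi->domain, sizeof(sbi->domain));
		return 0;
	}
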
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index ba82c07feb95..faa0e0370ce7 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -136,8 +136,6 @@ void serial8250_resume_port(int line);
136 136
137extern int early_serial_setup(struct uart_port *port); 137extern int early_serial_setup(struct uart_port *port);
138 138
139extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
140extern void serial8250_early_out(struct uart_port *port, int offset, int value);
141extern int early_serial8250_setup(struct earlycon_device *device, 139extern int early_serial8250_setup(struct earlycon_device *device,
142 const char *options); 140 const char *options);
143extern void serial8250_do_set_termios(struct uart_port *port, 141extern void serial8250_do_set_termios(struct uart_port *port,
@@ -152,6 +150,11 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
152unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr); 150unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
153void serial8250_tx_chars(struct uart_8250_port *up); 151void serial8250_tx_chars(struct uart_8250_port *up);
154unsigned int serial8250_modem_status(struct uart_8250_port *up); 152unsigned int serial8250_modem_status(struct uart_8250_port *up);
153void serial8250_init_port(struct uart_8250_port *up);
154void serial8250_set_defaults(struct uart_8250_port *up);
155void serial8250_console_write(struct uart_8250_port *up, const char *s,
156 unsigned int count);
157int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
155 158
156extern void serial8250_set_isa_configurator(void (*v) 159extern void serial8250_set_isa_configurator(void (*v)
157 (int port, struct uart_port *up, 160 (int port, struct uart_port *up,
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 9f779c7a2da4..df4ab5de1586 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -18,6 +18,8 @@
18#include <linux/mod_devicetable.h> 18#include <linux/mod_devicetable.h>
19#include <uapi/linux/serio.h> 19#include <uapi/linux/serio.h>
20 20
21extern struct bus_type serio_bus;
22
21struct serio { 23struct serio {
22 void *port_data; 24 void *port_data;
23 25
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index dd0ba502ccb3..d927647e6350 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -128,7 +128,10 @@ void shdma_cleanup(struct shdma_dev *sdev);
128#if IS_ENABLED(CONFIG_SH_DMAE_BASE) 128#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
129bool shdma_chan_filter(struct dma_chan *chan, void *arg); 129bool shdma_chan_filter(struct dma_chan *chan, void *arg);
130#else 130#else
131#define shdma_chan_filter NULL 131static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
132{
133 return false;
134}
132#endif 135#endif
133 136
134#endif 137#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b88536487e6..2738d355cdf9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
37#include <net/flow_dissector.h> 37#include <net/flow_dissector.h>
38#include <linux/splice.h> 38#include <linux/splice.h>
39#include <linux/in6.h> 39#include <linux/in6.h>
40#include <net/flow.h>
40 41
41/* A. Checksumming of received packets by device. 42/* A. Checksumming of received packets by device.
42 * 43 *
@@ -173,17 +174,24 @@ struct nf_bridge_info {
173 BRNF_PROTO_8021Q, 174 BRNF_PROTO_8021Q,
174 BRNF_PROTO_PPPOE 175 BRNF_PROTO_PPPOE
175 } orig_proto:8; 176 } orig_proto:8;
176 bool pkt_otherhost; 177 u8 pkt_otherhost:1;
178 u8 in_prerouting:1;
179 u8 bridged_dnat:1;
177 __u16 frag_max_size; 180 __u16 frag_max_size;
178 unsigned int mask;
179 struct net_device *physindev; 181 struct net_device *physindev;
180 union { 182 union {
181 struct net_device *physoutdev; 183 /* prerouting: detect dnat in orig/reply direction */
182 char neigh_header[8];
183 };
184 union {
185 __be32 ipv4_daddr; 184 __be32 ipv4_daddr;
186 struct in6_addr ipv6_daddr; 185 struct in6_addr ipv6_daddr;
186
187 /* after prerouting + nat detected: store original source
188 * mac since neigh resolution overwrites it, only used while
189 * skb is out in neigh layer.
190 */
191 char neigh_header[8];
192
193 /* always valid & non-NULL from FORWARD on, for physdev match */
194 struct net_device *physoutdev;
187 }; 195 };
188}; 196};
189#endif 197#endif
@@ -506,6 +514,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
506 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS 514 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
507 * @napi_id: id of the NAPI struct this skb came from 515 * @napi_id: id of the NAPI struct this skb came from
508 * @secmark: security marking 516 * @secmark: security marking
517 * @offload_fwd_mark: forwarding offload mark
509 * @mark: Generic packet mark 518 * @mark: Generic packet mark
510 * @vlan_proto: vlan encapsulation protocol 519 * @vlan_proto: vlan encapsulation protocol
511 * @vlan_tci: vlan tag control information 520 * @vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@ struct sk_buff {
650 unsigned int sender_cpu; 659 unsigned int sender_cpu;
651 }; 660 };
652#endif 661#endif
662 union {
653#ifdef CONFIG_NETWORK_SECMARK 663#ifdef CONFIG_NETWORK_SECMARK
654 __u32 secmark; 664 __u32 secmark;
665#endif
666#ifdef CONFIG_NET_SWITCHDEV
667 __u32 offload_fwd_mark;
655#endif 668#endif
669 };
670
656 union { 671 union {
657 __u32 mark; 672 __u32 mark;
658 __u32 reserved_tailroom; 673 __u32 reserved_tailroom;
@@ -922,14 +937,90 @@ enum pkt_hash_types {
922 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ 937 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
923}; 938};
924 939
925static inline void 940static inline void skb_clear_hash(struct sk_buff *skb)
926skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
927{ 941{
928 skb->l4_hash = (type == PKT_HASH_TYPE_L4); 942 skb->hash = 0;
929 skb->sw_hash = 0; 943 skb->sw_hash = 0;
944 skb->l4_hash = 0;
945}
946
947static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
948{
949 if (!skb->l4_hash)
950 skb_clear_hash(skb);
951}
952
953static inline void
954__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
955{
956 skb->l4_hash = is_l4;
957 skb->sw_hash = is_sw;
930 skb->hash = hash; 958 skb->hash = hash;
931} 959}
932 960
961static inline void
962skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
963{
964 /* Used by drivers to set hash from HW */
965 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
966}
967
968static inline void
969__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
970{
971 __skb_set_hash(skb, hash, true, is_l4);
972}
973
974void __skb_get_hash(struct sk_buff *skb);
975u32 skb_get_poff(const struct sk_buff *skb);
976u32 __skb_get_poff(const struct sk_buff *skb, void *data,
977 const struct flow_keys *keys, int hlen);
978__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
979 void *data, int hlen_proto);
980
981static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
982 int thoff, u8 ip_proto)
983{
984 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
985}
986
987void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
988 const struct flow_dissector_key *key,
989 unsigned int key_count);
990
991bool __skb_flow_dissect(const struct sk_buff *skb,
992 struct flow_dissector *flow_dissector,
993 void *target_container,
994 void *data, __be16 proto, int nhoff, int hlen,
995 unsigned int flags);
996
997static inline bool skb_flow_dissect(const struct sk_buff *skb,
998 struct flow_dissector *flow_dissector,
999 void *target_container, unsigned int flags)
1000{
1001 return __skb_flow_dissect(skb, flow_dissector, target_container,
1002 NULL, 0, 0, 0, flags);
1003}
1004
1005static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1006 struct flow_keys *flow,
1007 unsigned int flags)
1008{
1009 memset(flow, 0, sizeof(*flow));
1010 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
1011 NULL, 0, 0, 0, flags);
1012}
1013
1014static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1015 void *data, __be16 proto,
1016 int nhoff, int hlen,
1017 unsigned int flags)
1018{
1019 memset(flow, 0, sizeof(*flow));
1020 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
1021 data, proto, nhoff, hlen, flags);
1022}
1023
933static inline __u32 skb_get_hash(struct sk_buff *skb) 1024static inline __u32 skb_get_hash(struct sk_buff *skb)
934{ 1025{
935 if (!skb->l4_hash && !skb->sw_hash) 1026 if (!skb->l4_hash && !skb->sw_hash)
@@ -938,24 +1029,39 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
938 return skb->hash; 1029 return skb->hash;
939} 1030}
940 1031
941__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); 1032__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
942 1033
943static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) 1034static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
944{ 1035{
1036 if (!skb->l4_hash && !skb->sw_hash) {
1037 struct flow_keys keys;
1038 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1039
1040 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1041 }
1042
945 return skb->hash; 1043 return skb->hash;
946} 1044}
947 1045
948static inline void skb_clear_hash(struct sk_buff *skb) 1046__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
1047
1048static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
949{ 1049{
950 skb->hash = 0; 1050 if (!skb->l4_hash && !skb->sw_hash) {
951 skb->sw_hash = 0; 1051 struct flow_keys keys;
952 skb->l4_hash = 0; 1052 __u32 hash = __get_hash_from_flowi4(fl4, &keys);
1053
1054 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1055 }
1056
1057 return skb->hash;
953} 1058}
954 1059
955static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) 1060__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1061
1062static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
956{ 1063{
957 if (!skb->l4_hash) 1064 return skb->hash;
958 skb_clear_hash(skb);
959} 1065}
960 1066
961static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) 1067static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
@@ -1943,7 +2049,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
1943 2049
1944 if (skb_transport_header_was_set(skb)) 2050 if (skb_transport_header_was_set(skb))
1945 return; 2051 return;
1946 else if (skb_flow_dissect_flow_keys(skb, &keys)) 2052 else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
1947 skb_set_transport_header(skb, keys.control.thoff); 2053 skb_set_transport_header(skb, keys.control.thoff);
1948 else 2054 else
1949 skb_set_transport_header(skb, offset_hint); 2055 skb_set_transport_header(skb, offset_hint);
@@ -2667,12 +2773,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
2667 skb_shinfo(skb)->frag_list = NULL; 2773 skb_shinfo(skb)->frag_list = NULL;
2668} 2774}
2669 2775
2670static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
2671{
2672 frag->next = skb_shinfo(skb)->frag_list;
2673 skb_shinfo(skb)->frag_list = frag;
2674}
2675
2676#define skb_walk_frags(skb, iter) \ 2776#define skb_walk_frags(skb, iter) \
2677 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) 2777 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2678 2778
@@ -3464,5 +3564,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
3464 skb_network_header(skb); 3564 skb_network_header(skb);
3465 return hdr_len + skb_gso_transport_seglen(skb); 3565 return hdr_len + skb_gso_transport_seglen(skb);
3466} 3566}
3567
3467#endif /* __KERNEL__ */ 3568#endif /* __KERNEL__ */
3468#endif /* _LINUX_SKBUFF_H */ 3569#endif /* _LINUX_SKBUFF_H */
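A brief sketch (not taken from this diff) of the flowi6-based hash helper added above: when the skb carries no L4 or software hash yet, the hash is derived from the routing key and cached on the skb. The function name is hypothetical.

	#include <linux/skbuff.h>
	#include <net/flow.h>

	static u32 example_tunnel_hash(struct sk_buff *skb, const struct flowi6 *fl6)
	{
		/* Computes and caches a software hash from fl6 on first use,
		 * then simply returns skb->hash on subsequent calls. */
		return skb_get_hash_flowi6(skb, fl6);
	}
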
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a99f0e5243e1..7e37d448ed91 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -290,6 +290,16 @@ void *__kmalloc(size_t size, gfp_t flags);
290void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); 290void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
291void kmem_cache_free(struct kmem_cache *, void *); 291void kmem_cache_free(struct kmem_cache *, void *);
292 292
293/*
294 * Bulk allocation and freeing operations. These are accelerated in an
295 * allocator-specific way to avoid taking locks repeatedly or building
296 * metadata structures unnecessarily.
297 *
298 * Note that interrupts must be enabled when calling these functions.
299 */
300void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
301bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
302
293#ifdef CONFIG_NUMA 303#ifdef CONFIG_NUMA
294void *__kmalloc_node(size_t size, gfp_t flags, int node); 304void *__kmalloc_node(size_t size, gfp_t flags, int node);
295void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 305void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
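A sketch (not taken from this diff) of the bulk API declared above: kmem_cache_alloc_bulk() returns false on failure, and both calls must be made with interrupts enabled. The cache is assumed to come from kmem_cache_create() elsewhere.

	#include <linux/kernel.h>
	#include <linux/slab.h>

	static int example_bulk(struct kmem_cache *cache)
	{
		void *objs[16];

		if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
			return -ENOMEM;

		/* ... use all 16 objects ... */

		kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
		return 0;
	}
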
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index da3c593f9845..e6109a6cd8f6 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -48,7 +48,16 @@ struct smp_hotplug_thread {
48 const char *thread_comm; 48 const char *thread_comm;
49}; 49};
50 50
51int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); 51int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
52 const struct cpumask *cpumask);
53
54static inline int
55smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
56{
57 return smpboot_register_percpu_thread_cpumask(plug_thread,
58 cpu_possible_mask);
59}
60
52void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); 61void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
53int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, 62int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
54 const struct cpumask *); 63 const struct cpumask *);
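A sketch (not taken from this diff) of the new cpumask-aware registration; the callbacks are placeholders and example_cpumask stands in for whatever subset of CPUs the threads should be confined to.

	#include <linux/cpumask.h>
	#include <linux/init.h>
	#include <linux/percpu.h>
	#include <linux/sched.h>
	#include <linux/smpboot.h>

	static DEFINE_PER_CPU(struct task_struct *, example_task);
	static struct cpumask example_cpumask;		/* populated elsewhere */

	static int example_should_run(unsigned int cpu) { return 0; }
	static void example_fn(unsigned int cpu) { }

	static struct smp_hotplug_thread example_threads = {
		.store			= &example_task,
		.thread_should_run	= example_should_run,
		.thread_fn		= example_fn,
		.thread_comm		= "example/%u",
	};

	static int __init example_init(void)
	{
		/* Restrict the per-CPU threads to a subset of CPUs; the plain
		 * smpboot_register_percpu_thread() wrapper above keeps the
		 * old cpu_possible_mask behaviour. */
		return smpboot_register_percpu_thread_cpumask(&example_threads,
							      &example_cpumask);
	}
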
diff --git a/include/linux/soc/dove/pmu.h b/include/linux/soc/dove/pmu.h
new file mode 100644
index 000000000000..9c99f84bcc0e
--- /dev/null
+++ b/include/linux/soc/dove/pmu.h
@@ -0,0 +1,6 @@
1#ifndef LINUX_SOC_DOVE_PMU_H
2#define LINUX_SOC_DOVE_PMU_H
3
4int dove_init_pmu(void);
5
6#endif
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
new file mode 100644
index 000000000000..a5714e93fb34
--- /dev/null
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -0,0 +1,26 @@
1#ifndef __SOC_MEDIATEK_INFRACFG_H
2#define __SOC_MEDIATEK_INFRACFG_H
3
4#define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0)
5#define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1)
6#define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2)
7#define MT8173_TOP_AXI_PROT_EN_MMAPB_S BIT(6)
8#define MT8173_TOP_AXI_PROT_EN_L2C_M2 BIT(9)
9#define MT8173_TOP_AXI_PROT_EN_L2SS_SMI BIT(11)
10#define MT8173_TOP_AXI_PROT_EN_L2SS_ADD BIT(12)
11#define MT8173_TOP_AXI_PROT_EN_CCI_M2 BIT(13)
12#define MT8173_TOP_AXI_PROT_EN_MFG_S BIT(14)
13#define MT8173_TOP_AXI_PROT_EN_PERI_M0 BIT(15)
14#define MT8173_TOP_AXI_PROT_EN_PERI_M1 BIT(16)
15#define MT8173_TOP_AXI_PROT_EN_DEBUGSYS BIT(17)
16#define MT8173_TOP_AXI_PROT_EN_CQ_DMA BIT(18)
17#define MT8173_TOP_AXI_PROT_EN_GCPU BIT(19)
18#define MT8173_TOP_AXI_PROT_EN_IOMMU BIT(20)
19#define MT8173_TOP_AXI_PROT_EN_MFG_M0 BIT(21)
20#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
21#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
22
23int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask);
24int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask);
25
26#endif /* __SOC_MEDIATEK_INFRACFG_H */
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
new file mode 100644
index 000000000000..2a53dcaeeeed
--- /dev/null
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -0,0 +1,35 @@
1#ifndef __QCOM_SMD_RPM_H__
2#define __QCOM_SMD_RPM_H__
3
4struct qcom_smd_rpm;
5
6#define QCOM_SMD_RPM_ACTIVE_STATE 0
7#define QCOM_SMD_RPM_SLEEP_STATE 1
8
9/*
10 * Constants used for addressing resources in the RPM.
11 */
12#define QCOM_SMD_RPM_BOOST 0x61747362
13#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63
14#define QCOM_SMD_RPM_BUS_MASTER 0x73616d62
15#define QCOM_SMD_RPM_BUS_SLAVE 0x766c7362
16#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63
17#define QCOM_SMD_RPM_LDOA 0x616f646c
18#define QCOM_SMD_RPM_LDOB 0x626F646C
19#define QCOM_SMD_RPM_MEM_CLK 0x326b6c63
20#define QCOM_SMD_RPM_MISC_CLK 0x306b6c63
21#define QCOM_SMD_RPM_NCPA 0x6170636E
22#define QCOM_SMD_RPM_NCPB 0x6270636E
23#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f
24#define QCOM_SMD_RPM_QPIC_CLK 0x63697071
25#define QCOM_SMD_RPM_SMPA 0x61706d73
26#define QCOM_SMD_RPM_SMPB 0x62706d73
27#define QCOM_SMD_RPM_SPDM 0x63707362
28#define QCOM_SMD_RPM_VSA 0x00617376
29
30int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
31 int state,
32 u32 resource_type, u32 resource_id,
33 void *buf, size_t count);
34
35#endif
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
new file mode 100644
index 000000000000..d7e50aa6a4ac
--- /dev/null
+++ b/include/linux/soc/qcom/smd.h
@@ -0,0 +1,46 @@
1#ifndef __QCOM_SMD_H__
2#define __QCOM_SMD_H__
3
4#include <linux/device.h>
5#include <linux/mod_devicetable.h>
6
7struct qcom_smd;
8struct qcom_smd_channel;
9struct qcom_smd_lookup;
10
11/**
12 * struct qcom_smd_device - smd device struct
13 * @dev: the device struct
14 * @channel: handle to the smd channel for this device
15 */
16struct qcom_smd_device {
17 struct device dev;
18 struct qcom_smd_channel *channel;
19};
20
21/**
22 * struct qcom_smd_driver - smd driver struct
23 * @driver: underlying device driver
24 * @probe: invoked when the smd channel is found
25 * @remove: invoked when the smd channel is closed
26 * @callback: invoked when an inbound message is received on the channel,
27 * should return 0 on success or -EBUSY if the data cannot be
28 * consumed at this time
29 */
30struct qcom_smd_driver {
31 struct device_driver driver;
32 int (*probe)(struct qcom_smd_device *dev);
33 void (*remove)(struct qcom_smd_device *dev);
34 int (*callback)(struct qcom_smd_device *, const void *, size_t);
35};
36
37int qcom_smd_driver_register(struct qcom_smd_driver *drv);
38void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
39
40#define module_qcom_smd_driver(__smd_driver) \
41 module_driver(__smd_driver, qcom_smd_driver_register, \
42 qcom_smd_driver_unregister)
43
44int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
45
46#endif
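A minimal SMD client sketch (not taken from this diff) against the driver interface declared above; everything prefixed example_ is hypothetical.

	#include <linux/module.h>
	#include <linux/soc/qcom/smd.h>

	static int example_smd_probe(struct qcom_smd_device *sdev)
	{
		static const char hello[] = "hello";

		return qcom_smd_send(sdev->channel, hello, sizeof(hello));
	}

	static int example_smd_callback(struct qcom_smd_device *sdev,
					const void *data, size_t count)
	{
		/* Return -EBUSY if the message cannot be consumed right now. */
		return 0;
	}

	static struct qcom_smd_driver example_smd_driver = {
		.probe		= example_smd_probe,
		.callback	= example_smd_callback,
		.driver		= {
			.name	= "example_smd",
		},
	};
	module_qcom_smd_driver(example_smd_driver);
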
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
new file mode 100644
index 000000000000..bc9630d3aced
--- /dev/null
+++ b/include/linux/soc/qcom/smem.h
@@ -0,0 +1,11 @@
1#ifndef __QCOM_SMEM_H__
2#define __QCOM_SMEM_H__
3
4#define QCOM_SMEM_HOST_ANY -1
5
6int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
7int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size);
8
9int qcom_smem_get_free_space(unsigned host);
10
11#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index d673072346f2..269e8afd3e2a 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,8 @@
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24 24
25struct dma_chan; 25struct dma_chan;
26struct spi_master;
27struct spi_transfer;
26 28
27/* 29/*
28 * INTERFACES between SPI master-side drivers and SPI infrastructure. 30 * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -31,6 +33,59 @@ struct dma_chan;
31extern struct bus_type spi_bus_type; 33extern struct bus_type spi_bus_type;
32 34
33/** 35/**
36 * struct spi_statistics - statistics for spi transfers
37 * @lock: lock protecting this structure
38 *
39 * @messages: number of spi-messages handled
40 * @transfers: number of spi_transfers handled
41 * @errors: number of errors during spi_transfer
42 * @timedout: number of timeouts during spi_transfer
43 *
44 * @spi_sync: number of times spi_sync is used
45 * @spi_sync_immediate:
46 * number of times spi_sync is executed immediately
47 * in calling context without queuing and scheduling
48 * @spi_async: number of times spi_async is used
49 *
50 * @bytes: number of bytes transferred to/from device
51 * @bytes_tx: number of bytes sent to device
52 * @bytes_rx: number of bytes received from device
53 *
54 */
55struct spi_statistics {
56 spinlock_t lock; /* lock for the whole structure */
57
58 unsigned long messages;
59 unsigned long transfers;
60 unsigned long errors;
61 unsigned long timedout;
62
63 unsigned long spi_sync;
64 unsigned long spi_sync_immediate;
65 unsigned long spi_async;
66
67 unsigned long long bytes;
68 unsigned long long bytes_rx;
69 unsigned long long bytes_tx;
70
71};
72
73void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
74 struct spi_transfer *xfer,
75 struct spi_master *master);
76
77#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
78 do { \
79 unsigned long flags; \
80 spin_lock_irqsave(&(stats)->lock, flags); \
81 (stats)->field += count; \
82 spin_unlock_irqrestore(&(stats)->lock, flags); \
83 } while (0)
84
85#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \
86 SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
87
88/**
34 * struct spi_device - Master side proxy for an SPI slave device 89 * struct spi_device - Master side proxy for an SPI slave device
35 * @dev: Driver model representation of the device. 90 * @dev: Driver model representation of the device.
36 * @master: SPI controller used with the device. 91 * @master: SPI controller used with the device.
@@ -60,6 +115,8 @@ extern struct bus_type spi_bus_type;
60 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when 115 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
61 * when not using a GPIO line) 116 * when not using a GPIO line)
62 * 117 *
118 * @statistics: statistics for the spi_device
119 *
63 * A @spi_device is used to interchange data between an SPI slave 120 * A @spi_device is used to interchange data between an SPI slave
64 * (usually a discrete chip) and CPU memory. 121 * (usually a discrete chip) and CPU memory.
65 * 122 *
@@ -98,6 +155,9 @@ struct spi_device {
98 char modalias[SPI_NAME_SIZE]; 155 char modalias[SPI_NAME_SIZE];
99 int cs_gpio; /* chip select gpio */ 156 int cs_gpio; /* chip select gpio */
100 157
158 /* the statistics */
159 struct spi_statistics statistics;
160
101 /* 161 /*
102 * likely need more hooks for more protocol options affecting how 162 * likely need more hooks for more protocol options affecting how
103 * the controller talks to each chip, like: 163 * the controller talks to each chip, like:
@@ -296,6 +356,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
296 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 356 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
297 * number. Any individual value may be -ENOENT for CS lines that 357 * number. Any individual value may be -ENOENT for CS lines that
298 * are not GPIOs (driven by the SPI controller itself). 358 * are not GPIOs (driven by the SPI controller itself).
359 * @statistics: statistics for the spi_master
299 * @dma_tx: DMA transmit channel 360 * @dma_tx: DMA transmit channel
300 * @dma_rx: DMA receive channel 361 * @dma_rx: DMA receive channel
301 * @dummy_rx: dummy receive buffer for full-duplex devices 362 * @dummy_rx: dummy receive buffer for full-duplex devices
@@ -452,6 +513,9 @@ struct spi_master {
452 /* gpio chip select */ 513 /* gpio chip select */
453 int *cs_gpios; 514 int *cs_gpios;
454 515
516 /* statistics */
517 struct spi_statistics statistics;
518
455 /* DMA channels for use with core dmaengine helpers */ 519 /* DMA channels for use with core dmaengine helpers */
456 struct dma_chan *dma_tx; 520 struct dma_chan *dma_tx;
457 struct dma_chan *dma_rx; 521 struct dma_chan *dma_rx;
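A sketch (not taken from this diff) of how the statistics macros are meant to be used from a transfer path; the surrounding function is hypothetical, the fields come from spi_statistics above.

	#include <linux/spi/spi.h>

	static void example_account_timeout(struct spi_master *master,
					    struct spi_device *spi)
	{
		/* Each macro takes the statistics lock with IRQs saved, so it
		 * can be used from any context. */
		SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, timedout);
		SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, timedout);
	}
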
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0063b24b4f36..47dd0cebd204 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,16 +130,6 @@ do { \
130#define smp_mb__before_spinlock() smp_wmb() 130#define smp_mb__before_spinlock() smp_wmb()
131#endif 131#endif
132 132
133/*
134 * Place this after a lock-acquisition primitive to guarantee that
135 * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
136 * if the UNLOCK and LOCK are executed by the same CPU or if the
137 * UNLOCK and LOCK operate on the same lock variable.
138 */
139#ifndef smp_mb__after_unlock_lock
140#define smp_mb__after_unlock_lock() do { } while (0)
141#endif
142
143/** 133/**
144 * raw_spin_unlock_wait - wait until the spinlock gets unlocked 134 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
145 * @lock: the spinlock in question. 135 * @lock: the spinlock in question.
@@ -296,7 +286,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
296 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n 286 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
297 */ 287 */
298 288
299static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) 289static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
300{ 290{
301 return &lock->rlock; 291 return &lock->rlock;
302} 292}
@@ -307,17 +297,17 @@ do { \
307 raw_spin_lock_init(&(_lock)->rlock); \ 297 raw_spin_lock_init(&(_lock)->rlock); \
308} while (0) 298} while (0)
309 299
310static inline void spin_lock(spinlock_t *lock) 300static __always_inline void spin_lock(spinlock_t *lock)
311{ 301{
312 raw_spin_lock(&lock->rlock); 302 raw_spin_lock(&lock->rlock);
313} 303}
314 304
315static inline void spin_lock_bh(spinlock_t *lock) 305static __always_inline void spin_lock_bh(spinlock_t *lock)
316{ 306{
317 raw_spin_lock_bh(&lock->rlock); 307 raw_spin_lock_bh(&lock->rlock);
318} 308}
319 309
320static inline int spin_trylock(spinlock_t *lock) 310static __always_inline int spin_trylock(spinlock_t *lock)
321{ 311{
322 return raw_spin_trylock(&lock->rlock); 312 return raw_spin_trylock(&lock->rlock);
323} 313}
@@ -337,7 +327,7 @@ do { \
337 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ 327 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
338} while (0) 328} while (0)
339 329
340static inline void spin_lock_irq(spinlock_t *lock) 330static __always_inline void spin_lock_irq(spinlock_t *lock)
341{ 331{
342 raw_spin_lock_irq(&lock->rlock); 332 raw_spin_lock_irq(&lock->rlock);
343} 333}
@@ -352,32 +342,32 @@ do { \
352 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ 342 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
353} while (0) 343} while (0)
354 344
355static inline void spin_unlock(spinlock_t *lock) 345static __always_inline void spin_unlock(spinlock_t *lock)
356{ 346{
357 raw_spin_unlock(&lock->rlock); 347 raw_spin_unlock(&lock->rlock);
358} 348}
359 349
360static inline void spin_unlock_bh(spinlock_t *lock) 350static __always_inline void spin_unlock_bh(spinlock_t *lock)
361{ 351{
362 raw_spin_unlock_bh(&lock->rlock); 352 raw_spin_unlock_bh(&lock->rlock);
363} 353}
364 354
365static inline void spin_unlock_irq(spinlock_t *lock) 355static __always_inline void spin_unlock_irq(spinlock_t *lock)
366{ 356{
367 raw_spin_unlock_irq(&lock->rlock); 357 raw_spin_unlock_irq(&lock->rlock);
368} 358}
369 359
370static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 360static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
371{ 361{
372 raw_spin_unlock_irqrestore(&lock->rlock, flags); 362 raw_spin_unlock_irqrestore(&lock->rlock, flags);
373} 363}
374 364
375static inline int spin_trylock_bh(spinlock_t *lock) 365static __always_inline int spin_trylock_bh(spinlock_t *lock)
376{ 366{
377 return raw_spin_trylock_bh(&lock->rlock); 367 return raw_spin_trylock_bh(&lock->rlock);
378} 368}
379 369
380static inline int spin_trylock_irq(spinlock_t *lock) 370static __always_inline int spin_trylock_irq(spinlock_t *lock)
381{ 371{
382 return raw_spin_trylock_irq(&lock->rlock); 372 return raw_spin_trylock_irq(&lock->rlock);
383} 373}
@@ -387,22 +377,22 @@ static inline int spin_trylock_irq(spinlock_t *lock)
387 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ 377 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
388}) 378})
389 379
390static inline void spin_unlock_wait(spinlock_t *lock) 380static __always_inline void spin_unlock_wait(spinlock_t *lock)
391{ 381{
392 raw_spin_unlock_wait(&lock->rlock); 382 raw_spin_unlock_wait(&lock->rlock);
393} 383}
394 384
395static inline int spin_is_locked(spinlock_t *lock) 385static __always_inline int spin_is_locked(spinlock_t *lock)
396{ 386{
397 return raw_spin_is_locked(&lock->rlock); 387 return raw_spin_is_locked(&lock->rlock);
398} 388}
399 389
400static inline int spin_is_contended(spinlock_t *lock) 390static __always_inline int spin_is_contended(spinlock_t *lock)
401{ 391{
402 return raw_spin_is_contended(&lock->rlock); 392 return raw_spin_is_contended(&lock->rlock);
403} 393}
404 394
405static inline int spin_can_lock(spinlock_t *lock) 395static __always_inline int spin_can_lock(spinlock_t *lock)
406{ 396{
407 return raw_spin_can_lock(&lock->rlock); 397 return raw_spin_can_lock(&lock->rlock);
408} 398}
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c735f5c91eea..eead8ab93c0a 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -119,30 +119,8 @@ struct plat_stmmacenet_data {
119 int rx_fifo_size; 119 int rx_fifo_size;
120 void (*fix_mac_speed)(void *priv, unsigned int speed); 120 void (*fix_mac_speed)(void *priv, unsigned int speed);
121 void (*bus_setup)(void __iomem *ioaddr); 121 void (*bus_setup)(void __iomem *ioaddr);
122 void *(*setup)(struct platform_device *pdev);
123 void (*free)(struct platform_device *pdev, void *priv);
124 int (*init)(struct platform_device *pdev, void *priv); 122 int (*init)(struct platform_device *pdev, void *priv);
125 void (*exit)(struct platform_device *pdev, void *priv); 123 void (*exit)(struct platform_device *pdev, void *priv);
126 void *custom_cfg;
127 void *custom_data;
128 void *bsp_priv; 124 void *bsp_priv;
129}; 125};
130
131/* of_data for SoC glue layer device tree bindings */
132
133struct stmmac_of_data {
134 int has_gmac;
135 int enh_desc;
136 int tx_coe;
137 int rx_coe;
138 int bugged_jumbo;
139 int pmt;
140 int riwt_off;
141 void (*fix_mac_speed)(void *priv, unsigned int speed);
142 void (*bus_setup)(void __iomem *ioaddr);
143 void *(*setup)(struct platform_device *pdev);
144 void (*free)(struct platform_device *pdev, void *priv);
145 int (*init)(struct platform_device *pdev, void *priv);
146 void (*exit)(struct platform_device *pdev, void *priv);
147};
148#endif 126#endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index d2abbdb8c6aa..414d924318ce 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -112,25 +112,13 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
112 * 112 *
113 * This can be thought of as a very heavy write lock, equivalent to 113 * This can be thought of as a very heavy write lock, equivalent to
114 * grabbing every spinlock in the kernel. */ 114 * grabbing every spinlock in the kernel. */
115int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); 115int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
116 116
117/** 117int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
118 * __stop_machine: freeze the machine on all CPUs and run this function
119 * @fn: the function to run
120 * @data: the data ptr for the @fn
121 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
122 *
123 * Description: This is a special version of the above, which assumes cpus
124 * won't come or go while it's being called. Used by hotplug cpu.
125 */
126int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
127
128int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
129 const struct cpumask *cpus); 118 const struct cpumask *cpus);
130
131#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ 119#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
132 120
133static inline int __stop_machine(int (*fn)(void *), void *data, 121static inline int stop_machine(cpu_stop_fn_t fn, void *data,
134 const struct cpumask *cpus) 122 const struct cpumask *cpus)
135{ 123{
136 unsigned long flags; 124 unsigned long flags;
@@ -141,16 +129,10 @@ static inline int __stop_machine(int (*fn)(void *), void *data,
141 return ret; 129 return ret;
142} 130}
143 131
144static inline int stop_machine(int (*fn)(void *), void *data, 132static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
145 const struct cpumask *cpus)
146{
147 return __stop_machine(fn, data, cpus);
148}
149
150static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
151 const struct cpumask *cpus) 133 const struct cpumask *cpus)
152{ 134{
153 return __stop_machine(fn, data, cpus); 135 return stop_machine(fn, data, cpus);
154} 136}
155 137
156#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ 138#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 71f711db4500..dabe643eb5fa 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -48,24 +48,24 @@ static inline int string_unescape_any_inplace(char *buf)
48#define ESCAPE_HEX 0x20 48#define ESCAPE_HEX 0x20
49 49
50int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, 50int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
51 unsigned int flags, const char *esc); 51 unsigned int flags, const char *only);
52 52
53static inline int string_escape_mem_any_np(const char *src, size_t isz, 53static inline int string_escape_mem_any_np(const char *src, size_t isz,
54 char *dst, size_t osz, const char *esc) 54 char *dst, size_t osz, const char *only)
55{ 55{
56 return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc); 56 return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, only);
57} 57}
58 58
59static inline int string_escape_str(const char *src, char *dst, size_t sz, 59static inline int string_escape_str(const char *src, char *dst, size_t sz,
60 unsigned int flags, const char *esc) 60 unsigned int flags, const char *only)
61{ 61{
62 return string_escape_mem(src, strlen(src), dst, sz, flags, esc); 62 return string_escape_mem(src, strlen(src), dst, sz, flags, only);
63} 63}
64 64
65static inline int string_escape_str_any_np(const char *src, char *dst, 65static inline int string_escape_str_any_np(const char *src, char *dst,
66 size_t sz, const char *esc) 66 size_t sz, const char *only)
67{ 67{
68 return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc); 68 return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
69} 69}
70 70
71#endif 71#endif
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
index 07d8e53bedfc..5c9c6cd08d3b 100644
--- a/include/linux/sunrpc/addr.h
+++ b/include/linux/sunrpc/addr.h
@@ -46,8 +46,8 @@ static inline void rpc_set_port(struct sockaddr *sap,
46#define IPV6_SCOPE_DELIMITER '%' 46#define IPV6_SCOPE_DELIMITER '%'
47#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn") 47#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
48 48
49static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, 49static inline bool rpc_cmp_addr4(const struct sockaddr *sap1,
50 const struct sockaddr *sap2) 50 const struct sockaddr *sap2)
51{ 51{
52 const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; 52 const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
53 const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; 53 const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
@@ -67,8 +67,8 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
67} 67}
68 68
69#if IS_ENABLED(CONFIG_IPV6) 69#if IS_ENABLED(CONFIG_IPV6)
70static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, 70static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
71 const struct sockaddr *sap2) 71 const struct sockaddr *sap2)
72{ 72{
73 const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; 73 const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
74 const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; 74 const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
@@ -93,7 +93,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
93 return true; 93 return true;
94} 94}
95#else /* !(IS_ENABLED(CONFIG_IPV6) */ 95#else /* !(IS_ENABLED(CONFIG_IPV6) */
96static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, 96static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
97 const struct sockaddr *sap2) 97 const struct sockaddr *sap2)
98{ 98{
99 return false; 99 return false;
@@ -122,15 +122,28 @@ static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
122 if (sap1->sa_family == sap2->sa_family) { 122 if (sap1->sa_family == sap2->sa_family) {
123 switch (sap1->sa_family) { 123 switch (sap1->sa_family) {
124 case AF_INET: 124 case AF_INET:
125 return __rpc_cmp_addr4(sap1, sap2); 125 return rpc_cmp_addr4(sap1, sap2);
126 case AF_INET6: 126 case AF_INET6:
127 return __rpc_cmp_addr6(sap1, sap2); 127 return rpc_cmp_addr6(sap1, sap2);
128 } 128 }
129 } 129 }
130 return false; 130 return false;
131} 131}
132 132
133/** 133/**
134 * rpc_cmp_addr_port - compare the address and port number of two sockaddrs.
135 * @sap1: first sockaddr
136 * @sap2: second sockaddr
137 */
138static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1,
139 const struct sockaddr *sap2)
140{
141 if (!rpc_cmp_addr(sap1, sap2))
142 return false;
143 return rpc_get_port(sap1) == rpc_get_port(sap2);
144}
145
146/**
134 * rpc_copy_addr - copy the address portion of one sockaddr to another 147 * rpc_copy_addr - copy the address portion of one sockaddr to another
135 * @dst: destination sockaddr 148 * @dst: destination sockaddr
136 * @src: source sockaddr 149 * @src: source sockaddr
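A sketch (not taken from this diff) of the new port-aware comparison; the two sockaddr_storage arguments are hypothetical stand-ins for, say, a client's known address and an incoming connection's address.

	#include <linux/socket.h>
	#include <linux/sunrpc/addr.h>

	static bool example_same_endpoint(const struct sockaddr_storage *a,
					  const struct sockaddr_storage *b)
	{
		/* True only when family, address and port number all match. */
		return rpc_cmp_addr_port((const struct sockaddr *)a,
					 (const struct sockaddr *)b);
	}
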
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index a7cbb570cc5c..1ecf13e148b8 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -18,9 +18,13 @@
18#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <linux/rcupdate.h> 19#include <linux/rcupdate.h>
20#include <linux/uidgid.h> 20#include <linux/uidgid.h>
21#include <linux/utsname.h>
21 22
22/* size of the nodename buffer */ 23/*
23#define UNX_MAXNODENAME 32 24 * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
25 * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
26 */
27#define UNX_MAXNODENAME __NEW_UTS_LEN
24 28
25struct rpcsec_gss_info; 29struct rpcsec_gss_info;
26 30
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 437ddb6c4aef..03d3b4c92d9f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -46,7 +46,7 @@
46 * 46 *
47 */ 47 */
48struct cache_head { 48struct cache_head {
49 struct cache_head * next; 49 struct hlist_node cache_list;
50 time_t expiry_time; /* After this time, don't use the data */ 50 time_t expiry_time; /* After this time, don't use the data */
51 time_t last_refresh; /* If CACHE_PENDING, this is when upcall 51 time_t last_refresh; /* If CACHE_PENDING, this is when upcall
52 * was sent, else this is when update was received 52 * was sent, else this is when update was received
@@ -73,7 +73,7 @@ struct cache_detail_pipefs {
73struct cache_detail { 73struct cache_detail {
74 struct module * owner; 74 struct module * owner;
75 int hash_size; 75 int hash_size;
76 struct cache_head ** hash_table; 76 struct hlist_head * hash_table;
77 rwlock_t hash_lock; 77 rwlock_t hash_lock;
78 78
79 atomic_t inuse; /* active user-space update or lookup */ 79 atomic_t inuse; /* active user-space update or lookup */
@@ -224,6 +224,11 @@ extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
224 umode_t, struct cache_detail *); 224 umode_t, struct cache_detail *);
225extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); 225extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
226 226
227/* Must store cache_detail in seq_file->private if using next three functions */
228extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
229extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
230extern void cache_seq_stop(struct seq_file *file, void *p);
231
227extern void qword_add(char **bpp, int *lp, char *str); 232extern void qword_add(char **bpp, int *lp, char *str);
228extern void qword_addhex(char **bpp, int *lp, char *buf, int blen); 233extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
229extern int qword_get(char **bpp, char *dest, int bufsize); 234extern int qword_get(char **bpp, char *dest, int bufsize);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fae6fb947fc8..cc0fc712bb82 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -19,11 +19,6 @@
19#include <linux/wait.h> 19#include <linux/wait.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
21 21
22/*
23 * This is the RPC server thread function prototype
24 */
25typedef int (*svc_thread_fn)(void *);
26
27/* statistics for svc_pool structures */ 22/* statistics for svc_pool structures */
28struct svc_pool_stats { 23struct svc_pool_stats {
29 atomic_long_t packets; 24 atomic_long_t packets;
@@ -54,6 +49,25 @@ struct svc_pool {
54 unsigned long sp_flags; 49 unsigned long sp_flags;
55} ____cacheline_aligned_in_smp; 50} ____cacheline_aligned_in_smp;
56 51
52struct svc_serv;
53
54struct svc_serv_ops {
55 /* Callback to use when last thread exits. */
56 void (*svo_shutdown)(struct svc_serv *, struct net *);
57
58 /* function for service threads to run */
59 int (*svo_function)(void *);
60
61 /* queue up a transport for servicing */
62 void (*svo_enqueue_xprt)(struct svc_xprt *);
63
64 /* set up thread (or whatever) execution context */
65 int (*svo_setup)(struct svc_serv *, struct svc_pool *, int);
66
67 /* optional module to count when adding threads (pooled svcs only) */
68 struct module *svo_module;
69};
70
57/* 71/*
58 * RPC service. 72 * RPC service.
59 * 73 *
@@ -85,16 +99,7 @@ struct svc_serv {
85 99
86 unsigned int sv_nrpools; /* number of thread pools */ 100 unsigned int sv_nrpools; /* number of thread pools */
87 struct svc_pool * sv_pools; /* array of thread pools */ 101 struct svc_pool * sv_pools; /* array of thread pools */
88 102 struct svc_serv_ops *sv_ops; /* server operations */
89 void (*sv_shutdown)(struct svc_serv *serv,
90 struct net *net);
91 /* Callback to use when last thread
92 * exits.
93 */
94
95 struct module * sv_module; /* optional module to count when
96 * adding threads */
97 svc_thread_fn sv_function; /* main function for threads */
98#if defined(CONFIG_SUNRPC_BACKCHANNEL) 103#if defined(CONFIG_SUNRPC_BACKCHANNEL)
99 struct list_head sv_cb_list; /* queue for callback requests 104 struct list_head sv_cb_list; /* queue for callback requests
100 * that arrive over the same 105 * that arrive over the same
@@ -423,19 +428,46 @@ struct svc_procedure {
423}; 428};
424 429
425/* 430/*
431 * Mode for mapping cpus to pools.
432 */
433enum {
434 SVC_POOL_AUTO = -1, /* choose one of the others */
435 SVC_POOL_GLOBAL, /* no mapping, just a single global pool
436 * (legacy & UP mode) */
437 SVC_POOL_PERCPU, /* one pool per cpu */
438 SVC_POOL_PERNODE /* one pool per numa node */
439};
440
441struct svc_pool_map {
442 int count; /* How many svc_servs use us */
443 int mode; /* Note: int not enum to avoid
444 * warnings about "enumeration value
445 * not handled in switch" */
446 unsigned int npools;
447 unsigned int *pool_to; /* maps pool id to cpu or node */
448 unsigned int *to_pool; /* maps cpu or node to pool id */
449};
450
451extern struct svc_pool_map svc_pool_map;
452
453/*
426 * Function prototypes. 454 * Function prototypes.
427 */ 455 */
428int svc_rpcb_setup(struct svc_serv *serv, struct net *net); 456int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
429void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); 457void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
430int svc_bind(struct svc_serv *serv, struct net *net); 458int svc_bind(struct svc_serv *serv, struct net *net);
431struct svc_serv *svc_create(struct svc_program *, unsigned int, 459struct svc_serv *svc_create(struct svc_program *, unsigned int,
432 void (*shutdown)(struct svc_serv *, struct net *net)); 460 struct svc_serv_ops *);
461struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
462 struct svc_pool *pool, int node);
433struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, 463struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
434 struct svc_pool *pool, int node); 464 struct svc_pool *pool, int node);
465void svc_rqst_free(struct svc_rqst *);
435void svc_exit_thread(struct svc_rqst *); 466void svc_exit_thread(struct svc_rqst *);
467unsigned int svc_pool_map_get(void);
468void svc_pool_map_put(void);
436struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, 469struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
437 void (*shutdown)(struct svc_serv *, struct net *net), 470 struct svc_serv_ops *);
438 svc_thread_fn, struct module *);
439int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); 471int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
440int svc_pool_stats_open(struct svc_serv *serv, struct file *file); 472int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
441void svc_destroy(struct svc_serv *); 473void svc_destroy(struct svc_serv *);
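
To show the shape of the API change (not from the patch): the shutdown callback, thread function and owning module that used to be separate svc_create_pooled() arguments now travel in one ops table. Everything prefixed my_ below is hypothetical; svc_xprt_do_enqueue() and svc_set_num_threads() are the exported defaults visible elsewhere in this patch set.

	static struct svc_serv_ops my_sv_ops = {
		.svo_shutdown     = my_last_thread_exit,  /* formerly the 'shutdown' argument */
		.svo_function     = my_service_thread,    /* formerly the svc_thread_fn argument */
		.svo_enqueue_xprt = svc_xprt_do_enqueue,  /* now declared in svc_xprt.h */
		.svo_setup        = svc_set_num_threads,
		.svo_module       = THIS_MODULE,          /* formerly the 'struct module *' argument */
	};

	/* old: svc_create_pooled(prog, bufsize, shutdown_fn, thread_fn, THIS_MODULE);
	 * new: */
	serv = svc_create_pooled(prog, bufsize, &my_sv_ops);
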
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cb94ee4181d4..7ccc961f33e9 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -132,6 +132,7 @@ struct svcxprt_rdma {
132 struct list_head sc_accept_q; /* Conn. waiting accept */ 132 struct list_head sc_accept_q; /* Conn. waiting accept */
133 int sc_ord; /* RDMA read limit */ 133 int sc_ord; /* RDMA read limit */
134 int sc_max_sge; 134 int sc_max_sge;
135 int sc_max_sge_rd; /* max sge for read target */
135 136
136 int sc_sq_depth; /* Depth of SQ */ 137 int sc_sq_depth; /* Depth of SQ */
137 atomic_t sc_sq_count; /* Number of SQ WR on queue */ 138 atomic_t sc_sq_count; /* Number of SQ WR on queue */
@@ -172,13 +173,6 @@ struct svcxprt_rdma {
172#define RDMAXPRT_SQ_PENDING 2 173#define RDMAXPRT_SQ_PENDING 2
173#define RDMAXPRT_CONN_PENDING 3 174#define RDMAXPRT_CONN_PENDING 3
174 175
175#define RPCRDMA_MAX_SVC_SEGS (64) /* server max scatter/gather */
176#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
177#define RPCRDMA_MAXPAYLOAD RPCSVC_MAXPAYLOAD
178#else
179#define RPCRDMA_MAXPAYLOAD (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
180#endif
181
182#define RPCRDMA_LISTEN_BACKLOG 10 176#define RPCRDMA_LISTEN_BACKLOG 10
183/* The default ORD value is based on two outstanding full-size writes with a 177/* The default ORD value is based on two outstanding full-size writes with a
184 * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ 178 * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
@@ -187,6 +181,8 @@ struct svcxprt_rdma {
187#define RPCRDMA_MAX_REQUESTS 32 181#define RPCRDMA_MAX_REQUESTS 32
188#define RPCRDMA_MAX_REQ_SIZE 4096 182#define RPCRDMA_MAX_REQ_SIZE 4096
189 183
184#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
185
190/* svc_rdma_marshal.c */ 186/* svc_rdma_marshal.c */
191extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); 187extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
192extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, 188extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -213,6 +209,8 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
213 209
214/* svc_rdma_sendto.c */ 210/* svc_rdma_sendto.c */
215extern int svc_rdma_sendto(struct svc_rqst *); 211extern int svc_rdma_sendto(struct svc_rqst *);
212extern struct rpcrdma_read_chunk *
213 svc_rdma_get_read_chunk(struct rpcrdma_msg *);
216 214
217/* svc_rdma_transport.c */ 215/* svc_rdma_transport.c */
218extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); 216extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
@@ -225,7 +223,6 @@ extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
225extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt); 223extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
226extern struct svc_rdma_req_map *svc_rdma_get_req_map(void); 224extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
227extern void svc_rdma_put_req_map(struct svc_rdma_req_map *); 225extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
228extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *);
229extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *); 226extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
230extern void svc_rdma_put_frmr(struct svcxprt_rdma *, 227extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
231 struct svc_rdma_fastreg_mr *); 228 struct svc_rdma_fastreg_mr *);
@@ -238,83 +235,4 @@ extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
238extern int svc_rdma_init(void); 235extern int svc_rdma_init(void);
239extern void svc_rdma_cleanup(void); 236extern void svc_rdma_cleanup(void);
240 237
241/*
242 * Returns the address of the first read chunk or <nul> if no read chunk is
243 * present
244 */
245static inline struct rpcrdma_read_chunk *
246svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
247{
248 struct rpcrdma_read_chunk *ch =
249 (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
250
251 if (ch->rc_discrim == 0)
252 return NULL;
253
254 return ch;
255}
256
257/*
258 * Returns the address of the first read write array element or <nul> if no
259 * write array list is present
260 */
261static inline struct rpcrdma_write_array *
262svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
263{
264 if (rmsgp->rm_body.rm_chunks[0] != 0
265 || rmsgp->rm_body.rm_chunks[1] == 0)
266 return NULL;
267
268 return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
269}
270
271/*
272 * Returns the address of the first reply array element or <nul> if no
273 * reply array is present
274 */
275static inline struct rpcrdma_write_array *
276svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
277{
278 struct rpcrdma_read_chunk *rch;
279 struct rpcrdma_write_array *wr_ary;
280 struct rpcrdma_write_array *rp_ary;
281
282 /* XXX: Need to fix when reply list may occur with read-list and/or
283 * write list */
284 if (rmsgp->rm_body.rm_chunks[0] != 0 ||
285 rmsgp->rm_body.rm_chunks[1] != 0)
286 return NULL;
287
288 rch = svc_rdma_get_read_chunk(rmsgp);
289 if (rch) {
290 while (rch->rc_discrim)
291 rch++;
292
293 /* The reply list follows an empty write array located
294 * at 'rc_position' here. The reply array is at rc_target.
295 */
296 rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
297
298 goto found_it;
299 }
300
301 wr_ary = svc_rdma_get_write_array(rmsgp);
302 if (wr_ary) {
303 rp_ary = (struct rpcrdma_write_array *)
304 &wr_ary->
305 wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
306
307 goto found_it;
308 }
309
310 /* No read list, no write list */
311 rp_ary = (struct rpcrdma_write_array *)
312 &rmsgp->rm_body.rm_chunks[2];
313
314 found_it:
315 if (rp_ary->wc_discrim == 0)
316 return NULL;
317
318 return rp_ary;
319}
320#endif 238#endif
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 79f6f8f3dc0a..78512cfe1fe6 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -116,6 +116,7 @@ void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
116 struct svc_serv *); 116 struct svc_serv *);
117int svc_create_xprt(struct svc_serv *, const char *, struct net *, 117int svc_create_xprt(struct svc_serv *, const char *, struct net *,
118 const int, const unsigned short, int); 118 const int, const unsigned short, int);
119void svc_xprt_do_enqueue(struct svc_xprt *xprt);
119void svc_xprt_enqueue(struct svc_xprt *xprt); 120void svc_xprt_enqueue(struct svc_xprt *xprt);
120void svc_xprt_put(struct svc_xprt *xprt); 121void svc_xprt_put(struct svc_xprt *xprt);
121void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); 122void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index b17613052cc3..b7b279b54504 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -49,7 +49,7 @@
49 * a single chunk type per message is supported currently. 49 * a single chunk type per message is supported currently.
50 */ 50 */
51#define RPCRDMA_MIN_SLOT_TABLE (2U) 51#define RPCRDMA_MIN_SLOT_TABLE (2U)
52#define RPCRDMA_DEF_SLOT_TABLE (32U) 52#define RPCRDMA_DEF_SLOT_TABLE (128U)
53#define RPCRDMA_MAX_SLOT_TABLE (256U) 53#define RPCRDMA_MAX_SLOT_TABLE (256U)
54 54
55#define RPCRDMA_DEF_INLINE (1024) /* default inline max */ 55#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 38874729dc5f..7ba7dccaf0e7 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,7 +351,15 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages);
351extern int kswapd_run(int nid); 351extern int kswapd_run(int nid);
352extern void kswapd_stop(int nid); 352extern void kswapd_stop(int nid);
353#ifdef CONFIG_MEMCG 353#ifdef CONFIG_MEMCG
354extern int mem_cgroup_swappiness(struct mem_cgroup *mem); 354static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
355{
356 /* root ? */
357 if (mem_cgroup_disabled() || !memcg->css.parent)
358 return vm_swappiness;
359
360 return memcg->swappiness;
361}
362
355#else 363#else
356static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) 364static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
357{ 365{
@@ -373,9 +381,9 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
373/* linux/mm/page_io.c */ 381/* linux/mm/page_io.c */
374extern int swap_readpage(struct page *); 382extern int swap_readpage(struct page *);
375extern int swap_writepage(struct page *page, struct writeback_control *wbc); 383extern int swap_writepage(struct page *page, struct writeback_control *wbc);
376extern void end_swap_bio_write(struct bio *bio, int err); 384extern void end_swap_bio_write(struct bio *bio);
377extern int __swap_writepage(struct page *page, struct writeback_control *wbc, 385extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
378 void (*end_write_func)(struct bio *, int)); 386 bio_end_io_t end_write_func);
379extern int swap_set_page_dirty(struct page *page); 387extern int swap_set_page_dirty(struct page *page);
380 388
381int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 389int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
@@ -398,6 +406,9 @@ extern void free_pages_and_swap_cache(struct page **, int);
398extern struct page *lookup_swap_cache(swp_entry_t); 406extern struct page *lookup_swap_cache(swp_entry_t);
399extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, 407extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
400 struct vm_area_struct *vma, unsigned long addr); 408 struct vm_area_struct *vma, unsigned long addr);
409extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
410 struct vm_area_struct *vma, unsigned long addr,
411 bool *new_page_allocated);
401extern struct page *swapin_readahead(swp_entry_t, gfp_t, 412extern struct page *swapin_readahead(swp_entry_t, gfp_t,
402 struct vm_area_struct *vma, unsigned long addr); 413 struct vm_area_struct *vma, unsigned long addr);
403 414
@@ -431,6 +442,7 @@ extern unsigned int count_swap_pages(int, int);
431extern sector_t map_swap_page(struct page *, struct block_device **); 442extern sector_t map_swap_page(struct page *, struct block_device **);
432extern sector_t swapdev_block(int, pgoff_t); 443extern sector_t swapdev_block(int, pgoff_t);
433extern int page_swapcount(struct page *); 444extern int page_swapcount(struct page *);
445extern int swp_swapcount(swp_entry_t entry);
434extern struct swap_info_struct *page_swap_info(struct page *); 446extern struct swap_info_struct *page_swap_info(struct page *);
435extern int reuse_swap_page(struct page *); 447extern int reuse_swap_page(struct page *);
436extern int try_to_free_swap(struct page *); 448extern int try_to_free_swap(struct page *);
@@ -522,6 +534,11 @@ static inline int page_swapcount(struct page *page)
522 return 0; 534 return 0;
523} 535}
524 536
537static inline int swp_swapcount(swp_entry_t entry)
538{
539 return 0;
540}
541
525#define reuse_swap_page(page) (page_mapcount(page) == 1) 542#define reuse_swap_page(page) (page_mapcount(page) == 1)
526 543
527static inline int try_to_free_swap(struct page *page) 544static inline int try_to_free_swap(struct page *page)
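
For illustration, a sketch of a completion callback under the new bio_end_io_t prototype taken by __swap_writepage(); the error field name (bio->bi_error) is an assumption about this kernel generation, and the body is a placeholder.

	static void my_end_swap_write(struct bio *bio)
	{
		struct page *page = bio->bi_io_vec[0].bv_page;

		if (bio->bi_error)		/* the error now travels in the bio itself */
			SetPageError(page);
		end_page_writeback(page);
		bio_put(bio);
	}

	/* ... */
	ret = __swap_writepage(page, wbc, my_end_swap_write);
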
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index cedf3d3c373f..5c3a5f3e7eec 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -164,6 +164,9 @@ static inline int is_write_migration_entry(swp_entry_t entry)
164#endif 164#endif
165 165
166#ifdef CONFIG_MEMORY_FAILURE 166#ifdef CONFIG_MEMORY_FAILURE
167
168extern atomic_long_t num_poisoned_pages __read_mostly;
169
167/* 170/*
168 * Support for hardware poisoned pages 171 * Support for hardware poisoned pages
169 */ 172 */
@@ -177,6 +180,31 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
177{ 180{
178 return swp_type(entry) == SWP_HWPOISON; 181 return swp_type(entry) == SWP_HWPOISON;
179} 182}
183
184static inline bool test_set_page_hwpoison(struct page *page)
185{
186 return TestSetPageHWPoison(page);
187}
188
189static inline void num_poisoned_pages_inc(void)
190{
191 atomic_long_inc(&num_poisoned_pages);
192}
193
194static inline void num_poisoned_pages_dec(void)
195{
196 atomic_long_dec(&num_poisoned_pages);
197}
198
199static inline void num_poisoned_pages_add(long num)
200{
201 atomic_long_add(num, &num_poisoned_pages);
202}
203
204static inline void num_poisoned_pages_sub(long num)
205{
206 atomic_long_sub(num, &num_poisoned_pages);
207}
180#else 208#else
181 209
182static inline swp_entry_t make_hwpoison_entry(struct page *page) 210static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -188,6 +216,15 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
188{ 216{
189 return 0; 217 return 0;
190} 218}
219
220static inline bool test_set_page_hwpoison(struct page *page)
221{
222 return false;
223}
224
225static inline void num_poisoned_pages_inc(void)
226{
227}
191#endif 228#endif
192 229
193#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) 230#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b45c45b8c829..08001317aee7 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -810,6 +810,7 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
810asmlinkage long sys_eventfd(unsigned int count); 810asmlinkage long sys_eventfd(unsigned int count);
811asmlinkage long sys_eventfd2(unsigned int count, int flags); 811asmlinkage long sys_eventfd2(unsigned int count, int flags);
812asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags); 812asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
813asmlinkage long sys_userfaultfd(int flags);
813asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); 814asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
814asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int); 815asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
815asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, 816asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index c78dcfeaf25f..d4217eff489f 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,7 +86,6 @@ struct st_proto_s {
86extern long st_register(struct st_proto_s *); 86extern long st_register(struct st_proto_s *);
87extern long st_unregister(struct st_proto_s *); 87extern long st_unregister(struct st_proto_s *);
88 88
89extern struct ti_st_plat_data *dt_pdata;
90 89
91/* 90/*
92 * header information used by st_core.c 91 * header information used by st_core.c
diff --git a/include/linux/tick.h b/include/linux/tick.h
index edbfc9a5293e..48d901f83f92 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,22 +147,29 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
147 cpumask_or(mask, mask, tick_nohz_full_mask); 147 cpumask_or(mask, mask, tick_nohz_full_mask);
148} 148}
149 149
150extern void __tick_nohz_full_check(void);
151extern void tick_nohz_full_kick(void); 150extern void tick_nohz_full_kick(void);
152extern void tick_nohz_full_kick_cpu(int cpu); 151extern void tick_nohz_full_kick_cpu(int cpu);
153extern void tick_nohz_full_kick_all(void); 152extern void tick_nohz_full_kick_all(void);
154extern void __tick_nohz_task_switch(struct task_struct *tsk); 153extern void __tick_nohz_task_switch(void);
155#else 154#else
156static inline bool tick_nohz_full_enabled(void) { return false; } 155static inline bool tick_nohz_full_enabled(void) { return false; }
157static inline bool tick_nohz_full_cpu(int cpu) { return false; } 156static inline bool tick_nohz_full_cpu(int cpu) { return false; }
158static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } 157static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
159static inline void __tick_nohz_full_check(void) { }
160static inline void tick_nohz_full_kick_cpu(int cpu) { } 158static inline void tick_nohz_full_kick_cpu(int cpu) { }
161static inline void tick_nohz_full_kick(void) { } 159static inline void tick_nohz_full_kick(void) { }
162static inline void tick_nohz_full_kick_all(void) { } 160static inline void tick_nohz_full_kick_all(void) { }
163static inline void __tick_nohz_task_switch(struct task_struct *tsk) { } 161static inline void __tick_nohz_task_switch(void) { }
164#endif 162#endif
165 163
164static inline const struct cpumask *housekeeping_cpumask(void)
165{
166#ifdef CONFIG_NO_HZ_FULL
167 if (tick_nohz_full_enabled())
168 return housekeeping_mask;
169#endif
170 return cpu_possible_mask;
171}
172
166static inline bool is_housekeeping_cpu(int cpu) 173static inline bool is_housekeeping_cpu(int cpu)
167{ 174{
168#ifdef CONFIG_NO_HZ_FULL 175#ifdef CONFIG_NO_HZ_FULL
@@ -181,16 +188,10 @@ static inline void housekeeping_affine(struct task_struct *t)
181#endif 188#endif
182} 189}
183 190
184static inline void tick_nohz_full_check(void) 191static inline void tick_nohz_task_switch(void)
185{
186 if (tick_nohz_full_enabled())
187 __tick_nohz_full_check();
188}
189
190static inline void tick_nohz_task_switch(struct task_struct *tsk)
191{ 192{
192 if (tick_nohz_full_enabled()) 193 if (tick_nohz_full_enabled())
193 __tick_nohz_task_switch(tsk); 194 __tick_nohz_task_switch();
194} 195}
195 196
196#endif 197#endif
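
For illustration (not in the patch): the new housekeeping_cpumask() accessor lets callers confine work to the non-nohz_full CPUs without open-coding the #ifdef; the wrapper below is hypothetical.

	#include <linux/tick.h>
	#include <linux/sched.h>

	static void my_confine_to_housekeeping(struct task_struct *tsk)
	{
		/* Falls back to cpu_possible_mask when NO_HZ_FULL is off or disabled. */
		set_cpus_allowed_ptr(tsk, housekeeping_cpumask());
	}
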
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 77b5df2acd2a..367d5af899e8 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,11 +12,18 @@ typedef __s64 time64_t;
12 */ 12 */
13#if __BITS_PER_LONG == 64 13#if __BITS_PER_LONG == 64
14# define timespec64 timespec 14# define timespec64 timespec
15#define itimerspec64 itimerspec
15#else 16#else
16struct timespec64 { 17struct timespec64 {
17 time64_t tv_sec; /* seconds */ 18 time64_t tv_sec; /* seconds */
18 long tv_nsec; /* nanoseconds */ 19 long tv_nsec; /* nanoseconds */
19}; 20};
21
22struct itimerspec64 {
23 struct timespec64 it_interval;
24 struct timespec64 it_value;
25};
26
20#endif 27#endif
21 28
22/* Parameters used to convert the timespec values: */ 29/* Parameters used to convert the timespec values: */
@@ -45,6 +52,16 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
45 return ts; 52 return ts;
46} 53}
47 54
55static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
56{
57 return *its64;
58}
59
60static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
61{
62 return *its;
63}
64
48# define timespec64_equal timespec_equal 65# define timespec64_equal timespec_equal
49# define timespec64_compare timespec_compare 66# define timespec64_compare timespec_compare
50# define set_normalized_timespec64 set_normalized_timespec 67# define set_normalized_timespec64 set_normalized_timespec
@@ -77,6 +94,24 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
77 return ret; 94 return ret;
78} 95}
79 96
97static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
98{
99 struct itimerspec ret;
100
101 ret.it_interval = timespec64_to_timespec(its64->it_interval);
102 ret.it_value = timespec64_to_timespec(its64->it_value);
103 return ret;
104}
105
106static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
107{
108 struct itimerspec64 ret;
109
110 ret.it_interval = timespec_to_timespec64(its->it_interval);
111 ret.it_value = timespec_to_timespec64(its->it_value);
112 return ret;
113}
114
80static inline int timespec64_equal(const struct timespec64 *a, 115static inline int timespec64_equal(const struct timespec64 *a,
81 const struct timespec64 *b) 116 const struct timespec64 *b)
82{ 117{
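
For illustration (not in the patch): an ioctl-style path might widen the user-visible itimerspec into the new 64-bit form before handing it to internal code; the function name and the consumer are assumptions.

	#include <linux/time64.h>
	#include <linux/uaccess.h>

	static int my_set_timer(void __user *uarg)
	{
		struct itimerspec its;
		struct itimerspec64 its64;

		if (copy_from_user(&its, uarg, sizeof(its)))
			return -EFAULT;

		/* On 64-bit builds this is a plain copy; on 32-bit it widens tv_sec. */
		its64 = itimerspec_to_itimerspec64(&its);

		/* ... hand its64 to the (hypothetical) y2038-safe internals ... */
		return 0;
	}
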
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 6e191e4e6ab6..ba0ae09cbb21 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -18,10 +18,17 @@ extern int do_sys_settimeofday(const struct timespec *tv,
18 * Kernel time accessors 18 * Kernel time accessors
19 */ 19 */
20unsigned long get_seconds(void); 20unsigned long get_seconds(void);
21struct timespec current_kernel_time(void); 21struct timespec64 current_kernel_time64(void);
22/* does not take xtime_lock */ 22/* does not take xtime_lock */
23struct timespec __current_kernel_time(void); 23struct timespec __current_kernel_time(void);
24 24
25static inline struct timespec current_kernel_time(void)
26{
27 struct timespec64 now = current_kernel_time64();
28
29 return timespec64_to_timespec(now);
30}
31
25/* 32/*
26 * timespec based interfaces 33 * timespec based interfaces
27 */ 34 */
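
Not part of the patch: new callers are expected to use the 64-bit accessor directly, while the inline wrapper keeps existing struct timespec users building unchanged, e.g.:

	struct timespec64 now = current_kernel_time64();	/* y2038-safe coarse time */

	pr_debug("coarse time: %lld.%09ld\n", (long long)now.tv_sec, now.tv_nsec);
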
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 1063c850dbab..ed27917cabc9 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -243,6 +243,7 @@ enum {
243 TRACE_EVENT_FL_USE_CALL_FILTER_BIT, 243 TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
244 TRACE_EVENT_FL_TRACEPOINT_BIT, 244 TRACE_EVENT_FL_TRACEPOINT_BIT,
245 TRACE_EVENT_FL_KPROBE_BIT, 245 TRACE_EVENT_FL_KPROBE_BIT,
246 TRACE_EVENT_FL_UPROBE_BIT,
246}; 247};
247 248
248/* 249/*
@@ -257,6 +258,7 @@ enum {
257 * USE_CALL_FILTER - For trace internal events, don't use file filter 258 * USE_CALL_FILTER - For trace internal events, don't use file filter
258 * TRACEPOINT - Event is a tracepoint 259 * TRACEPOINT - Event is a tracepoint
259 * KPROBE - Event is a kprobe 260 * KPROBE - Event is a kprobe
261 * UPROBE - Event is a uprobe
260 */ 262 */
261enum { 263enum {
262 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), 264 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -267,8 +269,11 @@ enum {
267 TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), 269 TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
268 TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), 270 TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
269 TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), 271 TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
272 TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
270}; 273};
271 274
275#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
276
272struct trace_event_call { 277struct trace_event_call {
273 struct list_head list; 278 struct list_head list;
274 struct trace_event_class *class; 279 struct trace_event_class *class;
@@ -542,7 +547,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
542 event_triggers_post_call(file, tt); 547 event_triggers_post_call(file, tt);
543} 548}
544 549
545#ifdef CONFIG_BPF_SYSCALL 550#ifdef CONFIG_BPF_EVENTS
546unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx); 551unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
547#else 552#else
548static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx) 553static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index ad6c8913aa3e..d072ded41678 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -709,4 +709,10 @@ static inline void proc_tty_register_driver(struct tty_driver *d) {}
709static inline void proc_tty_unregister_driver(struct tty_driver *d) {} 709static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
710#endif 710#endif
711 711
712#define tty_debug(tty, f, args...) \
713 do { \
714 printk(KERN_DEBUG "%s: %s: " f, __func__, \
715 tty_name(tty), ##args); \
716 } while (0)
717
712#endif 718#endif
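
A usage sketch for the new tty_debug() macro (illustration only; the call site and message are made up):

	/* e.g. inside a driver's open() path: */
	tty_debug(tty, "opened, count = %d\n", tty->count);
	/* prints "<calling function>: <tty name>: opened, count = ..." at KERN_DEBUG */
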
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 92e337c18839..161052477f77 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -296,7 +296,7 @@ struct tty_operations {
296struct tty_driver { 296struct tty_driver {
297 int magic; /* magic number for this structure */ 297 int magic; /* magic number for this structure */
298 struct kref kref; /* Reference management */ 298 struct kref kref; /* Reference management */
299 struct cdev *cdevs; 299 struct cdev **cdevs;
300 struct module *owner; 300 struct module *owner;
301 const char *driver_name; 301 const char *driver_name;
302 const char *name; 302 const char *name;
diff --git a/include/linux/types.h b/include/linux/types.h
index 8715287c3b1f..c314989d9158 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -212,6 +212,9 @@ struct callback_head {
212}; 212};
213#define rcu_head callback_head 213#define rcu_head callback_head
214 214
215typedef void (*rcu_callback_t)(struct rcu_head *head);
216typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
217
215/* clocksource cycle base type */ 218/* clocksource cycle base type */
216typedef u64 cycle_t; 219typedef u64 cycle_t;
217 220
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ae572c138607..d6f2c2c5b043 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -129,4 +129,6 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
129extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); 129extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
130extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); 130extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
131 131
132extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
133
132#endif /* __LINUX_UACCESS_H__ */ 134#endif /* __LINUX_UACCESS_H__ */
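
Illustration only: strncpy_from_unsafe() copies a NUL-terminated string from an address that may not be safely dereferenceable, returning a negative value instead of faulting; the caller and buffer size below are hypothetical.

	static void my_dump_kernel_string(const void *unsafe_addr)
	{
		char buf[64];
		long ret = strncpy_from_unsafe(buf, unsafe_addr, sizeof(buf));

		if (ret > 0)
			pr_debug("string: %s\n", buf);
	}
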
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 60beb5dc7977..0bdc72f36905 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -92,6 +92,22 @@ struct uprobe_task {
92 unsigned int depth; 92 unsigned int depth;
93}; 93};
94 94
95struct return_instance {
96 struct uprobe *uprobe;
97 unsigned long func;
98 unsigned long stack; /* stack pointer */
99 unsigned long orig_ret_vaddr; /* original return address */
100 bool chained; /* true, if instance is nested */
101
102 struct return_instance *next; /* keep as stack */
103};
104
105enum rp_check {
106 RP_CHECK_CALL,
107 RP_CHECK_CHAIN_CALL,
108 RP_CHECK_RET,
109};
110
95struct xol_area; 111struct xol_area;
96 112
97struct uprobes_state { 113struct uprobes_state {
@@ -128,6 +144,7 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
128extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); 144extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
129extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 145extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
130extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); 146extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
147extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
131extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); 148extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
132extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, 149extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
133 void *src, unsigned long len); 150 void *src, unsigned long len);
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index ab94f78c4dd1..a41833cd184c 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -19,8 +19,11 @@ struct ci_hdrc_platform_data {
19 enum usb_phy_interface phy_mode; 19 enum usb_phy_interface phy_mode;
20 unsigned long flags; 20 unsigned long flags;
21#define CI_HDRC_REGS_SHARED BIT(0) 21#define CI_HDRC_REGS_SHARED BIT(0)
22#define CI_HDRC_DISABLE_DEVICE_STREAMING BIT(1)
22#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2) 23#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2)
23#define CI_HDRC_DISABLE_STREAMING BIT(3) 24#define CI_HDRC_DISABLE_HOST_STREAMING BIT(3)
25#define CI_HDRC_DISABLE_STREAMING (CI_HDRC_DISABLE_DEVICE_STREAMING | \
26 CI_HDRC_DISABLE_HOST_STREAMING)
24 /* 27 /*
25 * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1, 28 * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
26 * but otg is not supported (no register otgsc). 29 * but otg is not supported (no register otgsc).
@@ -29,12 +32,22 @@ struct ci_hdrc_platform_data {
29#define CI_HDRC_IMX28_WRITE_FIX BIT(5) 32#define CI_HDRC_IMX28_WRITE_FIX BIT(5)
30#define CI_HDRC_FORCE_FULLSPEED BIT(6) 33#define CI_HDRC_FORCE_FULLSPEED BIT(6)
31#define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7) 34#define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7)
35#define CI_HDRC_SET_NON_ZERO_TTHA BIT(8)
36#define CI_HDRC_OVERRIDE_AHB_BURST BIT(9)
37#define CI_HDRC_OVERRIDE_TX_BURST BIT(10)
38#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
32 enum usb_dr_mode dr_mode; 39 enum usb_dr_mode dr_mode;
33#define CI_HDRC_CONTROLLER_RESET_EVENT 0 40#define CI_HDRC_CONTROLLER_RESET_EVENT 0
34#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 41#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
35 void (*notify_event) (struct ci_hdrc *ci, unsigned event); 42 void (*notify_event) (struct ci_hdrc *ci, unsigned event);
36 struct regulator *reg_vbus; 43 struct regulator *reg_vbus;
44 struct usb_otg_caps ci_otg_caps;
37 bool tpl_support; 45 bool tpl_support;
46 /* interrupt threshold setting */
47 u32 itc_setting;
48 u32 ahb_burst_config;
49 u32 tx_burst_size;
50 u32 rx_burst_size;
38}; 51};
39 52
40/* Default offset of capability registers */ 53/* Default offset of capability registers */
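
A hypothetical platform-data initializer (not from the patch) showing the split streaming flags and the new burst/threshold overrides added above; all values are placeholders.

	static struct ci_hdrc_platform_data my_ci_pdata = {
		.flags            = CI_HDRC_DISABLE_HOST_STREAMING |
				    CI_HDRC_OVERRIDE_AHB_BURST |
				    CI_HDRC_OVERRIDE_TX_BURST,
		.dr_mode          = USB_DR_MODE_OTG,
		.itc_setting      = 1,		/* interrupt threshold (placeholder) */
		.ahb_burst_config = 0x0,	/* placeholder register value */
		.tx_burst_size    = 16,		/* placeholder */
	};
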
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 2511469a9904..1074b8921a5d 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -228,6 +228,8 @@ struct usb_function {
228 struct list_head list; 228 struct list_head list;
229 DECLARE_BITMAP(endpoints, 32); 229 DECLARE_BITMAP(endpoints, 32);
230 const struct usb_function_instance *fi; 230 const struct usb_function_instance *fi;
231
232 unsigned int bind_deactivated:1;
231}; 233};
232 234
233int usb_add_function(struct usb_configuration *, struct usb_function *); 235int usb_add_function(struct usb_configuration *, struct usb_function *);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 4f3dfb7d0654..c14a69b36d27 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -141,10 +141,49 @@ struct usb_ep_ops {
141}; 141};
142 142
143/** 143/**
144 * struct usb_ep_caps - endpoint capabilities description
145 * @type_control:Endpoint supports control type (reserved for ep0).
146 * @type_iso:Endpoint supports isochronous transfers.
147 * @type_bulk:Endpoint supports bulk transfers.
148 * @type_int:Endpoint supports interrupt transfers.
149 * @dir_in:Endpoint supports IN direction.
150 * @dir_out:Endpoint supports OUT direction.
151 */
152struct usb_ep_caps {
153 unsigned type_control:1;
154 unsigned type_iso:1;
155 unsigned type_bulk:1;
156 unsigned type_int:1;
157 unsigned dir_in:1;
158 unsigned dir_out:1;
159};
160
161#define USB_EP_CAPS_TYPE_CONTROL 0x01
162#define USB_EP_CAPS_TYPE_ISO 0x02
163#define USB_EP_CAPS_TYPE_BULK 0x04
164#define USB_EP_CAPS_TYPE_INT 0x08
165#define USB_EP_CAPS_TYPE_ALL \
166 (USB_EP_CAPS_TYPE_ISO | USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
167#define USB_EP_CAPS_DIR_IN 0x01
168#define USB_EP_CAPS_DIR_OUT 0x02
169#define USB_EP_CAPS_DIR_ALL (USB_EP_CAPS_DIR_IN | USB_EP_CAPS_DIR_OUT)
170
171#define USB_EP_CAPS(_type, _dir) \
172 { \
173 .type_control = !!(_type & USB_EP_CAPS_TYPE_CONTROL), \
174 .type_iso = !!(_type & USB_EP_CAPS_TYPE_ISO), \
175 .type_bulk = !!(_type & USB_EP_CAPS_TYPE_BULK), \
176 .type_int = !!(_type & USB_EP_CAPS_TYPE_INT), \
177 .dir_in = !!(_dir & USB_EP_CAPS_DIR_IN), \
178 .dir_out = !!(_dir & USB_EP_CAPS_DIR_OUT), \
179 }
180
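
Not from the patch: a UDC driver would describe each endpoint's abilities once, typically with the convenience macro; the bulk/ep0 split and the my_udc fields below are illustrative.

	static const struct usb_ep_caps my_bulk_caps =
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL);

	static const struct usb_ep_caps my_ep0_caps =
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL);

	/* later, while registering endpoints: */
	my_udc->ep0.caps   = my_ep0_caps;
	my_udc->ep_in.caps = my_bulk_caps;
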
181/**
144 * struct usb_ep - device side representation of USB endpoint 182 * struct usb_ep - device side representation of USB endpoint
145 * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk" 183 * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
146 * @ops: Function pointers used to access hardware-specific operations. 184 * @ops: Function pointers used to access hardware-specific operations.
147 * @ep_list:the gadget's ep_list holds all of its endpoints 185 * @ep_list:the gadget's ep_list holds all of its endpoints
 186 * @caps:The structure describing types and directions supported by the endpoint.
148 * @maxpacket:The maximum packet size used on this endpoint. The initial 187 * @maxpacket:The maximum packet size used on this endpoint. The initial
149 * value can sometimes be reduced (hardware allowing), according to 188 * value can sometimes be reduced (hardware allowing), according to
150 * the endpoint descriptor used to configure the endpoint. 189 * the endpoint descriptor used to configure the endpoint.
@@ -167,12 +206,15 @@ struct usb_ep_ops {
167 * gadget->ep_list. the control endpoint (gadget->ep0) is not in that list, 206 * gadget->ep_list. the control endpoint (gadget->ep0) is not in that list,
168 * and is accessed only in response to a driver setup() callback. 207 * and is accessed only in response to a driver setup() callback.
169 */ 208 */
209
170struct usb_ep { 210struct usb_ep {
171 void *driver_data; 211 void *driver_data;
172 212
173 const char *name; 213 const char *name;
174 const struct usb_ep_ops *ops; 214 const struct usb_ep_ops *ops;
175 struct list_head ep_list; 215 struct list_head ep_list;
216 struct usb_ep_caps caps;
217 bool claimed;
176 unsigned maxpacket:16; 218 unsigned maxpacket:16;
177 unsigned maxpacket_limit:16; 219 unsigned maxpacket_limit:16;
178 unsigned max_streams:16; 220 unsigned max_streams:16;
@@ -492,6 +534,9 @@ struct usb_gadget_ops {
492 int (*udc_start)(struct usb_gadget *, 534 int (*udc_start)(struct usb_gadget *,
493 struct usb_gadget_driver *); 535 struct usb_gadget_driver *);
494 int (*udc_stop)(struct usb_gadget *); 536 int (*udc_stop)(struct usb_gadget *);
537 struct usb_ep *(*match_ep)(struct usb_gadget *,
538 struct usb_endpoint_descriptor *,
539 struct usb_ss_ep_comp_descriptor *);
495}; 540};
496 541
497/** 542/**
@@ -511,6 +556,7 @@ struct usb_gadget_ops {
511 * @dev: Driver model state for this abstract device. 556 * @dev: Driver model state for this abstract device.
512 * @out_epnum: last used out ep number 557 * @out_epnum: last used out ep number
513 * @in_epnum: last used in ep number 558 * @in_epnum: last used in ep number
559 * @otg_caps: OTG capabilities of this gadget.
514 * @sg_supported: true if we can handle scatter-gather 560 * @sg_supported: true if we can handle scatter-gather
515 * @is_otg: True if the USB device port uses a Mini-AB jack, so that the 561 * @is_otg: True if the USB device port uses a Mini-AB jack, so that the
516 * gadget driver must provide a USB OTG descriptor. 562 * gadget driver must provide a USB OTG descriptor.
@@ -526,6 +572,9 @@ struct usb_gadget_ops {
526 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to 572 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
527 * MaxPacketSize. 573 * MaxPacketSize.
528 * @is_selfpowered: if the gadget is self-powered. 574 * @is_selfpowered: if the gadget is self-powered.
575 * @deactivated: True if gadget is deactivated - in deactivated state it cannot
576 * be connected.
577 * @connected: True if gadget is connected.
529 * 578 *
530 * Gadgets have a mostly-portable "gadget driver" implementing device 579 * Gadgets have a mostly-portable "gadget driver" implementing device
531 * functions, handling all usb configurations and interfaces. Gadget 580 * functions, handling all usb configurations and interfaces. Gadget
@@ -559,6 +608,7 @@ struct usb_gadget {
559 struct device dev; 608 struct device dev;
560 unsigned out_epnum; 609 unsigned out_epnum;
561 unsigned in_epnum; 610 unsigned in_epnum;
611 struct usb_otg_caps *otg_caps;
562 612
563 unsigned sg_supported:1; 613 unsigned sg_supported:1;
564 unsigned is_otg:1; 614 unsigned is_otg:1;
@@ -567,7 +617,12 @@ struct usb_gadget {
567 unsigned a_hnp_support:1; 617 unsigned a_hnp_support:1;
568 unsigned a_alt_hnp_support:1; 618 unsigned a_alt_hnp_support:1;
569 unsigned quirk_ep_out_aligned_size:1; 619 unsigned quirk_ep_out_aligned_size:1;
620 unsigned quirk_altset_not_supp:1;
621 unsigned quirk_stall_not_supp:1;
622 unsigned quirk_zlp_not_supp:1;
570 unsigned is_selfpowered:1; 623 unsigned is_selfpowered:1;
624 unsigned deactivated:1;
625 unsigned connected:1;
571}; 626};
572#define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) 627#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
573 628
@@ -584,7 +639,6 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
584#define gadget_for_each_ep(tmp, gadget) \ 639#define gadget_for_each_ep(tmp, gadget) \
585 list_for_each_entry(tmp, &(gadget)->ep_list, ep_list) 640 list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
586 641
587
588/** 642/**
589 * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget 643 * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
 590 * requires quirk_ep_out_aligned_size, otherwise returns len. 644 * requires quirk_ep_out_aligned_size, otherwise returns len.
@@ -603,6 +657,34 @@ usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
603} 657}
604 658
605/** 659/**
660 * gadget_is_altset_supported - return true iff the hardware supports
661 * altsettings
662 * @g: controller to check for quirk
663 */
664static inline int gadget_is_altset_supported(struct usb_gadget *g)
665{
666 return !g->quirk_altset_not_supp;
667}
668
669/**
670 * gadget_is_stall_supported - return true iff the hardware supports stalling
671 * @g: controller to check for quirk
672 */
673static inline int gadget_is_stall_supported(struct usb_gadget *g)
674{
675 return !g->quirk_stall_not_supp;
676}
677
678/**
679 * gadget_is_zlp_supported - return true iff the hardware supports zlp
680 * @g: controller to check for quirk
681 */
682static inline int gadget_is_zlp_supported(struct usb_gadget *g)
683{
684 return !g->quirk_zlp_not_supp;
685}
686
687/**
606 * gadget_is_dualspeed - return true iff the hardware handles high speed 688 * gadget_is_dualspeed - return true iff the hardware handles high speed
607 * @g: controller that might support both high and full speeds 689 * @g: controller that might support both high and full speeds
608 */ 690 */
@@ -771,9 +853,24 @@ static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
771 */ 853 */
772static inline int usb_gadget_connect(struct usb_gadget *gadget) 854static inline int usb_gadget_connect(struct usb_gadget *gadget)
773{ 855{
856 int ret;
857
774 if (!gadget->ops->pullup) 858 if (!gadget->ops->pullup)
775 return -EOPNOTSUPP; 859 return -EOPNOTSUPP;
776 return gadget->ops->pullup(gadget, 1); 860
861 if (gadget->deactivated) {
862 /*
863 * If gadget is deactivated we only save new state.
864 * Gadget will be connected automatically after activation.
865 */
866 gadget->connected = true;
867 return 0;
868 }
869
870 ret = gadget->ops->pullup(gadget, 1);
871 if (!ret)
872 gadget->connected = 1;
873 return ret;
777} 874}
778 875
779/** 876/**
@@ -784,20 +881,88 @@ static inline int usb_gadget_connect(struct usb_gadget *gadget)
784 * as a disconnect (when a VBUS session is active). Not all systems 881 * as a disconnect (when a VBUS session is active). Not all systems
785 * support software pullup controls. 882 * support software pullup controls.
786 * 883 *
884 * Returns zero on success, else negative errno.
885 */
886static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
887{
888 int ret;
889
890 if (!gadget->ops->pullup)
891 return -EOPNOTSUPP;
892
893 if (gadget->deactivated) {
894 /*
895 * If gadget is deactivated we only save new state.
896 * Gadget will stay disconnected after activation.
897 */
898 gadget->connected = false;
899 return 0;
900 }
901
902 ret = gadget->ops->pullup(gadget, 0);
903 if (!ret)
904 gadget->connected = 0;
905 return ret;
906}
907
908/**
909 * usb_gadget_deactivate - deactivate function which is not ready to work
910 * @gadget: the peripheral being deactivated
911 *
787 * This routine may be used during the gadget driver bind() call to prevent 912 * This routine may be used during the gadget driver bind() call to prevent
788 * the peripheral from ever being visible to the USB host, unless later 913 * the peripheral from ever being visible to the USB host, unless later
789 * usb_gadget_connect() is called. For example, user mode components may 914 * usb_gadget_activate() is called. For example, user mode components may
790 * need to be activated before the system can talk to hosts. 915 * need to be activated before the system can talk to hosts.
791 * 916 *
792 * Returns zero on success, else negative errno. 917 * Returns zero on success, else negative errno.
793 */ 918 */
794static inline int usb_gadget_disconnect(struct usb_gadget *gadget) 919static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
795{ 920{
796 if (!gadget->ops->pullup) 921 int ret;
797 return -EOPNOTSUPP; 922
798 return gadget->ops->pullup(gadget, 0); 923 if (gadget->deactivated)
924 return 0;
925
926 if (gadget->connected) {
927 ret = usb_gadget_disconnect(gadget);
928 if (ret)
929 return ret;
930 /*
931 * If gadget was being connected before deactivation, we want
932 * to reconnect it in usb_gadget_activate().
933 */
934 gadget->connected = true;
935 }
936 gadget->deactivated = true;
937
938 return 0;
799} 939}
800 940
941/**
942 * usb_gadget_activate - activate function which is not ready to work
943 * @gadget: the peripheral being activated
944 *
945 * This routine activates gadget which was previously deactivated with
946 * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
947 *
948 * Returns zero on success, else negative errno.
949 */
950static inline int usb_gadget_activate(struct usb_gadget *gadget)
951{
952 if (!gadget->deactivated)
953 return 0;
954
955 gadget->deactivated = false;
956
957 /*
958 * If gadget has been connected before deactivation, or became connected
959 * while it was being deactivated, we call usb_gadget_connect().
960 */
961 if (gadget->connected)
962 return usb_gadget_connect(gadget);
963
964 return 0;
965}
801 966
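
Illustration only: a function driver that must wait for a userspace component could keep the pullup off from bind() until it is ready; this is presumably what the bind_deactivated flag added to struct usb_function (composite.h hunk above) automates. The error variable and call sites are assumptions.

	/* in bind(): stay invisible to the host until userspace attaches */
	err = usb_gadget_deactivate(gadget);
	if (err)
		return err;

	/* ... later, once the daemon has opened the function ... */
	err = usb_gadget_activate(gadget);	/* reconnects if the gadget was logically connected */
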
802/*-------------------------------------------------------------------------*/ 967/*-------------------------------------------------------------------------*/
803 968
@@ -1002,6 +1167,10 @@ int usb_assign_descriptors(struct usb_function *f,
1002 struct usb_descriptor_header **ss); 1167 struct usb_descriptor_header **ss);
1003void usb_free_all_descriptors(struct usb_function *f); 1168void usb_free_all_descriptors(struct usb_function *f);
1004 1169
1170struct usb_descriptor_header *usb_otg_descriptor_alloc(
1171 struct usb_gadget *gadget);
1172int usb_otg_descriptor_init(struct usb_gadget *gadget,
1173 struct usb_descriptor_header *otg_desc);
1005/*-------------------------------------------------------------------------*/ 1174/*-------------------------------------------------------------------------*/
1006 1175
1007/* utility to simplify map/unmap of usb_requests to/from DMA */ 1176/* utility to simplify map/unmap of usb_requests to/from DMA */
@@ -1034,6 +1203,21 @@ extern void usb_gadget_giveback_request(struct usb_ep *ep,
1034 1203
1035/*-------------------------------------------------------------------------*/ 1204/*-------------------------------------------------------------------------*/
1036 1205
1206/* utility to find endpoint by name */
1207
1208extern struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g,
1209 const char *name);
1210
1211/*-------------------------------------------------------------------------*/
1212
1213/* utility to check if endpoint caps match descriptor needs */
1214
1215extern int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
1216 struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
1217 struct usb_ss_ep_comp_descriptor *ep_comp);
1218
1219/*-------------------------------------------------------------------------*/
1220
1037/* utility to update vbus status for udc core, it may be scheduled */ 1221/* utility to update vbus status for udc core, it may be scheduled */
1038extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status); 1222extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status);
1039 1223
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index c9aa7792de10..d2784c10bfe2 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -564,9 +564,9 @@ extern void usb_ep0_reinit(struct usb_device *);
564 564
565/*-------------------------------------------------------------------------*/ 565/*-------------------------------------------------------------------------*/
566 566
567/* class requests from USB 3.0 hub spec, table 10-5 */ 567/* class requests from USB 3.1 hub spec, table 10-7 */
568#define SetHubDepth (0x3000 | HUB_SET_DEPTH) 568#define SetHubDepth (0x2000 | HUB_SET_DEPTH)
569#define GetPortErrorCount (0x8000 | HUB_GET_PORT_ERR_COUNT) 569#define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT)
570 570
571/* 571/*
572 * Generic bandwidth allocation constants/support 572 * Generic bandwidth allocation constants/support
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index e55a1504266e..8c8f6854c993 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -128,7 +128,7 @@ struct msm_otg_platform_data {
128 */ 128 */
129struct msm_usb_cable { 129struct msm_usb_cable {
130 struct notifier_block nb; 130 struct notifier_block nb;
131 struct extcon_specific_cable_nb conn; 131 struct extcon_dev *extcon;
132}; 132};
133 133
134/** 134/**
@@ -155,6 +155,10 @@ struct msm_usb_cable {
155 * starting controller using usbcmd run/stop bit. 155 * starting controller using usbcmd run/stop bit.
 156 * @vbus: VBUS signal state tracking, using extcon framework 156 * @vbus: VBUS signal state tracking, using extcon framework
 157 * @id: ID signal state tracking, using extcon framework 157 * @id: ID signal state tracking, using extcon framework
158 * @switch_gpio: Descriptor for GPIO used to control external Dual
159 * SPDT USB Switch.
160 * @reboot: Used to inform the driver to route USB D+/D- line to Device
161 * connector
158 */ 162 */
159struct msm_otg { 163struct msm_otg {
160 struct usb_phy phy; 164 struct usb_phy phy;
@@ -188,6 +192,9 @@ struct msm_otg {
188 192
189 struct msm_usb_cable vbus; 193 struct msm_usb_cable vbus;
190 struct msm_usb_cable id; 194 struct msm_usb_cable id;
195
196 struct gpio_desc *switch_gpio;
197 struct notifier_block reboot;
191}; 198};
192 199
193#endif 200#endif
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index cfe0528cdbb1..8c5a818ec244 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -15,6 +15,8 @@
15enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np); 15enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
16enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np); 16enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
17bool of_usb_host_tpl_support(struct device_node *np); 17bool of_usb_host_tpl_support(struct device_node *np);
18int of_usb_update_otg_caps(struct device_node *np,
19 struct usb_otg_caps *otg_caps);
18#else 20#else
19static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np) 21static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
20{ 22{
@@ -30,6 +32,11 @@ static inline bool of_usb_host_tpl_support(struct device_node *np)
30{ 32{
31 return false; 33 return false;
32} 34}
35static inline int of_usb_update_otg_caps(struct device_node *np,
36 struct usb_otg_caps *otg_caps)
37{
38 return 0;
39}
33#endif 40#endif
34 41
35#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT) 42#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 52661c5da690..bd1dcf816100 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -41,6 +41,21 @@ struct usb_otg {
41 41
42}; 42};
43 43
44/**
45 * struct usb_otg_caps - describes the otg capabilities of the device
46 * @otg_rev: The OTG revision number the device is compliant with, it's
47 * in binary-coded decimal (i.e. 2.0 is 0200H).
48 * @hnp_support: Indicates if the device supports HNP.
49 * @srp_support: Indicates if the device supports SRP.
50 * @adp_support: Indicates if the device supports ADP.
51 */
52struct usb_otg_caps {
53 u16 otg_rev;
54 bool hnp_support;
55 bool srp_support;
56 bool adp_support;
57};
58
44extern const char *usb_otg_state_string(enum usb_otg_state state); 59extern const char *usb_otg_state_string(enum usb_otg_state state);
45 60
46/* Context: can sleep */ 61/* Context: can sleep */
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
new file mode 100644
index 000000000000..587480ad41b7
--- /dev/null
+++ b/include/linux/userfaultfd_k.h
@@ -0,0 +1,85 @@
+/*
+ * include/linux/userfaultfd_k.h
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_K_H
+#define _LINUX_USERFAULTFD_K_H
+
+#ifdef CONFIG_USERFAULTFD
+
+#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
+
+#include <linux/fcntl.h>
+
+/*
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from userfaultfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
+#define UFFD_CLOEXEC O_CLOEXEC
+#define UFFD_NONBLOCK O_NONBLOCK
+
+#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
+#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
+
+extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
+			    unsigned int flags, unsigned long reason);
+
+extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
+			    unsigned long src_start, unsigned long len);
+extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
+			      unsigned long dst_start,
+			      unsigned long len);
+
+/* mm helpers */
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+					struct vm_userfaultfd_ctx vm_ctx)
+{
+	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & VM_UFFD_MISSING;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
+}
+
+#else /* CONFIG_USERFAULTFD */
+
+/* mm helpers */
+static inline int handle_userfault(struct vm_area_struct *vma,
+				   unsigned long address,
+				   unsigned int flags,
+				   unsigned long reason)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+					struct vm_userfaultfd_ctx vm_ctx)
+{
+	return true;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+	return false;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+	return false;
+}
+
+#endif /* CONFIG_USERFAULTFD */
+
+#endif /* _LINUX_USERFAULTFD_K_H */
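
To show how the mm helpers above are intended to be used, here is a minimal fault-path sketch; the wrapper function is hypothetical, and only userfaultfd_missing(), handle_userfault() and VM_UFFD_MISSING come from this series.

#include <linux/mm.h>
#include <linux/userfaultfd_k.h>

/* Hypothetical caller: if the VMA was registered for missing-page
 * tracking, hand the fault to the userfaultfd reader instead of
 * failing it outright. */
static int example_handle_missing_fault(struct vm_area_struct *vma,
					unsigned long address,
					unsigned int flags)
{
	if (userfaultfd_missing(vma))
		return handle_userfault(vma, address, flags, VM_UFFD_MISSING);

	return VM_FAULT_SIGBUS;		/* placeholder fallback for the sketch */
}
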
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h
index ac34819214f9..da2049b5161c 100644
--- a/include/linux/verify_pefile.h
+++ b/include/linux/verify_pefile.h
@@ -12,7 +12,11 @@
 #ifndef _LINUX_VERIFY_PEFILE_H
 #define _LINUX_VERIFY_PEFILE_H
 
+#include <crypto/public_key.h>
+
 extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
-				   struct key *trusted_keyring, bool *_trusted);
+				   struct key *trusted_keyring,
+				   enum key_being_used_for usage,
+				   bool *_trusted);
 
 #endif /* _LINUX_VERIFY_PEFILE_H */
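
With the new enum key_being_used_for argument, callers state what the PE signature is being checked for. A hedged example of the updated call follows; the wrapper and its variables are hypothetical, and VERIFYING_KEXEC_PE_SIGNATURE is assumed to be one of the enum's values (it is not shown in this hunk).

#include <linux/key.h>
#include <linux/verify_pefile.h>

/* Hypothetical caller verifying a PE kernel image before kexec. */
static int example_verify_pe(const void *pebuf, unsigned int pelen,
			     struct key *keyring)
{
	bool trusted;

	return verify_pefile_signature(pebuf, pelen, keyring,
				       VERIFYING_KEXEC_PE_SIGNATURE,
				       &trusted);
}
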
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1e1bf9f963a9..d3d077228d4c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -147,7 +147,8 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
 
 typedef int wait_bit_action_f(struct wait_bit_key *);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
+			  void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -179,7 +180,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 #define wake_up_poll(x, m)						\
 	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
 #define wake_up_locked_poll(x, m)					\
-	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+	__wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m))
 #define wake_up_interruptible_poll(x, m)				\
 	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
 #define wake_up_interruptible_sync_poll(x, m)				\
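
The extra nr argument bounds how many exclusive waiters __wake_up_locked_key() wakes while the waitqueue lock is already held; the poll wrapper above now fixes it at 1. A minimal sketch (queue name and poll mask are illustrative):

#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);

/* Wake at most one exclusive waiter while example_wq.lock is held;
 * wake_up_locked_poll() now expands to
 *	__wake_up_locked_key(&example_wq, TASK_NORMAL, 1, (void *) POLLIN); */
static void example_wake_one_locked(void)
{
	wake_up_locked_poll(&example_wq, POLLIN);
}
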
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index f47feada5b42..d74a0e907b9e 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -140,12 +140,4 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
 extern int watchdog_register_device(struct watchdog_device *);
 extern void watchdog_unregister_device(struct watchdog_device *);
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void watchdog_nmi_disable_all(void);
-void watchdog_nmi_enable_all(void);
-#else
-static inline void watchdog_nmi_disable_all(void) {}
-static inline void watchdog_nmi_enable_all(void) {}
-#endif
-
 #endif /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 738b30b39b68..0197358f1e81 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -265,7 +265,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 /**
  * delayed_work_pending - Find out whether a delayable work item is currently
  * pending
- * @work: The work item in question
+ * @w: The work item in question
  */
 #define delayed_work_pending(w) \
 	work_pending(&(w)->work)
@@ -366,7 +366,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags
  * @max_active: max in-flight work items, 0 for default
- * @args: args for @fmt
+ * @args...: args for @fmt
  *
  * Allocate a workqueue with the specified parameters. For detailed
  * information on WQ_* flags, please refer to Documentation/workqueue.txt.
@@ -398,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args: args for @fmt
+ * @args...: args for @fmt
  *
  * Allocate an ordered workqueue. An ordered workqueue executes at
  * most one work item at any given time in the queued order. They are
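
Since @args... feeds @fmt, the workqueue name can be formatted at allocation time. A short usage sketch under that reading (the name, flags and id parameter are illustrative):

#include <linux/workqueue.h>

/* Allocate an ordered workqueue whose name is built from @fmt/@args...;
 * it executes at most one queued work item at a time. */
static struct workqueue_struct *example_create_wq(int id)
{
	return alloc_ordered_workqueue("example_wq/%d", WQ_MEM_RECLAIM, id);
}
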
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
index f9d41a6e361f..e183a0a65ac1 100644
--- a/include/linux/zbud.h
+++ b/include/linux/zbud.h
@@ -9,7 +9,7 @@ struct zbud_ops {
 	int (*evict)(struct zbud_pool *pool, unsigned long handle);
 };
 
-struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
+struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
 void zbud_destroy_pool(struct zbud_pool *pool);
 int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
 	unsigned long *handle);
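
With zbud_create_pool() taking a const ops pointer, callers can keep their eviction callbacks in a const structure. A minimal sketch, assuming the evict semantics implied by struct zbud_ops above (the handler body is a placeholder):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/zbud.h>

/* Placeholder evict callback: a real user would write the stored data
 * back and free the handle before returning 0. */
static int example_zbud_evict(struct zbud_pool *pool, unsigned long handle)
{
	return -EINVAL;
}

static const struct zbud_ops example_zbud_ops = {
	.evict = example_zbud_evict,
};

static struct zbud_pool *example_zbud_create(void)
{
	return zbud_create_pool(GFP_KERNEL, &example_zbud_ops);
}
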
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index d30eff3d84d5..42f8ec992452 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -36,8 +36,10 @@ enum zpool_mapmode {
 	ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
 };
 
+bool zpool_has_pool(char *type);
+
 struct zpool *zpool_create_pool(char *type, char *name,
-		gfp_t gfp, struct zpool_ops *ops);
+		gfp_t gfp, const struct zpool_ops *ops);
 
 char *zpool_get_type(struct zpool *pool);
 
@@ -81,7 +83,7 @@ struct zpool_driver {
 	atomic_t refcount;
 	struct list_head list;
 
-	void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops,
+	void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops,
 			struct zpool *zpool);
 	void (*destroy)(void *pool);
 
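
zpool_has_pool() lets a caller check whether a given allocator type is available before committing to it, and the ops can now be const. A hedged sketch (the fallback policy, names and the evict handler signature are assumptions):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/zpool.h>

/* Placeholder evict callback for the sketch. */
static int example_zpool_evict(struct zpool *pool, unsigned long handle)
{
	return -EINVAL;
}

static const struct zpool_ops example_zpool_ops = {
	.evict = example_zpool_evict,
};

/* Fall back to zbud if the preferred allocator is not compiled in. */
static struct zpool *example_zpool_create(char *type)
{
	if (!zpool_has_pool(type))
		type = "zbud";

	return zpool_create_pool(type, "example", GFP_KERNEL,
				 &example_zpool_ops);
}
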
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 1338190b5478..6398dfae53f1 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -34,6 +34,11 @@ enum zs_mapmode {
  */
 };
 
+struct zs_pool_stats {
+	/* How many pages were migrated (freed) */
+	unsigned long pages_compacted;
+};
+
 struct zs_pool;
 
 struct zs_pool *zs_create_pool(char *name, gfp_t flags);
@@ -49,4 +54,5 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
 unsigned long zs_get_total_pages(struct zs_pool *pool);
 unsigned long zs_compact(struct zs_pool *pool);
 
+void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
 #endif
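
The new zs_pool_stats() call exposes what compaction achieved. A minimal sketch of reading it after zs_compact(), assuming a pool obtained from zs_create_pool():

#include <linux/zsmalloc.h>

/* Compact the pool, then report how many pages compaction freed. */
static unsigned long example_compact_and_report(struct zs_pool *pool)
{
	struct zs_pool_stats stats;

	zs_compact(pool);
	zs_pool_stats(pool, &stats);

	return stats.pages_compacted;
}
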