Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 26
-rw-r--r--  include/linux/ahci_platform.h | 6
-rw-r--r--  include/linux/amba/bus.h | 13
-rw-r--r--  include/linux/ata.h | 2
-rw-r--r--  include/linux/ata_platform.h | 5
-rw-r--r--  include/linux/audit.h | 1
-rw-r--r--  include/linux/backing-dev.h | 53
-rw-r--r--  include/linux/bcma/bcma.h | 1
-rw-r--r--  include/linux/bcma/bcma_driver_pci.h | 2
-rw-r--r--  include/linux/bcma/bcma_regs.h | 2
-rw-r--r--  include/linux/bcma/bcma_soc.h | 2
-rw-r--r--  include/linux/bio.h | 12
-rw-r--r--  include/linux/bitmap.h | 59
-rw-r--r--  include/linux/bitrev.h | 77
-rw-r--r--  include/linux/blk-mq.h | 18
-rw-r--r--  include/linux/blk_types.h | 2
-rw-r--r--  include/linux/blkdev.h | 25
-rw-r--r--  include/linux/cdev.h | 2
-rw-r--r--  include/linux/ceph/osd_client.h | 4
-rw-r--r--  include/linux/cgroup.h | 4
-rw-r--r--  include/linux/cgroup_subsys.h | 8
-rw-r--r--  include/linux/clocksource.h | 102
-rw-r--r--  include/linux/compaction.h | 86
-rw-r--r--  include/linux/compat.h | 9
-rw-r--r--  include/linux/compiler-gcc.h | 1
-rw-r--r--  include/linux/compiler-gcc4.h | 4
-rw-r--r--  include/linux/compiler-gcc5.h | 2
-rw-r--r--  include/linux/compiler.h | 39
-rw-r--r--  include/linux/coresight.h | 22
-rw-r--r--  include/linux/cpufreq.h | 10
-rw-r--r--  include/linux/cpuidle.h | 13
-rw-r--r--  include/linux/cpumask.h | 71
-rw-r--r--  include/linux/crypto.h | 11
-rw-r--r--  include/linux/cryptohash.h | 2
-rw-r--r--  include/linux/dcache.h | 3
-rw-r--r--  include/linux/debugfs.h | 18
-rw-r--r--  include/linux/devfreq-event.h | 196
-rw-r--r--  include/linux/device-mapper.h | 10
-rw-r--r--  include/linux/device.h | 62
-rw-r--r--  include/linux/dqblk_v1.h | 3
-rw-r--r--  include/linux/efi.h | 4
-rw-r--r--  include/linux/enclosure.h | 13
-rw-r--r--  include/linux/etherdevice.h | 4
-rw-r--r--  include/linux/exportfs.h | 23
-rw-r--r--  include/linux/f2fs_fs.h | 7
-rw-r--r--  include/linux/fb.h | 12
-rw-r--r--  include/linux/fec.h | 1
-rw-r--r--  include/linux/fs.h | 144
-rw-r--r--  include/linux/fs_pin.h | 25
-rw-r--r--  include/linux/fsnotify.h | 6
-rw-r--r--  include/linux/ftrace_event.h | 6
-rw-r--r--  include/linux/genetlink.h | 4
-rw-r--r--  include/linux/gfp.h | 12
-rw-r--r--  include/linux/gpio/consumer.h | 17
-rw-r--r--  include/linux/hdmi.h | 37
-rw-r--r--  include/linux/hid.h | 4
-rw-r--r--  include/linux/host1x.h | 18
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  include/linux/huge_mm.h | 12
-rw-r--r--  include/linux/hugetlb.h | 10
-rw-r--r--  include/linux/hw_random.h | 4
-rw-r--r--  include/linux/hyperv.h | 38
-rw-r--r--  include/linux/i2c.h | 6
-rw-r--r--  include/linux/i2o.h | 988
-rw-r--r--  include/linux/ieee80211.h | 27
-rw-r--r--  include/linux/if_bridge.h | 18
-rw-r--r--  include/linux/if_vlan.h | 74
-rw-r--r--  include/linux/iio/buffer.h | 76
-rw-r--r--  include/linux/iio/common/ssp_sensors.h | 82
-rw-r--r--  include/linux/iio/consumer.h | 12
-rw-r--r--  include/linux/iio/iio.h | 11
-rw-r--r--  include/linux/iio/kfifo_buf.h | 5
-rw-r--r--  include/linux/iio/types.h | 14
-rw-r--r--  include/linux/init_task.h | 11
-rw-r--r--  include/linux/input/mt.h | 3
-rw-r--r--  include/linux/iopoll.h | 144
-rw-r--r--  include/linux/iova.h | 41
-rw-r--r--  include/linux/ipv6.h | 13
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 44
-rw-r--r--  include/linux/irqchip/irq-omap-intc.h | 2
-rw-r--r--  include/linux/jbd.h | 9
-rw-r--r--  include/linux/jbd2.h | 9
-rw-r--r--  include/linux/kasan.h | 89
-rw-r--r--  include/linux/kdb.h | 62
-rw-r--r--  include/linux/kernel.h | 6
-rw-r--r--  include/linux/kernfs.h | 7
-rw-r--r--  include/linux/kexec.h | 22
-rw-r--r--  include/linux/kprobes.h | 3
-rw-r--r--  include/linux/ktime.h | 17
-rw-r--r--  include/linux/kvm_host.h | 28
-rw-r--r--  include/linux/led-class-flash.h | 207
-rw-r--r--  include/linux/leds.h | 3
-rw-r--r--  include/linux/libata.h | 13
-rw-r--r--  include/linux/list_lru.h | 82
-rw-r--r--  include/linux/list_nulls.h | 6
-rw-r--r--  include/linux/livepatch.h | 133
-rw-r--r--  include/linux/lockref.h | 3
-rw-r--r--  include/linux/mei_cl_bus.h | 4
-rw-r--r--  include/linux/memcontrol.h | 92
-rw-r--r--  include/linux/mfd/abx500/ab8500-bm.h | 1
-rw-r--r--  include/linux/mfd/max77693-private.h | 108
-rw-r--r--  include/linux/mfd/samsung/s2mps13.h | 2
-rw-r--r--  include/linux/mfd/stmpe.h | 16
-rw-r--r--  include/linux/mfd/syscon/atmel-matrix.h | 117
-rw-r--r--  include/linux/mfd/syscon/atmel-smc.h | 173
-rw-r--r--  include/linux/mfd/syscon/exynos4-pmu.h | 21
-rw-r--r--  include/linux/mfd/tc3589x.h | 12
-rw-r--r--  include/linux/mfd/ti_am335x_tscadc.h | 3
-rw-r--r--  include/linux/mfd/tmio.h | 28
-rw-r--r--  include/linux/migrate.h | 4
-rw-r--r--  include/linux/mlx4/cmd.h | 16
-rw-r--r--  include/linux/mlx4/device.h | 63
-rw-r--r--  include/linux/mlx4/driver.h | 19
-rw-r--r--  include/linux/mlx4/qp.h | 1
-rw-r--r--  include/linux/mm.h | 158
-rw-r--r--  include/linux/mm_types.h | 23
-rw-r--r--  include/linux/mmc/card.h | 2
-rw-r--r--  include/linux/mmc/core.h | 1
-rw-r--r--  include/linux/mmc/dw_mmc.h | 6
-rw-r--r--  include/linux/mmc/host.h | 4
-rw-r--r--  include/linux/mmc/mmc.h | 10
-rw-r--r--  include/linux/mmc/sdhci.h | 11
-rw-r--r--  include/linux/mmc/sdio_ids.h | 6
-rw-r--r--  include/linux/mmc/sh_mobile_sdhi.h | 15
-rw-r--r--  include/linux/mmc/slot-gpio.h | 5
-rw-r--r--  include/linux/mmzone.h | 15
-rw-r--r--  include/linux/mod_devicetable.h | 9
-rw-r--r--  include/linux/module.h | 4
-rw-r--r--  include/linux/moduleloader.h | 4
-rw-r--r--  include/linux/mtd/mtd.h | 2
-rw-r--r--  include/linux/mtd/ubi.h | 53
-rw-r--r--  include/linux/mutex.h | 1
-rw-r--r--  include/linux/netdev_features.h | 6
-rw-r--r--  include/linux/netdevice.h | 94
-rw-r--r--  include/linux/nfs4.h | 3
-rw-r--r--  include/linux/nfs_fs_sb.h | 12
-rw-r--r--  include/linux/nfs_idmap.h | 2
-rw-r--r--  include/linux/nfs_page.h | 22
-rw-r--r--  include/linux/nfs_xdr.h | 19
-rw-r--r--  include/linux/nodemask.h | 67
-rw-r--r--  include/linux/nvme.h | 3
-rw-r--r--  include/linux/of_gpio.h | 1
-rw-r--r--  include/linux/oom.h | 23
-rw-r--r--  include/linux/osq_lock.h | 12
-rw-r--r--  include/linux/page-flags.h | 5
-rw-r--r--  include/linux/page_counter.h | 3
-rw-r--r--  include/linux/page_ext.h | 2
-rw-r--r--  include/linux/pci.h | 27
-rw-r--r--  include/linux/percpu-refcount.h | 34
-rw-r--r--  include/linux/perf_event.h | 65
-rw-r--r--  include/linux/perf_regs.h | 16
-rw-r--r--  include/linux/phy.h | 12
-rw-r--r--  include/linux/phy/omap_control_phy.h | 6
-rw-r--r--  include/linux/phy/phy-qcom-ufs.h | 59
-rw-r--r--  include/linux/pid_namespace.h | 4
-rw-r--r--  include/linux/pinctrl/consumer.h | 6
-rw-r--r--  include/linux/pinctrl/pinconf-generic.h | 29
-rw-r--r--  include/linux/pinctrl/pinctrl.h | 12
-rw-r--r--  include/linux/platform_data/cpuidle-exynos.h | 20
-rw-r--r--  include/linux/platform_data/ipmmu-vmsa.h | 24
-rw-r--r--  include/linux/platform_data/irda-sa11x0.h | 20
-rw-r--r--  include/linux/platform_data/mmc-omap.h | 4
-rw-r--r--  include/linux/platform_data/regulator-haptic.h | 29
-rw-r--r--  include/linux/platform_data/st21nfca.h | 2
-rw-r--r--  include/linux/platform_data/st21nfcb.h | 4
-rw-r--r--  include/linux/platform_data/tpm_stm_st33.h | 39
-rw-r--r--  include/linux/platform_data/vsp1.h | 27
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/pm_domain.h | 4
-rw-r--r--  include/linux/pnp.h | 12
-rw-r--r--  include/linux/power/charger-manager.h | 32
-rw-r--r--  include/linux/printk.h | 21
-rw-r--r--  include/linux/pstore.h | 1
-rw-r--r--  include/linux/pstore_ram.h | 1
-rw-r--r--  include/linux/pxa2xx_ssp.h | 1
-rw-r--r--  include/linux/quota.h | 69
-rw-r--r--  include/linux/quotaops.h | 7
-rw-r--r--  include/linux/rbtree.h | 2
-rw-r--r--  include/linux/rculist.h | 16
-rw-r--r--  include/linux/rcupdate.h | 13
-rw-r--r--  include/linux/rcutiny.h | 45
-rw-r--r--  include/linux/rcutree.h | 11
-rw-r--r--  include/linux/regmap.h | 2
-rw-r--r--  include/linux/regulator/da9211.h | 2
-rw-r--r--  include/linux/regulator/driver.h | 13
-rw-r--r--  include/linux/regulator/machine.h | 13
-rw-r--r--  include/linux/regulator/mt6397-regulator.h | 49
-rw-r--r--  include/linux/regulator/pfuze100.h | 14
-rw-r--r--  include/linux/resource_ext.h | 77
-rw-r--r--  include/linux/rhashtable.h | 308
-rw-r--r--  include/linux/rmap.h | 14
-rw-r--r--  include/linux/rtc.h | 2
-rw-r--r--  include/linux/rtc/ds1685.h | 375
-rw-r--r--  include/linux/sched.h | 6
-rw-r--r--  include/linux/security.h | 58
-rw-r--r--  include/linux/seq_buf.h | 3
-rw-r--r--  include/linux/seq_file.h | 25
-rw-r--r--  include/linux/serial_8250.h | 4
-rw-r--r--  include/linux/serial_core.h | 22
-rw-r--r--  include/linux/serial_s3c.h | 28
-rw-r--r--  include/linux/shrinker.h | 6
-rw-r--r--  include/linux/skbuff.h | 44
-rw-r--r--  include/linux/slab.h | 45
-rw-r--r--  include/linux/slab_def.h | 2
-rw-r--r--  include/linux/slub_def.h | 21
-rw-r--r--  include/linux/smp.h | 7
-rw-r--r--  include/linux/socket.h | 7
-rw-r--r--  include/linux/spi/at86rf230.h | 4
-rw-r--r--  include/linux/spi/l4f00242t03.h | 4
-rw-r--r--  include/linux/spi/lms283gf05.h | 4
-rw-r--r--  include/linux/spi/mxs-spi.h | 4
-rw-r--r--  include/linux/spi/pxa2xx_spi.h | 5
-rw-r--r--  include/linux/spi/rspi.h | 5
-rw-r--r--  include/linux/spi/sh_hspi.h | 4
-rw-r--r--  include/linux/spi/sh_msiof.h | 2
-rw-r--r--  include/linux/spi/spi.h | 6
-rw-r--r--  include/linux/spi/tle62x0.h | 4
-rw-r--r--  include/linux/spi/tsc2005.h | 5
-rw-r--r--  include/linux/spinlock.h | 8
-rw-r--r--  include/linux/spinlock_api_smp.h | 2
-rw-r--r--  include/linux/spinlock_api_up.h | 1
-rw-r--r--  include/linux/srcu.h | 14
-rw-r--r--  include/linux/ssb/ssb_regs.h | 1
-rw-r--r--  include/linux/string.h | 6
-rw-r--r--  include/linux/string_helpers.h | 4
-rw-r--r--  include/linux/sunrpc/clnt.h | 3
-rw-r--r--  include/linux/sunrpc/metrics.h | 4
-rw-r--r--  include/linux/sunrpc/rpc_rdma.h | 14
-rw-r--r--  include/linux/sunrpc/svc.h | 2
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 15
-rw-r--r--  include/linux/sunrpc/xprt.h | 6
-rw-r--r--  include/linux/suspend.h | 16
-rw-r--r--  include/linux/swap.h | 15
-rw-r--r--  include/linux/swapops.h | 8
-rw-r--r--  include/linux/syscalls.h | 8
-rw-r--r--  include/linux/tcp.h | 6
-rw-r--r--  include/linux/ti_wilink_st.h | 13
-rw-r--r--  include/linux/tick.h | 6
-rw-r--r--  include/linux/time.h | 13
-rw-r--r--  include/linux/timecounter.h | 139
-rw-r--r--  include/linux/timekeeping.h | 21
-rw-r--r--  include/linux/tracepoint.h | 2
-rw-r--r--  include/linux/tty.h | 25
-rw-r--r--  include/linux/types.h | 8
-rw-r--r--  include/linux/udp.h | 16
-rw-r--r--  include/linux/uio.h | 6
-rw-r--r--  include/linux/usb.h | 7
-rw-r--r--  include/linux/usb/ehci_pdriver.h | 4
-rw-r--r--  include/linux/usb/gadget.h | 2
-rw-r--r--  include/linux/usb/hcd.h | 3
-rw-r--r--  include/linux/usb/phy.h | 4
-rw-r--r--  include/linux/usb/usb_phy_generic.h | 2
-rw-r--r--  include/linux/vmalloc.h | 13
-rw-r--r--  include/linux/vmw_vmci_api.h | 2
-rw-r--r--  include/linux/vt_buffer.h | 4
-rw-r--r--  include/linux/wait.h | 42
-rw-r--r--  include/linux/workqueue.h | 8
-rw-r--r--  include/linux/writeback.h | 1
-rw-r--r--  include/linux/zpool.h | 5
-rw-r--r--  include/linux/zsmalloc.h | 2
260 files changed, 4823 insertions, 2472 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 856d381b1d5b..24c7aa8b1d20 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -27,6 +27,7 @@
27 27
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/ioport.h> /* for struct resource */ 29#include <linux/ioport.h> /* for struct resource */
30#include <linux/resource_ext.h>
30#include <linux/device.h> 31#include <linux/device.h>
31#include <linux/property.h> 32#include <linux/property.h>
32 33
@@ -147,10 +148,14 @@ void acpi_numa_arch_fixup(void);
147 148
148#ifdef CONFIG_ACPI_HOTPLUG_CPU 149#ifdef CONFIG_ACPI_HOTPLUG_CPU
149/* Arch dependent functions for cpu hotplug support */ 150/* Arch dependent functions for cpu hotplug support */
150int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); 151int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
151int acpi_unmap_lsapic(int cpu); 152int acpi_unmap_cpu(int cpu);
152#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 153#endif /* CONFIG_ACPI_HOTPLUG_CPU */
153 154
155#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
156int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
157#endif
158
154int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); 159int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
155int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); 160int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
156int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); 161int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
@@ -288,22 +293,25 @@ extern int pnpacpi_disabled;
288bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); 293bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res);
289bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); 294bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res);
290bool acpi_dev_resource_address_space(struct acpi_resource *ares, 295bool acpi_dev_resource_address_space(struct acpi_resource *ares,
291 struct resource *res); 296 struct resource_win *win);
292bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, 297bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
293 struct resource *res); 298 struct resource_win *win);
294unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); 299unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
295bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, 300bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
296 struct resource *res); 301 struct resource *res);
297 302
298struct resource_list_entry {
299 struct list_head node;
300 struct resource res;
301};
302
303void acpi_dev_free_resource_list(struct list_head *list); 303void acpi_dev_free_resource_list(struct list_head *list);
304int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, 304int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
305 int (*preproc)(struct acpi_resource *, void *), 305 int (*preproc)(struct acpi_resource *, void *),
306 void *preproc_data); 306 void *preproc_data);
307int acpi_dev_filter_resource_type(struct acpi_resource *ares,
308 unsigned long types);
309
310static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
311 void *arg)
312{
313 return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
314}
307 315
308int acpi_check_resource_conflict(const struct resource *res); 316int acpi_check_resource_conflict(const struct resource *res);
309 317
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 642d6ae4030c..a270f25ee7c7 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -21,16 +21,20 @@ struct device;
21struct ata_port_info; 21struct ata_port_info;
22struct ahci_host_priv; 22struct ahci_host_priv;
23struct platform_device; 23struct platform_device;
24struct scsi_host_template;
24 25
25int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); 26int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
26void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); 27void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
28int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
29void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
27int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); 30int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
28void ahci_platform_disable_resources(struct ahci_host_priv *hpriv); 31void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
29struct ahci_host_priv *ahci_platform_get_resources( 32struct ahci_host_priv *ahci_platform_get_resources(
30 struct platform_device *pdev); 33 struct platform_device *pdev);
31int ahci_platform_init_host(struct platform_device *pdev, 34int ahci_platform_init_host(struct platform_device *pdev,
32 struct ahci_host_priv *hpriv, 35 struct ahci_host_priv *hpriv,
33 const struct ata_port_info *pi_template); 36 const struct ata_port_info *pi_template,
37 struct scsi_host_template *sht);
34 38
35int ahci_platform_suspend_host(struct device *dev); 39int ahci_platform_suspend_host(struct device *dev);
36int ahci_platform_resume_host(struct device *dev); 40int ahci_platform_resume_host(struct device *dev);
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 2afc618b15ce..50fc66868402 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -33,6 +33,7 @@ struct amba_device {
33 struct clk *pclk; 33 struct clk *pclk;
34 unsigned int periphid; 34 unsigned int periphid;
35 unsigned int irq[AMBA_NR_IRQS]; 35 unsigned int irq[AMBA_NR_IRQS];
36 char *driver_override;
36}; 37};
37 38
38struct amba_driver { 39struct amba_driver {
@@ -92,11 +93,15 @@ struct amba_device *amba_find_device(const char *, struct device *, unsigned int
92int amba_request_regions(struct amba_device *, const char *); 93int amba_request_regions(struct amba_device *, const char *);
93void amba_release_regions(struct amba_device *); 94void amba_release_regions(struct amba_device *);
94 95
95#define amba_pclk_enable(d) \ 96static inline int amba_pclk_enable(struct amba_device *dev)
96 (IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk)) 97{
98 return clk_enable(dev->pclk);
99}
97 100
98#define amba_pclk_disable(d) \ 101static inline void amba_pclk_disable(struct amba_device *dev)
99 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) 102{
103 clk_disable(dev->pclk);
104}
100 105
101static inline int amba_pclk_prepare(struct amba_device *dev) 106static inline int amba_pclk_prepare(struct amba_device *dev)
102{ 107{
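[Editor's note] amba_pclk_enable()/amba_pclk_disable() turn from IS_ERR()-guarded macros into plain clk_enable()/clk_disable() wrappers, i.e. they now assume the bus core always hands the device a valid pclk. A short, hypothetical probe fragment pairing them with the amba_pclk_prepare() helper visible above (and its assumed unprepare counterpart); driver names are illustrative:

static int my_amba_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;

	ret = amba_pclk_prepare(adev);
	if (ret)
		return ret;

	ret = amba_pclk_enable(adev);	/* now just clk_enable(adev->pclk) */
	if (ret)
		amba_pclk_unprepare(adev);

	return ret;
}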
diff --git a/include/linux/ata.h b/include/linux/ata.h
index f2f4d8da97c0..1648026e06b4 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -503,7 +503,7 @@ struct ata_bmdma_prd {
503#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8)) 503#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8))
504#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8)) 504#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
505#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) 505#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
506#define ata_id_removeable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) 506#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
507#define ata_id_has_atapi_AN(id) \ 507#define ata_id_has_atapi_AN(id) \
508 ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ 508 ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
509 ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ 509 ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index 5c618a084225..619d9e78e644 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -10,12 +10,15 @@ struct pata_platform_info {
10 unsigned int ioport_shift; 10 unsigned int ioport_shift;
11}; 11};
12 12
13struct scsi_host_template;
14
13extern int __pata_platform_probe(struct device *dev, 15extern int __pata_platform_probe(struct device *dev,
14 struct resource *io_res, 16 struct resource *io_res,
15 struct resource *ctl_res, 17 struct resource *ctl_res,
16 struct resource *irq_res, 18 struct resource *irq_res,
17 unsigned int ioport_shift, 19 unsigned int ioport_shift,
18 int __pio_mask); 20 int __pio_mask,
21 struct scsi_host_template *sht);
19 22
20/* 23/*
21 * Marvell SATA private data 24 * Marvell SATA private data
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 87c2d347d255..c2e7e3a83965 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -46,7 +46,6 @@ struct audit_tree;
46struct sk_buff; 46struct sk_buff;
47 47
48struct audit_krule { 48struct audit_krule {
49 int vers_ops;
50 u32 pflags; 49 u32 pflags;
51 u32 flags; 50 u32 flags;
52 u32 listnr; 51 u32 listnr;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5da6012b7a14..d94077fea1f8 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -106,6 +106,8 @@ struct backing_dev_info {
106#endif 106#endif
107}; 107};
108 108
109struct backing_dev_info *inode_to_bdi(struct inode *inode);
110
109int __must_check bdi_init(struct backing_dev_info *bdi); 111int __must_check bdi_init(struct backing_dev_info *bdi);
110void bdi_destroy(struct backing_dev_info *bdi); 112void bdi_destroy(struct backing_dev_info *bdi);
111 113
@@ -114,7 +116,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
114 const char *fmt, ...); 116 const char *fmt, ...);
115int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 117int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
116void bdi_unregister(struct backing_dev_info *bdi); 118void bdi_unregister(struct backing_dev_info *bdi);
117int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); 119int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
118void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, 120void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
119 enum wb_reason reason); 121 enum wb_reason reason);
120void bdi_start_background_writeback(struct backing_dev_info *bdi); 122void bdi_start_background_writeback(struct backing_dev_info *bdi);
@@ -228,46 +230,17 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
228 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting 230 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
229 * BDI_CAP_NO_WRITEBACK: Don't write pages back 231 * BDI_CAP_NO_WRITEBACK: Don't write pages back
230 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages 232 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
231 *
232 * These flags let !MMU mmap() govern direct device mapping vs immediate
233 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
234 *
235 * BDI_CAP_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
236 * BDI_CAP_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
237 * BDI_CAP_READ_MAP: Can be mapped for reading
238 * BDI_CAP_WRITE_MAP: Can be mapped for writing
239 * BDI_CAP_EXEC_MAP: Can be mapped for execution
240 *
241 * BDI_CAP_SWAP_BACKED: Count shmem/tmpfs objects as swap-backed.
242 *
243 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. 233 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
244 */ 234 */
245#define BDI_CAP_NO_ACCT_DIRTY 0x00000001 235#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
246#define BDI_CAP_NO_WRITEBACK 0x00000002 236#define BDI_CAP_NO_WRITEBACK 0x00000002
247#define BDI_CAP_MAP_COPY 0x00000004 237#define BDI_CAP_NO_ACCT_WB 0x00000004
248#define BDI_CAP_MAP_DIRECT 0x00000008 238#define BDI_CAP_STABLE_WRITES 0x00000008
249#define BDI_CAP_READ_MAP 0x00000010 239#define BDI_CAP_STRICTLIMIT 0x00000010
250#define BDI_CAP_WRITE_MAP 0x00000020
251#define BDI_CAP_EXEC_MAP 0x00000040
252#define BDI_CAP_NO_ACCT_WB 0x00000080
253#define BDI_CAP_SWAP_BACKED 0x00000100
254#define BDI_CAP_STABLE_WRITES 0x00000200
255#define BDI_CAP_STRICTLIMIT 0x00000400
256
257#define BDI_CAP_VMFLAGS \
258 (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
259 240
260#define BDI_CAP_NO_ACCT_AND_WRITEBACK \ 241#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
261 (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) 242 (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
262 243
263#if defined(VM_MAYREAD) && \
264 (BDI_CAP_READ_MAP != VM_MAYREAD || \
265 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
266 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
267#error please change backing_dev_info::capabilities flags
268#endif
269
270extern struct backing_dev_info default_backing_dev_info;
271extern struct backing_dev_info noop_backing_dev_info; 244extern struct backing_dev_info noop_backing_dev_info;
272 245
273int writeback_in_progress(struct backing_dev_info *bdi); 246int writeback_in_progress(struct backing_dev_info *bdi);
@@ -329,24 +302,14 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
329 BDI_CAP_NO_WRITEBACK)); 302 BDI_CAP_NO_WRITEBACK));
330} 303}
331 304
332static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
333{
334 return bdi->capabilities & BDI_CAP_SWAP_BACKED;
335}
336
337static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) 305static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
338{ 306{
339 return bdi_cap_writeback_dirty(mapping->backing_dev_info); 307 return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
340} 308}
341 309
342static inline bool mapping_cap_account_dirty(struct address_space *mapping) 310static inline bool mapping_cap_account_dirty(struct address_space *mapping)
343{ 311{
344 return bdi_cap_account_dirty(mapping->backing_dev_info); 312 return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
345}
346
347static inline bool mapping_cap_swap_backed(struct address_space *mapping)
348{
349 return bdi_cap_swap_backed(mapping->backing_dev_info);
350} 313}
351 314
352static inline int bdi_sched_wait(void *word) 315static inline int bdi_sched_wait(void *word)
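[Editor's note] Two caller-visible changes stand out in this hunk: bdi_setup_and_register() loses its capabilities argument, and per-mapping BDI lookups go through the new inode_to_bdi() instead of the removed mapping->backing_dev_info pointer, as the mapping_cap_*() helpers above now do. A hedged sketch of an updated caller; the surrounding structure and names are illustrative:

	/* before: bdi_setup_and_register(&dev->bdi, "mydrv", BDI_CAP_MAP_COPY); */
	err = bdi_setup_and_register(&dev->bdi, "mydrv");
	if (err)
		goto out_free;

	/* before: bdi = mapping->backing_dev_info; */
	bdi = inode_to_bdi(mapping->host);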
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index eb1c6a47b67f..994739da827f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -318,6 +318,7 @@ struct bcma_bus {
318 const struct bcma_host_ops *ops; 318 const struct bcma_host_ops *ops;
319 319
320 enum bcma_hosttype hosttype; 320 enum bcma_hosttype hosttype;
321 bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */
321 union { 322 union {
322 /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */ 323 /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */
323 struct pci_dev *host_pci; 324 struct pci_dev *host_pci;
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 0333e605ea0d..3f809ae372c4 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -223,6 +223,7 @@ struct bcma_drv_pci_host {
223 223
224struct bcma_drv_pci { 224struct bcma_drv_pci {
225 struct bcma_device *core; 225 struct bcma_device *core;
226 u8 early_setup_done:1;
226 u8 setup_done:1; 227 u8 setup_done:1;
227 u8 hostmode:1; 228 u8 hostmode:1;
228 229
@@ -237,6 +238,7 @@ struct bcma_drv_pci {
237#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) 238#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
238#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) 239#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
239 240
241extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
240extern void bcma_core_pci_init(struct bcma_drv_pci *pc); 242extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
241extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, 243extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
242 struct bcma_device *core, bool enable); 244 struct bcma_device *core, bool enable);
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index e64ae7bf80a1..ebd5c1fcdea4 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -64,6 +64,8 @@
64#define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ 64#define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */
65#define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ 65#define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */
66 66
67#define BCMA_PCIE2_BAR0_WIN2 0x70
68
67/* SiliconBackplane Address Map. 69/* SiliconBackplane Address Map.
68 * All regions may not exist on all chips. 70 * All regions may not exist on all chips.
69 */ 71 */
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index f24d245f8394..1b5fc0c3b1b5 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -5,8 +5,6 @@
5 5
6struct bcma_soc { 6struct bcma_soc {
7 struct bcma_bus bus; 7 struct bcma_bus bus;
8 struct bcma_device core_cc;
9 struct bcma_device core_mips;
10}; 8};
11 9
12int __init bcma_host_soc_register(struct bcma_soc *soc); 10int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index efead0b532c4..da3a127c9958 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -428,13 +428,9 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
428extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, 428extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
429 unsigned int, unsigned int); 429 unsigned int, unsigned int);
430extern int bio_get_nr_vecs(struct block_device *); 430extern int bio_get_nr_vecs(struct block_device *);
431extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
432 unsigned long, unsigned int, int, gfp_t);
433struct sg_iovec;
434struct rq_map_data; 431struct rq_map_data;
435extern struct bio *bio_map_user_iov(struct request_queue *, 432extern struct bio *bio_map_user_iov(struct request_queue *,
436 struct block_device *, 433 const struct iov_iter *, gfp_t);
437 const struct sg_iovec *, int, int, gfp_t);
438extern void bio_unmap_user(struct bio *); 434extern void bio_unmap_user(struct bio *);
439extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, 435extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
440 gfp_t); 436 gfp_t);
@@ -462,12 +458,10 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
462extern void bio_copy_data(struct bio *dst, struct bio *src); 458extern void bio_copy_data(struct bio *dst, struct bio *src);
463extern int bio_alloc_pages(struct bio *bio, gfp_t gfp); 459extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
464 460
465extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
466 unsigned long, unsigned int, int, gfp_t);
467extern struct bio *bio_copy_user_iov(struct request_queue *, 461extern struct bio *bio_copy_user_iov(struct request_queue *,
468 struct rq_map_data *, 462 struct rq_map_data *,
469 const struct sg_iovec *, 463 const struct iov_iter *,
470 int, int, gfp_t); 464 gfp_t);
471extern int bio_uncopy_user(struct bio *); 465extern int bio_uncopy_user(struct bio *);
472void zero_fill_bio(struct bio *bio); 466void zero_fill_bio(struct bio *bio);
473extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); 467extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
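[Editor's note] bio_map_user_iov() and bio_copy_user_iov() now take an iov_iter instead of a raw sg_iovec array plus segment count, so both the segment walk and the data direction come from the iterator rather than from extra arguments. A hedged sketch of mapping a single user buffer under the new signature; the helper name is hypothetical and the WRITE direction (user memory as the data source) is only an example:

static struct bio *map_user_buffer(struct request_queue *q,
				   void __user *ubuf, size_t len)
{
	struct iovec iov = {
		.iov_base = ubuf,
		.iov_len  = len,
	};
	struct iov_iter iter;

	/* WRITE: the iterator describes a source buffer */
	iov_iter_init(&iter, WRITE, &iov, 1, len);

	return bio_map_user_iov(q, &iter, GFP_KERNEL);
}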
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 202e4034fe26..dbfbf4990005 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -52,16 +52,13 @@
52 * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) 52 * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
53 * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap 53 * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
54 * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz 54 * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
55 * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
56 * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf 55 * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
57 * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf 56 * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
58 * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
59 * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf 57 * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
60 * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf 58 * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
61 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region 59 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
62 * bitmap_release_region(bitmap, pos, order) Free specified bit region 60 * bitmap_release_region(bitmap, pos, order) Free specified bit region
63 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region 61 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
64 * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex
65 */ 62 */
66 63
67/* 64/*
@@ -96,10 +93,10 @@ extern int __bitmap_equal(const unsigned long *bitmap1,
96 const unsigned long *bitmap2, unsigned int nbits); 93 const unsigned long *bitmap2, unsigned int nbits);
97extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, 94extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
98 unsigned int nbits); 95 unsigned int nbits);
99extern void __bitmap_shift_right(unsigned long *dst, 96extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
100 const unsigned long *src, int shift, int bits); 97 unsigned int shift, unsigned int nbits);
101extern void __bitmap_shift_left(unsigned long *dst, 98extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
102 const unsigned long *src, int shift, int bits); 99 unsigned int shift, unsigned int nbits);
103extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, 100extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
104 const unsigned long *bitmap2, unsigned int nbits); 101 const unsigned long *bitmap2, unsigned int nbits);
105extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, 102extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
@@ -147,31 +144,31 @@ bitmap_find_next_zero_area(unsigned long *map,
147 align_mask, 0); 144 align_mask, 0);
148} 145}
149 146
150extern int bitmap_scnprintf(char *buf, unsigned int len,
151 const unsigned long *src, int nbits);
152extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, 147extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
153 unsigned long *dst, int nbits); 148 unsigned long *dst, int nbits);
154extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, 149extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
155 unsigned long *dst, int nbits); 150 unsigned long *dst, int nbits);
156extern int bitmap_scnlistprintf(char *buf, unsigned int len,
157 const unsigned long *src, int nbits);
158extern int bitmap_parselist(const char *buf, unsigned long *maskp, 151extern int bitmap_parselist(const char *buf, unsigned long *maskp,
159 int nmaskbits); 152 int nmaskbits);
160extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, 153extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
161 unsigned long *dst, int nbits); 154 unsigned long *dst, int nbits);
162extern void bitmap_remap(unsigned long *dst, const unsigned long *src, 155extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
163 const unsigned long *old, const unsigned long *new, int bits); 156 const unsigned long *old, const unsigned long *new, unsigned int nbits);
164extern int bitmap_bitremap(int oldbit, 157extern int bitmap_bitremap(int oldbit,
165 const unsigned long *old, const unsigned long *new, int bits); 158 const unsigned long *old, const unsigned long *new, int bits);
166extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, 159extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
167 const unsigned long *relmap, int bits); 160 const unsigned long *relmap, unsigned int bits);
168extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, 161extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
169 int sz, int bits); 162 unsigned int sz, unsigned int nbits);
170extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); 163extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
171extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); 164extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
172extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); 165extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
173extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); 166#ifdef __BIG_ENDIAN
174extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); 167extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
168#else
169#define bitmap_copy_le bitmap_copy
170#endif
171extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
175extern int bitmap_print_to_pagebuf(bool list, char *buf, 172extern int bitmap_print_to_pagebuf(bool list, char *buf,
176 const unsigned long *maskp, int nmaskbits); 173 const unsigned long *maskp, int nmaskbits);
177 174
@@ -185,33 +182,33 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
185#define small_const_nbits(nbits) \ 182#define small_const_nbits(nbits) \
186 (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) 183 (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
187 184
188static inline void bitmap_zero(unsigned long *dst, int nbits) 185static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
189{ 186{
190 if (small_const_nbits(nbits)) 187 if (small_const_nbits(nbits))
191 *dst = 0UL; 188 *dst = 0UL;
192 else { 189 else {
193 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); 190 unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
194 memset(dst, 0, len); 191 memset(dst, 0, len);
195 } 192 }
196} 193}
197 194
198static inline void bitmap_fill(unsigned long *dst, int nbits) 195static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
199{ 196{
200 size_t nlongs = BITS_TO_LONGS(nbits); 197 unsigned int nlongs = BITS_TO_LONGS(nbits);
201 if (!small_const_nbits(nbits)) { 198 if (!small_const_nbits(nbits)) {
202 int len = (nlongs - 1) * sizeof(unsigned long); 199 unsigned int len = (nlongs - 1) * sizeof(unsigned long);
203 memset(dst, 0xff, len); 200 memset(dst, 0xff, len);
204 } 201 }
205 dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); 202 dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
206} 203}
207 204
208static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, 205static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
209 int nbits) 206 unsigned int nbits)
210{ 207{
211 if (small_const_nbits(nbits)) 208 if (small_const_nbits(nbits))
212 *dst = *src; 209 *dst = *src;
213 else { 210 else {
214 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); 211 unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
215 memcpy(dst, src, len); 212 memcpy(dst, src, len);
216 } 213 }
217} 214}
@@ -309,22 +306,22 @@ static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
309 return __bitmap_weight(src, nbits); 306 return __bitmap_weight(src, nbits);
310} 307}
311 308
312static inline void bitmap_shift_right(unsigned long *dst, 309static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
313 const unsigned long *src, int n, int nbits) 310 unsigned int shift, int nbits)
314{ 311{
315 if (small_const_nbits(nbits)) 312 if (small_const_nbits(nbits))
316 *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n; 313 *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
317 else 314 else
318 __bitmap_shift_right(dst, src, n, nbits); 315 __bitmap_shift_right(dst, src, shift, nbits);
319} 316}
320 317
321static inline void bitmap_shift_left(unsigned long *dst, 318static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
322 const unsigned long *src, int n, int nbits) 319 unsigned int shift, unsigned int nbits)
323{ 320{
324 if (small_const_nbits(nbits)) 321 if (small_const_nbits(nbits))
325 *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); 322 *dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
326 else 323 else
327 __bitmap_shift_left(dst, src, n, nbits); 324 __bitmap_shift_left(dst, src, shift, nbits);
328} 325}
329 326
330static inline int bitmap_parse(const char *buf, unsigned int buflen, 327static inline int bitmap_parse(const char *buf, unsigned int buflen,
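[Editor's note] The shift helpers keep their single-word fast path; only the argument types move from int to unsigned. A standalone user-space illustration of that fast path, with BITS_PER_LONG and the last-word mask re-derived locally just for the demo (in the kernel both come from the headers):

#include <stdio.h>

#define DEMO_BITS_PER_LONG	(8 * sizeof(unsigned long))
#define DEMO_LAST_WORD_MASK(nbits) \
	(~0UL >> (-(nbits) & (DEMO_BITS_PER_LONG - 1)))

int main(void)
{
	unsigned long src = 0xf0UL;		/* bits 4..7 set */
	unsigned int nbits = 8, shift = 2;
	unsigned long right, left;

	/* bitmap_shift_right(): clip to nbits, then shift towards bit 0 */
	right = (src & DEMO_LAST_WORD_MASK(nbits)) >> shift;

	/* bitmap_shift_left(): shift towards high bits, then clip to nbits */
	left = (src << shift) & DEMO_LAST_WORD_MASK(nbits);

	printf("right=%#lx left=%#lx\n", right, left);	/* 0x3c and 0xc0 */
	return 0;
}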
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index 7ffe03f4693d..fb790b8449c1 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -3,14 +3,83 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6extern u8 const byte_rev_table[256]; 6#ifdef CONFIG_HAVE_ARCH_BITREVERSE
7#include <asm/bitrev.h>
8
9#define __bitrev32 __arch_bitrev32
10#define __bitrev16 __arch_bitrev16
11#define __bitrev8 __arch_bitrev8
7 12
8static inline u8 bitrev8(u8 byte) 13#else
14extern u8 const byte_rev_table[256];
15static inline u8 __bitrev8(u8 byte)
9{ 16{
10 return byte_rev_table[byte]; 17 return byte_rev_table[byte];
11} 18}
12 19
13extern u16 bitrev16(u16 in); 20static inline u16 __bitrev16(u16 x)
14extern u32 bitrev32(u32 in); 21{
22 return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8);
23}
24
25static inline u32 __bitrev32(u32 x)
26{
27 return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16);
28}
29
30#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
31
32#define __constant_bitrev32(x) \
33({ \
34 u32 __x = x; \
35 __x = (__x >> 16) | (__x << 16); \
36 __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
37 __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
38 __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
39 __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
40 __x; \
41})
42
43#define __constant_bitrev16(x) \
44({ \
45 u16 __x = x; \
46 __x = (__x >> 8) | (__x << 8); \
47 __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
48 __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
49 __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
50 __x; \
51})
52
53#define __constant_bitrev8(x) \
54({ \
55 u8 __x = x; \
56 __x = (__x >> 4) | (__x << 4); \
57 __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
58 __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
59 __x; \
60})
61
62#define bitrev32(x) \
63({ \
64 u32 __x = x; \
65 __builtin_constant_p(__x) ? \
66 __constant_bitrev32(__x) : \
67 __bitrev32(__x); \
68})
69
70#define bitrev16(x) \
71({ \
72 u16 __x = x; \
73 __builtin_constant_p(__x) ? \
74 __constant_bitrev16(__x) : \
75 __bitrev16(__x); \
76 })
15 77
78#define bitrev8(x) \
79({ \
80 u8 __x = x; \
81 __builtin_constant_p(__x) ? \
82 __constant_bitrev8(__x) : \
83 __bitrev8(__x) ; \
84 })
16#endif /* _LINUX_BITREV_H */ 85#endif /* _LINUX_BITREV_H */
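[Editor's note] With constant arguments the new macros fold the reversal at compile time using the classic swap-and-mask ladder; only non-constant values fall back to the lookup table (or, with CONFIG_HAVE_ARCH_BITREVERSE, an arch instruction such as ARM's RBIT). A small user-space re-run of the 8-bit ladder, purely to show the three steps; this mirrors __constant_bitrev8() above and is not kernel code:

#include <stdio.h>
#include <stdint.h>

static uint8_t demo_bitrev8(uint8_t x)
{
	x = (x >> 4) | (x << 4);			/* swap nibbles    */
	x = ((x & 0xcc) >> 2) | ((x & 0x33) << 2);	/* swap bit pairs  */
	x = ((x & 0xaa) >> 1) | ((x & 0x55) << 1);	/* swap neighbours */
	return x;
}

int main(void)
{
	printf("0x01 -> %#04x\n", demo_bitrev8(0x01));	/* 0x80 */
	printf("0x3a -> %#04x\n", demo_bitrev8(0x3a));	/* 0x5c */
	return 0;
}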
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aded9ab2e4e..7aec86127335 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
34 unsigned long flags; /* BLK_MQ_F_* flags */ 34 unsigned long flags; /* BLK_MQ_F_* flags */
35 35
36 struct request_queue *queue; 36 struct request_queue *queue;
37 unsigned int queue_num;
38 struct blk_flush_queue *fq; 37 struct blk_flush_queue *fq;
39 38
40 void *driver_data; 39 void *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
54 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; 53 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
55 54
56 unsigned int numa_node; 55 unsigned int numa_node;
57 unsigned int cmd_size; /* per-request extra data */ 56 unsigned int queue_num;
58 57
59 atomic_t nr_active; 58 atomic_t nr_active;
60 59
@@ -147,6 +146,8 @@ enum {
147 BLK_MQ_F_SG_MERGE = 1 << 2, 146 BLK_MQ_F_SG_MERGE = 1 << 2,
148 BLK_MQ_F_SYSFS_UP = 1 << 3, 147 BLK_MQ_F_SYSFS_UP = 1 << 3,
149 BLK_MQ_F_DEFER_ISSUE = 1 << 4, 148 BLK_MQ_F_DEFER_ISSUE = 1 << 4,
149 BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
150 BLK_MQ_F_ALLOC_POLICY_BITS = 1,
150 151
151 BLK_MQ_S_STOPPED = 0, 152 BLK_MQ_S_STOPPED = 0,
152 BLK_MQ_S_TAG_ACTIVE = 1, 153 BLK_MQ_S_TAG_ACTIVE = 1,
@@ -155,6 +156,12 @@ enum {
155 156
156 BLK_MQ_CPU_WORK_BATCH = 8, 157 BLK_MQ_CPU_WORK_BATCH = 8,
157}; 158};
159#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
160 ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
161 ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
162#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
163 ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
164 << BLK_MQ_F_ALLOC_POLICY_START_BIT)
158 165
159struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); 166struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
160void blk_mq_finish_init(struct request_queue *q); 167void blk_mq_finish_init(struct request_queue *q);
@@ -167,7 +174,6 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
167void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); 174void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
168 175
169void blk_mq_insert_request(struct request *, bool, bool, bool); 176void blk_mq_insert_request(struct request *, bool, bool, bool);
170void blk_mq_run_queues(struct request_queue *q, bool async);
171void blk_mq_free_request(struct request *rq); 177void blk_mq_free_request(struct request *rq);
172void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq); 178void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
173bool blk_mq_can_queue(struct blk_mq_hw_ctx *); 179bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -195,13 +201,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
195struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); 201struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
196struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); 202struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
197 203
204int blk_mq_request_started(struct request *rq);
198void blk_mq_start_request(struct request *rq); 205void blk_mq_start_request(struct request *rq);
199void blk_mq_end_request(struct request *rq, int error); 206void blk_mq_end_request(struct request *rq, int error);
200void __blk_mq_end_request(struct request *rq, int error); 207void __blk_mq_end_request(struct request *rq, int error);
201 208
202void blk_mq_requeue_request(struct request *rq); 209void blk_mq_requeue_request(struct request *rq);
203void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); 210void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
211void blk_mq_cancel_requeue_work(struct request_queue *q);
204void blk_mq_kick_requeue_list(struct request_queue *q); 212void blk_mq_kick_requeue_list(struct request_queue *q);
213void blk_mq_abort_requeue_list(struct request_queue *q);
205void blk_mq_complete_request(struct request *rq); 214void blk_mq_complete_request(struct request *rq);
206 215
207void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); 216void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +221,9 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
212void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 221void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
213void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 222void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
214 void *priv); 223 void *priv);
224void blk_mq_freeze_queue(struct request_queue *q);
225void blk_mq_unfreeze_queue(struct request_queue *q);
226void blk_mq_freeze_queue_start(struct request_queue *q);
215 227
216/* 228/*
217 * Driver command data is immediately after the request. So subtract request 229 * Driver command data is immediately after the request. So subtract request
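[Editor's note] The two new macros pack a tag-allocation policy into bit 8 of the hardware-queue flags and extract it again. A standalone round-trip check of that arithmetic; the macro bodies are copied from the hunk, the 0x1 bit merely stands in for an unrelated flag such as BLK_MQ_F_SHOULD_MERGE, and the policy value 1 corresponds to BLK_TAG_ALLOC_RR from blkdev.h:

#include <stdio.h>

#define BLK_MQ_F_ALLOC_POLICY_START_BIT	8
#define BLK_MQ_F_ALLOC_POLICY_BITS	1

#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
	 ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
	 << BLK_MQ_F_ALLOC_POLICY_START_BIT)

int main(void)
{
	/* pack policy 1 (round-robin) next to an ordinary flag bit */
	unsigned int flags = 0x1 | BLK_ALLOC_POLICY_TO_MQ_FLAG(1);

	printf("flags=%#x policy=%u\n", flags,
	       (unsigned int)BLK_MQ_FLAG_TO_ALLOC_POLICY(flags));
	return 0;	/* prints flags=0x101 policy=1 */
}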
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 445d59231bc4..c294e3e25e37 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -190,6 +190,7 @@ enum rq_flag_bits {
190 __REQ_PM, /* runtime pm request */ 190 __REQ_PM, /* runtime pm request */
191 __REQ_HASHED, /* on IO scheduler merge hash */ 191 __REQ_HASHED, /* on IO scheduler merge hash */
192 __REQ_MQ_INFLIGHT, /* track inflight for MQ */ 192 __REQ_MQ_INFLIGHT, /* track inflight for MQ */
193 __REQ_NO_TIMEOUT, /* requests may never expire */
193 __REQ_NR_BITS, /* stops here */ 194 __REQ_NR_BITS, /* stops here */
194}; 195};
195 196
@@ -243,5 +244,6 @@ enum rq_flag_bits {
243#define REQ_PM (1ULL << __REQ_PM) 244#define REQ_PM (1ULL << __REQ_PM)
244#define REQ_HASHED (1ULL << __REQ_HASHED) 245#define REQ_HASHED (1ULL << __REQ_HASHED)
245#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) 246#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
247#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
246 248
247#endif /* __LINUX_BLK_TYPES_H */ 249#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 92f4b4b288dd..7f9a516f24de 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -272,7 +272,11 @@ struct blk_queue_tag {
272 int max_depth; /* what we will send to device */ 272 int max_depth; /* what we will send to device */
273 int real_max_depth; /* what the array can hold */ 273 int real_max_depth; /* what the array can hold */
274 atomic_t refcnt; /* map can be shared */ 274 atomic_t refcnt; /* map can be shared */
275 int alloc_policy; /* tag allocation policy */
276 int next_tag; /* next tag */
275}; 277};
278#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
279#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
276 280
277#define BLK_SCSI_MAX_CMDS (256) 281#define BLK_SCSI_MAX_CMDS (256)
278#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) 282#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
@@ -516,6 +520,7 @@ struct request_queue {
516 (1 << QUEUE_FLAG_ADD_RANDOM)) 520 (1 << QUEUE_FLAG_ADD_RANDOM))
517 521
518#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 522#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
523 (1 << QUEUE_FLAG_STACKABLE) | \
519 (1 << QUEUE_FLAG_SAME_COMP)) 524 (1 << QUEUE_FLAG_SAME_COMP))
520 525
521static inline void queue_lockdep_assert_held(struct request_queue *q) 526static inline void queue_lockdep_assert_held(struct request_queue *q)
@@ -850,8 +855,8 @@ extern int blk_rq_map_user(struct request_queue *, struct request *,
850extern int blk_rq_unmap_user(struct bio *); 855extern int blk_rq_unmap_user(struct bio *);
851extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); 856extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
852extern int blk_rq_map_user_iov(struct request_queue *, struct request *, 857extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
853 struct rq_map_data *, const struct sg_iovec *, 858 struct rq_map_data *, const struct iov_iter *,
854 int, unsigned int, gfp_t); 859 gfp_t);
855extern int blk_execute_rq(struct request_queue *, struct gendisk *, 860extern int blk_execute_rq(struct request_queue *, struct gendisk *,
856 struct request *, int); 861 struct request *, int);
857extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 862extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
@@ -1044,8 +1049,6 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1044extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 1049extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
1045 1050
1046extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 1051extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
1047extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
1048 struct scatterlist *sglist);
1049extern void blk_dump_rq_flags(struct request *, char *); 1052extern void blk_dump_rq_flags(struct request *, char *);
1050extern long nr_blockdev_pages(void); 1053extern long nr_blockdev_pages(void);
1051 1054
@@ -1139,11 +1142,11 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1139extern int blk_queue_start_tag(struct request_queue *, struct request *); 1142extern int blk_queue_start_tag(struct request_queue *, struct request *);
1140extern struct request *blk_queue_find_tag(struct request_queue *, int); 1143extern struct request *blk_queue_find_tag(struct request_queue *, int);
1141extern void blk_queue_end_tag(struct request_queue *, struct request *); 1144extern void blk_queue_end_tag(struct request_queue *, struct request *);
1142extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); 1145extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
1143extern void blk_queue_free_tags(struct request_queue *); 1146extern void blk_queue_free_tags(struct request_queue *);
1144extern int blk_queue_resize_tags(struct request_queue *, int); 1147extern int blk_queue_resize_tags(struct request_queue *, int);
1145extern void blk_queue_invalidate_tags(struct request_queue *); 1148extern void blk_queue_invalidate_tags(struct request_queue *);
1146extern struct blk_queue_tag *blk_init_tags(int); 1149extern struct blk_queue_tag *blk_init_tags(int, int);
1147extern void blk_free_tags(struct blk_queue_tag *); 1150extern void blk_free_tags(struct blk_queue_tag *);
1148 1151
1149static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, 1152static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
@@ -1162,7 +1165,7 @@ extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1162extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, 1165extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1163 sector_t nr_sects, gfp_t gfp_mask, struct page *page); 1166 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
1164extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 1167extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1165 sector_t nr_sects, gfp_t gfp_mask); 1168 sector_t nr_sects, gfp_t gfp_mask, bool discard);
1166static inline int sb_issue_discard(struct super_block *sb, sector_t block, 1169static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1167 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) 1170 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1168{ 1171{
@@ -1176,7 +1179,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1176 return blkdev_issue_zeroout(sb->s_bdev, 1179 return blkdev_issue_zeroout(sb->s_bdev,
1177 block << (sb->s_blocksize_bits - 9), 1180 block << (sb->s_blocksize_bits - 9),
1178 nr_blocks << (sb->s_blocksize_bits - 9), 1181 nr_blocks << (sb->s_blocksize_bits - 9),
1179 gfp_mask); 1182 gfp_mask, true);
1180} 1183}
1181 1184
1182extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); 1185extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
@@ -1601,8 +1604,8 @@ struct block_device_operations {
1601 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); 1604 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
1602 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1605 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1603 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1606 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1604 int (*direct_access) (struct block_device *, sector_t, 1607 long (*direct_access)(struct block_device *, sector_t,
1605 void **, unsigned long *); 1608 void **, unsigned long *pfn, long size);
1606 unsigned int (*check_events) (struct gendisk *disk, 1609 unsigned int (*check_events) (struct gendisk *disk,
1607 unsigned int clearing); 1610 unsigned int clearing);
1608 /* ->media_changed() is DEPRECATED, use ->check_events() instead */ 1611 /* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1620,6 +1623,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1620extern int bdev_read_page(struct block_device *, sector_t, struct page *); 1623extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1621extern int bdev_write_page(struct block_device *, sector_t, struct page *, 1624extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1622 struct writeback_control *); 1625 struct writeback_control *);
1626extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
1627 unsigned long *pfn, long size);
1623#else /* CONFIG_BLOCK */ 1628#else /* CONFIG_BLOCK */
1624 1629
1625struct block_device; 1630struct block_device;
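[Editor's note] bdev_direct_access() is the new block-layer wrapper around the widened ->direct_access() hook: it returns how many bytes are directly addressable at the requested sector (or a negative errno) and hands back both a kernel virtual address and a pfn. A hedged caller sketch; the function name and the whole-range requirement are illustrative only, and the other additions in this hunk (the alloc-policy argument to blk_init_tags()/blk_queue_init_tags() and the discard flag of blkdev_issue_zeroout()) are plain signature extensions:

static int copy_block_directly(struct block_device *bdev, sector_t sector,
			       void *dst, size_t len)
{
	void *addr;
	unsigned long pfn;
	long avail;

	avail = bdev_direct_access(bdev, sector, &addr, &pfn, len);
	if (avail < 0)
		return avail;
	if (avail < (long)len)
		return -ENXIO;	/* assumption: caller needs the whole range */

	memcpy(dst, addr, len);	/* the returned address is directly usable */
	return 0;
}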
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index fb4591977b03..f8763615a5f2 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -30,6 +30,4 @@ void cdev_del(struct cdev *);
30 30
31void cd_forget(struct inode *); 31void cd_forget(struct inode *);
32 32
33extern struct backing_dev_info directly_mappable_cdev_bdi;
34
35#endif 33#endif
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 5d86416d35f2..61b19c46bdb3 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,8 +87,8 @@ struct ceph_osd_req_op {
87 struct ceph_osd_data osd_data; 87 struct ceph_osd_data osd_data;
88 } extent; 88 } extent;
89 struct { 89 struct {
90 __le32 name_len; 90 u32 name_len;
91 __le32 value_len; 91 u32 value_len;
92 __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ 92 __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
93 __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ 93 __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
94 struct ceph_osd_data osd_data; 94 struct ceph_osd_data osd_data;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index da0dae0600e6..b9cb94c3102a 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -943,6 +943,8 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
943 943
944#else /* !CONFIG_CGROUPS */ 944#else /* !CONFIG_CGROUPS */
945 945
946struct cgroup_subsys_state;
947
946static inline int cgroup_init_early(void) { return 0; } 948static inline int cgroup_init_early(void) { return 0; }
947static inline int cgroup_init(void) { return 0; } 949static inline int cgroup_init(void) { return 0; }
948static inline void cgroup_fork(struct task_struct *p) {} 950static inline void cgroup_fork(struct task_struct *p) {}
@@ -955,6 +957,8 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
955 return -EINVAL; 957 return -EINVAL;
956} 958}
957 959
960static inline void css_put(struct cgroup_subsys_state *css) {}
961
958/* No cgroups - nothing to do */ 962/* No cgroups - nothing to do */
959static inline int cgroup_attach_task_all(struct task_struct *from, 963static inline int cgroup_attach_task_all(struct task_struct *from,
960 struct task_struct *t) 964 struct task_struct *t)
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 98c4f9b12b03..e4a96fb14403 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -15,6 +15,10 @@ SUBSYS(cpu)
15SUBSYS(cpuacct) 15SUBSYS(cpuacct)
16#endif 16#endif
17 17
18#if IS_ENABLED(CONFIG_BLK_CGROUP)
19SUBSYS(blkio)
20#endif
21
18#if IS_ENABLED(CONFIG_MEMCG) 22#if IS_ENABLED(CONFIG_MEMCG)
19SUBSYS(memory) 23SUBSYS(memory)
20#endif 24#endif
@@ -31,10 +35,6 @@ SUBSYS(freezer)
31SUBSYS(net_cls) 35SUBSYS(net_cls)
32#endif 36#endif
33 37
34#if IS_ENABLED(CONFIG_BLK_CGROUP)
35SUBSYS(blkio)
36#endif
37
38#if IS_ENABLED(CONFIG_CGROUP_PERF) 38#if IS_ENABLED(CONFIG_CGROUP_PERF)
39SUBSYS(perf_event) 39SUBSYS(perf_event)
40#endif 40#endif
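
cpgroup_subsys.h is consumed as an x-macro list, so moving the blkio entry ahead of memory reorders every expansion of SUBSYS(). A rough sketch of the usual consumption pattern, with the enum naming quoted from memory of cgroup.h in this era rather than from the patch itself:

/* Each includer defines SUBSYS() and pulls the list in to generate
 * per-subsystem identifiers in list order. */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
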
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index abcafaa20b86..9c78d15d33e4 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -18,8 +18,6 @@
18#include <asm/div64.h> 18#include <asm/div64.h>
19#include <asm/io.h> 19#include <asm/io.h>
20 20
21/* clocksource cycle base type */
22typedef u64 cycle_t;
23struct clocksource; 21struct clocksource;
24struct module; 22struct module;
25 23
@@ -28,106 +26,6 @@ struct module;
28#endif 26#endif
29 27
30/** 28/**
31 * struct cyclecounter - hardware abstraction for a free running counter
32 * Provides completely state-free accessors to the underlying hardware.
33 * Depending on which hardware it reads, the cycle counter may wrap
34 * around quickly. Locking rules (if necessary) have to be defined
35 * by the implementor and user of specific instances of this API.
36 *
37 * @read: returns the current cycle value
38 * @mask: bitmask for two's complement
39 * subtraction of non 64 bit counters,
40 * see CLOCKSOURCE_MASK() helper macro
41 * @mult: cycle to nanosecond multiplier
42 * @shift: cycle to nanosecond divisor (power of two)
43 */
44struct cyclecounter {
45 cycle_t (*read)(const struct cyclecounter *cc);
46 cycle_t mask;
47 u32 mult;
48 u32 shift;
49};
50
51/**
52 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
53 * Contains the state needed by timecounter_read() to detect
54 * cycle counter wrap around. Initialize with
55 * timecounter_init(). Also used to convert cycle counts into the
56 * corresponding nanosecond counts with timecounter_cyc2time(). Users
57 * of this code are responsible for initializing the underlying
58 * cycle counter hardware, locking issues and reading the time
59 * more often than the cycle counter wraps around. The nanosecond
60 * counter will only wrap around after ~585 years.
61 *
62 * @cc: the cycle counter used by this instance
63 * @cycle_last: most recent cycle counter value seen by
64 * timecounter_read()
65 * @nsec: continuously increasing count
66 */
67struct timecounter {
68 const struct cyclecounter *cc;
69 cycle_t cycle_last;
70 u64 nsec;
71};
72
73/**
74 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
75 * @cc: Pointer to cycle counter.
76 * @cycles: Cycles
77 *
78 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
79 * as in cyc2ns, but with unsigned result.
80 */
81static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
82 cycle_t cycles)
83{
84 u64 ret = (u64)cycles;
85 ret = (ret * cc->mult) >> cc->shift;
86 return ret;
87}
88
89/**
90 * timecounter_init - initialize a time counter
91 * @tc: Pointer to time counter which is to be initialized/reset
92 * @cc: A cycle counter, ready to be used.
93 * @start_tstamp: Arbitrary initial time stamp.
94 *
95 * After this call the current cycle register (roughly) corresponds to
96 * the initial time stamp. Every call to timecounter_read() increments
97 * the time stamp counter by the number of elapsed nanoseconds.
98 */
99extern void timecounter_init(struct timecounter *tc,
100 const struct cyclecounter *cc,
101 u64 start_tstamp);
102
103/**
104 * timecounter_read - return nanoseconds elapsed since timecounter_init()
105 * plus the initial time stamp
106 * @tc: Pointer to time counter.
107 *
108 * In other words, keeps track of time since the same epoch as
109 * the function which generated the initial time stamp.
110 */
111extern u64 timecounter_read(struct timecounter *tc);
112
113/**
114 * timecounter_cyc2time - convert a cycle counter to same
115 * time base as values returned by
116 * timecounter_read()
117 * @tc: Pointer to time counter.
118 * @cycle_tstamp: a value returned by tc->cc->read()
119 *
120 * Cycle counts that are converted correctly as long as they
121 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
122 * with "max cycle count" == cs->mask+1.
123 *
124 * This allows conversion of cycle counter values which were generated
125 * in the past.
126 */
127extern u64 timecounter_cyc2time(struct timecounter *tc,
128 cycle_t cycle_tstamp);
129
130/**
131 * struct clocksource - hardware abstraction for a free running counter 29 * struct clocksource - hardware abstraction for a free running counter
132 * Provides mostly state-free accessors to the underlying hardware. 30 * Provides mostly state-free accessors to the underlying hardware.
133 * This is the structure used for system time. 31 * This is the structure used for system time.
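
The cyclecounter/timecounter definitions and the cycle_t typedef are dropped from clocksource.h; the conversion they documented is a plain multiply-and-shift. A hedged restatement of the removed cyclecounter_cyc2ns() math:

/* ns = (cycles * mult) >> shift, with mult/shift chosen by the driver. */
static inline u64 example_cyc2ns(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}
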
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 3238ffa33f68..a014559e4a49 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -12,6 +12,10 @@
12#define COMPACT_PARTIAL 3 12#define COMPACT_PARTIAL 3
13/* The full zone was compacted */ 13/* The full zone was compacted */
14#define COMPACT_COMPLETE 4 14#define COMPACT_COMPLETE 4
15/* For more detailed tracepoint output */
16#define COMPACT_NO_SUITABLE_PAGE 5
17#define COMPACT_NOT_SUITABLE_ZONE 6
18/* When adding new state, please change compaction_status_string, too */
15 19
16/* Used to signal whether compaction detected need_sched() or lock contention */ 20/* Used to signal whether compaction detected need_sched() or lock contention */
17/* No contention detected */ 21/* No contention detected */
@@ -21,6 +25,8 @@
21/* Zone lock or lru_lock was contended in async compaction */ 25/* Zone lock or lru_lock was contended in async compaction */
22#define COMPACT_CONTENDED_LOCK 2 26#define COMPACT_CONTENDED_LOCK 2
23 27
28struct alloc_context; /* in mm/internal.h */
29
24#ifdef CONFIG_COMPACTION 30#ifdef CONFIG_COMPACTION
25extern int sysctl_compact_memory; 31extern int sysctl_compact_memory;
26extern int sysctl_compaction_handler(struct ctl_table *table, int write, 32extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -30,81 +36,25 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
30 void __user *buffer, size_t *length, loff_t *ppos); 36 void __user *buffer, size_t *length, loff_t *ppos);
31 37
32extern int fragmentation_index(struct zone *zone, unsigned int order); 38extern int fragmentation_index(struct zone *zone, unsigned int order);
33extern unsigned long try_to_compact_pages(struct zonelist *zonelist, 39extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
34 int order, gfp_t gfp_mask, nodemask_t *mask, 40 int alloc_flags, const struct alloc_context *ac,
35 enum migrate_mode mode, int *contended, 41 enum migrate_mode mode, int *contended);
36 int alloc_flags, int classzone_idx);
37extern void compact_pgdat(pg_data_t *pgdat, int order); 42extern void compact_pgdat(pg_data_t *pgdat, int order);
38extern void reset_isolation_suitable(pg_data_t *pgdat); 43extern void reset_isolation_suitable(pg_data_t *pgdat);
39extern unsigned long compaction_suitable(struct zone *zone, int order, 44extern unsigned long compaction_suitable(struct zone *zone, int order,
40 int alloc_flags, int classzone_idx); 45 int alloc_flags, int classzone_idx);
41 46
42/* Do not skip compaction more than 64 times */ 47extern void defer_compaction(struct zone *zone, int order);
43#define COMPACT_MAX_DEFER_SHIFT 6 48extern bool compaction_deferred(struct zone *zone, int order);
44 49extern void compaction_defer_reset(struct zone *zone, int order,
45/* 50 bool alloc_success);
46 * Compaction is deferred when compaction fails to result in a page 51extern bool compaction_restarting(struct zone *zone, int order);
47 * allocation success. 1 << compact_defer_limit compactions are skipped up
48 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
49 */
50static inline void defer_compaction(struct zone *zone, int order)
51{
52 zone->compact_considered = 0;
53 zone->compact_defer_shift++;
54
55 if (order < zone->compact_order_failed)
56 zone->compact_order_failed = order;
57
58 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
59 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
60}
61
62/* Returns true if compaction should be skipped this time */
63static inline bool compaction_deferred(struct zone *zone, int order)
64{
65 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
66
67 if (order < zone->compact_order_failed)
68 return false;
69
70 /* Avoid possible overflow */
71 if (++zone->compact_considered > defer_limit)
72 zone->compact_considered = defer_limit;
73
74 return zone->compact_considered < defer_limit;
75}
76
77/*
78 * Update defer tracking counters after successful compaction of given order,
79 * which means an allocation either succeeded (alloc_success == true) or is
80 * expected to succeed.
81 */
82static inline void compaction_defer_reset(struct zone *zone, int order,
83 bool alloc_success)
84{
85 if (alloc_success) {
86 zone->compact_considered = 0;
87 zone->compact_defer_shift = 0;
88 }
89 if (order >= zone->compact_order_failed)
90 zone->compact_order_failed = order + 1;
91}
92
93/* Returns true if restarting compaction after many failures */
94static inline bool compaction_restarting(struct zone *zone, int order)
95{
96 if (order < zone->compact_order_failed)
97 return false;
98
99 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
100 zone->compact_considered >= 1UL << zone->compact_defer_shift;
101}
102 52
103#else 53#else
104static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, 54static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
105 int order, gfp_t gfp_mask, nodemask_t *nodemask, 55 unsigned int order, int alloc_flags,
106 enum migrate_mode mode, int *contended, 56 const struct alloc_context *ac,
107 int alloc_flags, int classzone_idx) 57 enum migrate_mode mode, int *contended)
108{ 58{
109 return COMPACT_CONTINUE; 59 return COMPACT_CONTINUE;
110} 60}
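
The compaction-deferral helpers change from static inlines to extern declarations; their behaviour is unchanged. For reference, a sketch of the deferral test that now lives out of line, reconstructed from the removed inline version above:

/* Skip compaction while fewer than 1 << compact_defer_shift attempts
 * have been considered since the last failure (shift capped at
 * COMPACT_MAX_DEFER_SHIFT). */
static bool example_compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}
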
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 7450ca2ac1fc..ab25814690bc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -689,6 +689,15 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
689asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, 689asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
690 compat_stack_t __user *uoss_ptr); 690 compat_stack_t __user *uoss_ptr);
691 691
692#ifdef __ARCH_WANT_SYS_SIGPENDING
693asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
694#endif
695
696#ifdef __ARCH_WANT_SYS_SIGPROCMASK
697asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset,
698 compat_old_sigset_t __user *oset);
699#endif
700
692int compat_restore_altstack(const compat_stack_t __user *uss); 701int compat_restore_altstack(const compat_stack_t __user *uss);
693int __compat_save_altstack(compat_stack_t __user *, unsigned long); 702int __compat_save_altstack(compat_stack_t __user *, unsigned long);
694#define compat_save_altstack_ex(uss, sp) do { \ 703#define compat_save_altstack_ex(uss, sp) do { \
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 02ae99e8e6d3..cdf13ca7cac3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -66,6 +66,7 @@
66#define __deprecated __attribute__((deprecated)) 66#define __deprecated __attribute__((deprecated))
67#define __packed __attribute__((packed)) 67#define __packed __attribute__((packed))
68#define __weak __attribute__((weak)) 68#define __weak __attribute__((weak))
69#define __alias(symbol) __attribute__((alias(#symbol)))
69 70
70/* 71/*
71 * it doesn't make sense on ARM (currently the only user of __naked) to trace 72 * it doesn't make sense on ARM (currently the only user of __naked) to trace
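
The new __alias() wrapper expands to the GCC alias attribute. A minimal, hypothetical use, making one exported symbol an alternate name for a definition in the same translation unit:

static void real_handler(void)
{
	/* ... actual work ... */
}

/* exported_handler resolves to the same code as real_handler */
void exported_handler(void) __alias(real_handler);
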
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index d1a558239b1a..769e19864632 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -85,3 +85,7 @@
85#define __HAVE_BUILTIN_BSWAP16__ 85#define __HAVE_BUILTIN_BSWAP16__
86#endif 86#endif
87#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ 87#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
88
89#if GCC_VERSION >= 40902
90#define KASAN_ABI_VERSION 3
91#endif
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
index c8c565952548..efee493714eb 100644
--- a/include/linux/compiler-gcc5.h
+++ b/include/linux/compiler-gcc5.h
@@ -63,3 +63,5 @@
63#define __HAVE_BUILTIN_BSWAP64__ 63#define __HAVE_BUILTIN_BSWAP64__
64#define __HAVE_BUILTIN_BSWAP16__ 64#define __HAVE_BUILTIN_BSWAP16__
65#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ 65#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
66
67#define KASAN_ABI_VERSION 4
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a1c81f80978e..d1ec10a940ff 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -54,7 +54,11 @@ extern void __chk_io_ptr(const volatile void __iomem *);
54#include <linux/compiler-gcc.h> 54#include <linux/compiler-gcc.h>
55#endif 55#endif
56 56
57#ifdef CC_USING_HOTPATCH
58#define notrace __attribute__((hotpatch(0,0)))
59#else
57#define notrace __attribute__((no_instrument_function)) 60#define notrace __attribute__((no_instrument_function))
61#endif
58 62
59/* Intel compiler defines __GNUC__. So we will overwrite implementations 63/* Intel compiler defines __GNUC__. So we will overwrite implementations
60 * coming from above header files here 64 * coming from above header files here
@@ -215,7 +219,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
215 } 219 }
216} 220}
217 221
218static __always_inline void __assign_once_size(volatile void *p, void *res, int size) 222static __always_inline void __write_once_size(volatile void *p, void *res, int size)
219{ 223{
220 switch (size) { 224 switch (size) {
221 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; 225 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +239,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
235/* 239/*
236 * Prevent the compiler from merging or refetching reads or writes. The 240 * Prevent the compiler from merging or refetching reads or writes. The
237 * compiler is also forbidden from reordering successive instances of 241 * compiler is also forbidden from reordering successive instances of
238 * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the 242 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
239 * compiler is aware of some particular ordering. One way to make the 243 * compiler is aware of some particular ordering. One way to make the
240 * compiler aware of ordering is to put the two invocations of READ_ONCE, 244 * compiler aware of ordering is to put the two invocations of READ_ONCE,
241 * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. 245 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
242 * 246 *
243 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 247 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
244 * data types like structs or unions. If the size of the accessed data 248 * data types like structs or unions. If the size of the accessed data
245 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 249 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
246 * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a 250 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
247 * compile-time warning. 251 * compile-time warning.
248 * 252 *
249 * Their two major use cases are: (1) Mediating communication between 253 * Their two major use cases are: (1) Mediating communication between
@@ -257,8 +261,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
257#define READ_ONCE(x) \ 261#define READ_ONCE(x) \
258 ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) 262 ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
259 263
260#define ASSIGN_ONCE(val, x) \ 264#define WRITE_ONCE(x, val) \
261 ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) 265 ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
262 266
263#endif /* __KERNEL__ */ 267#endif /* __KERNEL__ */
264 268
@@ -385,7 +389,7 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
385 389
386/* Is this type a native word size -- useful for atomic operations */ 390/* Is this type a native word size -- useful for atomic operations */
387#ifndef __native_word 391#ifndef __native_word
388# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) 392# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
389#endif 393#endif
390 394
391/* Compile time object size, -1 for unknown */ 395/* Compile time object size, -1 for unknown */
@@ -447,12 +451,23 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
447 * to make the compiler aware of ordering is to put the two invocations of 451 * to make the compiler aware of ordering is to put the two invocations of
448 * ACCESS_ONCE() in different C statements. 452 * ACCESS_ONCE() in different C statements.
449 * 453 *
450 * This macro does absolutely -nothing- to prevent the CPU from reordering, 454 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
451 * merging, or refetching absolutely anything at any time. Its main intended 455 * on a union member will work as long as the size of the member matches the
452 * use is to mediate communication between process-level code and irq/NMI 456 * size of the union and the size is smaller than word size.
453 * handlers, all running on the same CPU. 457 *
458 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
459 * between process-level code and irq/NMI handlers, all running on the same CPU,
460 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
461 * mutilate accesses that either do not require ordering or that interact
462 * with an explicit memory barrier or atomic instruction that provides the
463 * required ordering.
464 *
 465 * If possible use READ_ONCE/WRITE_ONCE instead.
454 */ 466 */
455#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) 467#define __ACCESS_ONCE(x) ({ \
468 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
469 (volatile typeof(x) *)&(x); })
470#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
456 471
457/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ 472/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
458#ifdef CONFIG_KPROBES 473#ifdef CONFIG_KPROBES
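
With ASSIGN_ONCE(val, x) renamed to WRITE_ONCE(x, val), the read and write sides now use matching argument order. A small sketch of the intended pairing for lockless flag passing; the variable and function names are illustrative:

static int data_ready;

static void producer(void)
{
	/* the store cannot be torn, merged or reordered by the compiler */
	WRITE_ONCE(data_ready, 1);
}

static int consumer(void)
{
	/* the load is performed from memory on every call */
	return READ_ONCE(data_ready);
}
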
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 5d3c54311f7a..3486b9082adb 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -179,15 +179,6 @@ struct coresight_device {
179#define sink_ops(csdev) csdev->ops->sink_ops 179#define sink_ops(csdev) csdev->ops->sink_ops
180#define link_ops(csdev) csdev->ops->link_ops 180#define link_ops(csdev) csdev->ops->link_ops
181 181
182#define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \
183 __mode, __get, __set, __fmt) \
184DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \
185static const struct coresight_ops_entry __name ## _entry = { \
186 .name = __entry_name, \
187 .mode = __mode, \
188 .ops = &__name ## _ops \
189}
190
191/** 182/**
192 * struct coresight_ops_sink - basic operations for a sink 183 * struct coresight_ops_sink - basic operations for a sink
193 * Operations available for sinks 184 * Operations available for sinks
@@ -236,13 +227,8 @@ coresight_register(struct coresight_desc *desc);
236extern void coresight_unregister(struct coresight_device *csdev); 227extern void coresight_unregister(struct coresight_device *csdev);
237extern int coresight_enable(struct coresight_device *csdev); 228extern int coresight_enable(struct coresight_device *csdev);
238extern void coresight_disable(struct coresight_device *csdev); 229extern void coresight_disable(struct coresight_device *csdev);
239extern int coresight_is_bit_set(u32 val, int position, int value);
240extern int coresight_timeout(void __iomem *addr, u32 offset, 230extern int coresight_timeout(void __iomem *addr, u32 offset,
241 int position, int value); 231 int position, int value);
242#ifdef CONFIG_OF
243extern struct coresight_platform_data *of_get_coresight_platform_data(
244 struct device *dev, struct device_node *node);
245#endif
246#else 232#else
247static inline struct coresight_device * 233static inline struct coresight_device *
248coresight_register(struct coresight_desc *desc) { return NULL; } 234coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -250,14 +236,16 @@ static inline void coresight_unregister(struct coresight_device *csdev) {}
250static inline int 236static inline int
251coresight_enable(struct coresight_device *csdev) { return -ENOSYS; } 237coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
252static inline void coresight_disable(struct coresight_device *csdev) {} 238static inline void coresight_disable(struct coresight_device *csdev) {}
253static inline int coresight_is_bit_set(u32 val, int position, int value)
254 { return 0; }
255static inline int coresight_timeout(void __iomem *addr, u32 offset, 239static inline int coresight_timeout(void __iomem *addr, u32 offset,
256 int position, int value) { return 1; } 240 int position, int value) { return 1; }
241#endif
242
257#ifdef CONFIG_OF 243#ifdef CONFIG_OF
244extern struct coresight_platform_data *of_get_coresight_platform_data(
245 struct device *dev, struct device_node *node);
246#else
258static inline struct coresight_platform_data *of_get_coresight_platform_data( 247static inline struct coresight_platform_data *of_get_coresight_platform_data(
259 struct device *dev, struct device_node *node) { return NULL; } 248 struct device *dev, struct device_node *node) { return NULL; }
260#endif 249#endif
261#endif
262 250
263#endif 251#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4d078cebafd2..2ee4888c1f47 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -66,8 +66,6 @@ struct cpufreq_policy {
66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs 66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
67 should set cpufreq */ 67 should set cpufreq */
68 unsigned int cpu; /* cpu nr of CPU managing this policy */ 68 unsigned int cpu; /* cpu nr of CPU managing this policy */
69 unsigned int last_cpu; /* cpu nr of previous CPU that managed
70 * this policy */
71 struct clk *clk; 69 struct clk *clk;
72 struct cpufreq_cpuinfo cpuinfo;/* see above */ 70 struct cpufreq_cpuinfo cpuinfo;/* see above */
73 71
@@ -113,6 +111,9 @@ struct cpufreq_policy {
113 wait_queue_head_t transition_wait; 111 wait_queue_head_t transition_wait;
114 struct task_struct *transition_task; /* Task which is doing the transition */ 112 struct task_struct *transition_task; /* Task which is doing the transition */
115 113
114 /* cpufreq-stats */
115 struct cpufreq_stats *stats;
116
116 /* For cpufreq driver's internal use */ 117 /* For cpufreq driver's internal use */
117 void *driver_data; 118 void *driver_data;
118}; 119};
@@ -367,9 +368,8 @@ static inline void cpufreq_resume(void) {}
367#define CPUFREQ_INCOMPATIBLE (1) 368#define CPUFREQ_INCOMPATIBLE (1)
368#define CPUFREQ_NOTIFY (2) 369#define CPUFREQ_NOTIFY (2)
369#define CPUFREQ_START (3) 370#define CPUFREQ_START (3)
370#define CPUFREQ_UPDATE_POLICY_CPU (4) 371#define CPUFREQ_CREATE_POLICY (4)
371#define CPUFREQ_CREATE_POLICY (5) 372#define CPUFREQ_REMOVE_POLICY (5)
372#define CPUFREQ_REMOVE_POLICY (6)
373 373
374#ifdef CONFIG_CPU_FREQ 374#ifdef CONFIG_CPU_FREQ
375int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 375int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index ab70f3bc44ad..f551a9299ac9 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -50,6 +50,15 @@ struct cpuidle_state {
50 int index); 50 int index);
51 51
52 int (*enter_dead) (struct cpuidle_device *dev, int index); 52 int (*enter_dead) (struct cpuidle_device *dev, int index);
53
54 /*
55 * CPUs execute ->enter_freeze with the local tick or entire timekeeping
56 * suspended, so it must not re-enable interrupts at any point (even
57 * temporarily) or attempt to change states of clock event devices.
58 */
59 void (*enter_freeze) (struct cpuidle_device *dev,
60 struct cpuidle_driver *drv,
61 int index);
53}; 62};
54 63
55/* Idle State Flags */ 64/* Idle State Flags */
@@ -141,7 +150,7 @@ extern void cpuidle_resume(void);
141extern int cpuidle_enable_device(struct cpuidle_device *dev); 150extern int cpuidle_enable_device(struct cpuidle_device *dev);
142extern void cpuidle_disable_device(struct cpuidle_device *dev); 151extern void cpuidle_disable_device(struct cpuidle_device *dev);
143extern int cpuidle_play_dead(void); 152extern int cpuidle_play_dead(void);
144extern void cpuidle_use_deepest_state(bool enable); 153extern void cpuidle_enter_freeze(void);
145 154
146extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); 155extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
147#else 156#else
@@ -174,7 +183,7 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
174{return -ENODEV; } 183{return -ENODEV; }
175static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } 184static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
176static inline int cpuidle_play_dead(void) {return -ENODEV; } 185static inline int cpuidle_play_dead(void) {return -ENODEV; }
177static inline void cpuidle_use_deepest_state(bool enable) {} 186static inline void cpuidle_enter_freeze(void) { }
178static inline struct cpuidle_driver *cpuidle_get_cpu_driver( 187static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
179 struct cpuidle_device *dev) {return NULL; } 188 struct cpuidle_device *dev) {return NULL; }
180#endif 189#endif
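
A hedged sketch of a driver wiring up the new ->enter_freeze() hook; mysoc_wfi() stands in for whatever low-level idle entry the platform provides, and per the comment above it must not re-enable interrupts or touch clock event devices:

extern void mysoc_wfi(void);	/* hypothetical platform idle entry */

static void mysoc_enter_freeze(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	/* interrupts stay disabled; no clockevent state is changed */
	mysoc_wfi();
}

The hook would then be installed next to ->enter in the relevant entry of the driver's states[] table, so the core can call it via cpuidle_enter_freeze() during suspend-to-idle.
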
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b950e9d6008b..086549a665e2 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -22,6 +22,14 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
22 */ 22 */
23#define cpumask_bits(maskp) ((maskp)->bits) 23#define cpumask_bits(maskp) ((maskp)->bits)
24 24
25/**
26 * cpumask_pr_args - printf args to output a cpumask
27 * @maskp: cpumask to be printed
28 *
29 * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
30 */
31#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
32
25#if NR_CPUS == 1 33#if NR_CPUS == 1
26#define nr_cpu_ids 1 34#define nr_cpu_ids 1
27#else 35#else
@@ -539,21 +547,6 @@ static inline void cpumask_copy(struct cpumask *dstp,
539#define cpumask_of(cpu) (get_cpu_mask(cpu)) 547#define cpumask_of(cpu) (get_cpu_mask(cpu))
540 548
541/** 549/**
542 * cpumask_scnprintf - print a cpumask into a string as comma-separated hex
543 * @buf: the buffer to sprintf into
544 * @len: the length of the buffer
545 * @srcp: the cpumask to print
546 *
547 * If len is zero, returns zero. Otherwise returns the length of the
548 * (nul-terminated) @buf string.
549 */
550static inline int cpumask_scnprintf(char *buf, int len,
551 const struct cpumask *srcp)
552{
553 return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
554}
555
556/**
557 * cpumask_parse_user - extract a cpumask from a user string 550 * cpumask_parse_user - extract a cpumask from a user string
558 * @buf: the buffer to extract from 551 * @buf: the buffer to extract from
559 * @len: the length of the buffer 552 * @len: the length of the buffer
@@ -564,7 +557,7 @@ static inline int cpumask_scnprintf(char *buf, int len,
564static inline int cpumask_parse_user(const char __user *buf, int len, 557static inline int cpumask_parse_user(const char __user *buf, int len,
565 struct cpumask *dstp) 558 struct cpumask *dstp)
566{ 559{
567 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); 560 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
568} 561}
569 562
570/** 563/**
@@ -579,23 +572,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
579 struct cpumask *dstp) 572 struct cpumask *dstp)
580{ 573{
581 return bitmap_parselist_user(buf, len, cpumask_bits(dstp), 574 return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
582 nr_cpumask_bits); 575 nr_cpu_ids);
583}
584
585/**
586 * cpulist_scnprintf - print a cpumask into a string as comma-separated list
587 * @buf: the buffer to sprintf into
588 * @len: the length of the buffer
589 * @srcp: the cpumask to print
590 *
591 * If len is zero, returns zero. Otherwise returns the length of the
592 * (nul-terminated) @buf string.
593 */
594static inline int cpulist_scnprintf(char *buf, int len,
595 const struct cpumask *srcp)
596{
597 return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
598 nr_cpumask_bits);
599} 576}
600 577
601/** 578/**
@@ -610,7 +587,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
610 char *nl = strchr(buf, '\n'); 587 char *nl = strchr(buf, '\n');
611 unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); 588 unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
612 589
613 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); 590 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
614} 591}
615 592
616/** 593/**
@@ -622,7 +599,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
622 */ 599 */
623static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 600static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
624{ 601{
625 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); 602 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
626} 603}
627 604
628/** 605/**
@@ -817,7 +794,7 @@ static inline ssize_t
817cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) 794cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
818{ 795{
819 return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), 796 return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
820 nr_cpumask_bits); 797 nr_cpu_ids);
821} 798}
822 799
823/* 800/*
@@ -905,13 +882,13 @@ static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
905} 882}
906 883
907#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) 884#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
908static inline void __cpus_setall(cpumask_t *dstp, int nbits) 885static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits)
909{ 886{
910 bitmap_fill(dstp->bits, nbits); 887 bitmap_fill(dstp->bits, nbits);
911} 888}
912 889
913#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) 890#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
914static inline void __cpus_clear(cpumask_t *dstp, int nbits) 891static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits)
915{ 892{
916 bitmap_zero(dstp->bits, nbits); 893 bitmap_zero(dstp->bits, nbits);
917} 894}
@@ -927,21 +904,21 @@ static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
927 904
928#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) 905#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
929static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, 906static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
930 const cpumask_t *src2p, int nbits) 907 const cpumask_t *src2p, unsigned int nbits)
931{ 908{
932 return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); 909 return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
933} 910}
934 911
935#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) 912#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
936static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, 913static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
937 const cpumask_t *src2p, int nbits) 914 const cpumask_t *src2p, unsigned int nbits)
938{ 915{
939 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); 916 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
940} 917}
941 918
942#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) 919#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
943static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, 920static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
944 const cpumask_t *src2p, int nbits) 921 const cpumask_t *src2p, unsigned int nbits)
945{ 922{
946 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); 923 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
947} 924}
@@ -949,40 +926,40 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
949#define cpus_andnot(dst, src1, src2) \ 926#define cpus_andnot(dst, src1, src2) \
950 __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) 927 __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
951static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, 928static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
952 const cpumask_t *src2p, int nbits) 929 const cpumask_t *src2p, unsigned int nbits)
953{ 930{
954 return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); 931 return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
955} 932}
956 933
957#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) 934#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
958static inline int __cpus_equal(const cpumask_t *src1p, 935static inline int __cpus_equal(const cpumask_t *src1p,
959 const cpumask_t *src2p, int nbits) 936 const cpumask_t *src2p, unsigned int nbits)
960{ 937{
961 return bitmap_equal(src1p->bits, src2p->bits, nbits); 938 return bitmap_equal(src1p->bits, src2p->bits, nbits);
962} 939}
963 940
964#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) 941#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
965static inline int __cpus_intersects(const cpumask_t *src1p, 942static inline int __cpus_intersects(const cpumask_t *src1p,
966 const cpumask_t *src2p, int nbits) 943 const cpumask_t *src2p, unsigned int nbits)
967{ 944{
968 return bitmap_intersects(src1p->bits, src2p->bits, nbits); 945 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
969} 946}
970 947
971#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) 948#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
972static inline int __cpus_subset(const cpumask_t *src1p, 949static inline int __cpus_subset(const cpumask_t *src1p,
973 const cpumask_t *src2p, int nbits) 950 const cpumask_t *src2p, unsigned int nbits)
974{ 951{
975 return bitmap_subset(src1p->bits, src2p->bits, nbits); 952 return bitmap_subset(src1p->bits, src2p->bits, nbits);
976} 953}
977 954
978#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) 955#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
979static inline int __cpus_empty(const cpumask_t *srcp, int nbits) 956static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits)
980{ 957{
981 return bitmap_empty(srcp->bits, nbits); 958 return bitmap_empty(srcp->bits, nbits);
982} 959}
983 960
984#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) 961#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
985static inline int __cpus_weight(const cpumask_t *srcp, int nbits) 962static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits)
986{ 963{
987 return bitmap_weight(srcp->bits, nbits); 964 return bitmap_weight(srcp->bits, nbits);
988} 965}
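
With cpumask_scnprintf() and cpulist_scnprintf() removed, masks are printed through the %*pb / %*pbl format specifiers using the new cpumask_pr_args() helper; parsing keeps the same entry points but is now bounded by nr_cpu_ids. A brief sketch, with the string and names chosen for illustration:

static void example_show_and_parse(const struct cpumask *mask,
				   struct cpumask *out)
{
	/* prints e.g. "cpus: 0-3,8" */
	pr_info("cpus: %*pbl\n", cpumask_pr_args(mask));

	if (cpulist_parse("0-3,8", out))
		pr_warn("failed to parse cpu list\n");
}
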
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 9c8776d0ada8..fb5ef16d6a12 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -1147,7 +1147,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
1147 * cipher operation completes. 1147 * cipher operation completes.
1148 * 1148 *
1149 * The callback function is registered with the ablkcipher_request handle and 1149 * The callback function is registered with the ablkcipher_request handle and
1150 * must comply with the following template: 1150 * must comply with the following template
1151 * 1151 *
1152 * void callback_function(struct crypto_async_request *req, int error) 1152 * void callback_function(struct crypto_async_request *req, int error)
1153 */ 1153 */
@@ -1174,7 +1174,7 @@ static inline void ablkcipher_request_set_callback(
1174 * 1174 *
1175 * For encryption, the source is treated as the plaintext and the 1175 * For encryption, the source is treated as the plaintext and the
1176 * destination is the ciphertext. For a decryption operation, the use is 1176 * destination is the ciphertext. For a decryption operation, the use is
1177 * reversed: the source is the ciphertext and the destination is the plaintext. 1177 * reversed - the source is the ciphertext and the destination is the plaintext.
1178 */ 1178 */
1179static inline void ablkcipher_request_set_crypt( 1179static inline void ablkcipher_request_set_crypt(
1180 struct ablkcipher_request *req, 1180 struct ablkcipher_request *req,
@@ -1412,6 +1412,9 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
1412 */ 1412 */
1413static inline int crypto_aead_decrypt(struct aead_request *req) 1413static inline int crypto_aead_decrypt(struct aead_request *req)
1414{ 1414{
1415 if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
1416 return -EINVAL;
1417
1415 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); 1418 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
1416} 1419}
1417 1420
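
The new guard in crypto_aead_decrypt() rejects requests whose cryptlen is shorter than the authentication tag. A hedged caller-side sketch of what that implies, assuming the AEAD request API used elsewhere in this header; the helper name is illustrative:

static int example_aead_decrypt(struct crypto_aead *tfm,
				struct aead_request *req,
				struct scatterlist *src, struct scatterlist *dst,
				unsigned int ct_len, u8 *iv)
{
	/* cryptlen must cover the ciphertext plus the trailing tag,
	 * otherwise the check above returns -EINVAL before the cipher
	 * implementation is ever called. */
	aead_request_set_crypt(req, src, dst,
			       ct_len + crypto_aead_authsize(tfm), iv);
	return crypto_aead_decrypt(req);
}
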
@@ -1506,7 +1509,7 @@ static inline void aead_request_free(struct aead_request *req)
1506 * completes 1509 * completes
1507 * 1510 *
1508 * The callback function is registered with the aead_request handle and 1511 * The callback function is registered with the aead_request handle and
1509 * must comply with the following template: 1512 * must comply with the following template
1510 * 1513 *
1511 * void callback_function(struct crypto_async_request *req, int error) 1514 * void callback_function(struct crypto_async_request *req, int error)
1512 */ 1515 */
@@ -1533,7 +1536,7 @@ static inline void aead_request_set_callback(struct aead_request *req,
1533 * 1536 *
1534 * For encryption, the source is treated as the plaintext and the 1537 * For encryption, the source is treated as the plaintext and the
1535 * destination is the ciphertext. For a decryption operation, the use is 1538 * destination is the ciphertext. For a decryption operation, the use is
1536 * reversed: the source is the ciphertext and the destination is the plaintext. 1539 * reversed - the source is the ciphertext and the destination is the plaintext.
1537 * 1540 *
1538 * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, 1541 * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
1539 * the caller must concatenate the ciphertext followed by the 1542 * the caller must concatenate the ciphertext followed by the
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index 2cd9f1cf9fa3..f4754282c9c2 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -1,6 +1,8 @@
1#ifndef __CRYPTOHASH_H 1#ifndef __CRYPTOHASH_H
2#define __CRYPTOHASH_H 2#define __CRYPTOHASH_H
3 3
4#include <uapi/linux/types.h>
5
4#define SHA_DIGEST_WORDS 5 6#define SHA_DIGEST_WORDS 5
5#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8) 7#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
6#define SHA_WORKSPACE_WORDS 16 8#define SHA_WORKSPACE_WORDS 16
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 5a813988e6d4..92c08cf7670e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -319,9 +319,6 @@ static inline unsigned d_count(const struct dentry *dentry)
319 return dentry->d_lockref.count; 319 return dentry->d_lockref.count;
320} 320}
321 321
322/* validate "insecure" dentry pointer */
323extern int d_validate(struct dentry *, struct dentry *);
324
325/* 322/*
326 * helper function for dentry_operations.d_dname() members 323 * helper function for dentry_operations.d_dname() members
327 */ 324 */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index da4c4983adbe..cb25af461054 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -51,11 +51,21 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode,
51 struct dentry *parent, void *data, 51 struct dentry *parent, void *data,
52 const struct file_operations *fops); 52 const struct file_operations *fops);
53 53
54struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
55 struct dentry *parent, void *data,
56 const struct file_operations *fops,
57 loff_t file_size);
58
54struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); 59struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
55 60
56struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, 61struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
57 const char *dest); 62 const char *dest);
58 63
64struct dentry *debugfs_create_automount(const char *name,
65 struct dentry *parent,
66 struct vfsmount *(*f)(void *),
67 void *data);
68
59void debugfs_remove(struct dentry *dentry); 69void debugfs_remove(struct dentry *dentry);
60void debugfs_remove_recursive(struct dentry *dentry); 70void debugfs_remove_recursive(struct dentry *dentry);
61 71
@@ -124,6 +134,14 @@ static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
124 return ERR_PTR(-ENODEV); 134 return ERR_PTR(-ENODEV);
125} 135}
126 136
137static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
138 struct dentry *parent, void *data,
139 const struct file_operations *fops,
140 loff_t file_size)
141{
142 return ERR_PTR(-ENODEV);
143}
144
127static inline struct dentry *debugfs_create_dir(const char *name, 145static inline struct dentry *debugfs_create_dir(const char *name,
128 struct dentry *parent) 146 struct dentry *parent)
129{ 147{
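
debugfs_create_file_size() behaves like debugfs_create_file() but also records a size for the new node, which helps userspace tools that size their reads from stat(). A hypothetical driver hook-up; the names and the 4096-byte size are made up:

extern const struct file_operations mydrv_stats_fops;	/* read handler defined elsewhere */

static void mydrv_debugfs_init(struct dentry *parent, void *priv)
{
	debugfs_create_file_size("stats", 0444, parent, priv,
				 &mydrv_stats_fops, 4096);
}
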
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
new file mode 100644
index 000000000000..602fbbfcfeed
--- /dev/null
+++ b/include/linux/devfreq-event.h
@@ -0,0 +1,196 @@
1/*
2 * devfreq-event: a framework to provide raw data and events of devfreq devices
3 *
4 * Copyright (C) 2014 Samsung Electronics
5 * Author: Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __LINUX_DEVFREQ_EVENT_H__
13#define __LINUX_DEVFREQ_EVENT_H__
14
15#include <linux/device.h>
16
17/**
18 * struct devfreq_event_dev - the devfreq-event device
19 *
20 * @node : Contain the devfreq-event device that have been registered.
21 * @dev : the device registered by devfreq-event class. dev.parent is
22 * the device using devfreq-event.
23 * @lock : a mutex to protect accessing devfreq-event.
 24 * @enable_count: the number of times the enable function has been called.
25 * @desc : the description for devfreq-event device.
26 *
27 * This structure contains devfreq-event device information.
28 */
29struct devfreq_event_dev {
30 struct list_head node;
31
32 struct device dev;
33 struct mutex lock;
34 u32 enable_count;
35
36 const struct devfreq_event_desc *desc;
37};
38
39/**
40 * struct devfreq_event_data - the devfreq-event data
41 *
42 * @load_count : load count of devfreq-event device for the given period.
43 * @total_count : total count of devfreq-event device for the given period.
44 * each count may represent a clock cycle, a time unit
45 * (ns/us/...), or anything the device driver wants.
46 * Generally, utilization is load_count / total_count.
47 *
48 * This structure contains the data of devfreq-event device for polling period.
49 */
50struct devfreq_event_data {
51 unsigned long load_count;
52 unsigned long total_count;
53};
54
55/**
56 * struct devfreq_event_ops - the operations of devfreq-event device
57 *
58 * @enable : Enable the devfreq-event device.
59 * @disable : Disable the devfreq-event device.
 60 * @reset : Reset all settings of the devfreq-event device.
61 * @set_event : Set the specific event type for the devfreq-event device.
 62 * @get_event : Get the result of the devfreq-event device with the specific
63 * event type.
64 *
65 * This structure contains devfreq-event device operations which can be
66 * implemented by devfreq-event device drivers.
67 */
68struct devfreq_event_ops {
69 /* Optional functions */
70 int (*enable)(struct devfreq_event_dev *edev);
71 int (*disable)(struct devfreq_event_dev *edev);
72 int (*reset)(struct devfreq_event_dev *edev);
73
74 /* Mandatory functions */
75 int (*set_event)(struct devfreq_event_dev *edev);
76 int (*get_event)(struct devfreq_event_dev *edev,
77 struct devfreq_event_data *edata);
78};
79
80/**
81 * struct devfreq_event_desc - the descriptor of devfreq-event device
82 *
83 * @name : the name of devfreq-event device.
84 * @driver_data : the private data for devfreq-event driver.
85 * @ops : the operation to control devfreq-event device.
86 *
 87 * Each devfreq-event device is described with this structure.
88 * This structure contains the various data for devfreq-event device.
89 */
90struct devfreq_event_desc {
91 const char *name;
92 void *driver_data;
93
94 struct devfreq_event_ops *ops;
95};
96
97#if defined(CONFIG_PM_DEVFREQ_EVENT)
98extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev);
99extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev);
100extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev);
101extern int devfreq_event_set_event(struct devfreq_event_dev *edev);
102extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
103 struct devfreq_event_data *edata);
104extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
105extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
106 struct device *dev, int index);
107extern int devfreq_event_get_edev_count(struct device *dev);
108extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
109 struct devfreq_event_desc *desc);
110extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
111extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
112 struct devfreq_event_desc *desc);
113extern void devm_devfreq_event_remove_edev(struct device *dev,
114 struct devfreq_event_dev *edev);
115static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
116{
117 return edev->desc->driver_data;
118}
119#else
120static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
121{
122 return -EINVAL;
123}
124
125static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
126{
127 return -EINVAL;
128}
129
130static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
131{
132 return false;
133}
134
135static inline int devfreq_event_set_event(struct devfreq_event_dev *edev)
136{
137 return -EINVAL;
138}
139
140static inline int devfreq_event_get_event(struct devfreq_event_dev *edev,
141 struct devfreq_event_data *edata)
142{
143 return -EINVAL;
144}
145
146static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
147{
148 return -EINVAL;
149}
150
151static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
152{
153 return ERR_PTR(-EINVAL);
154}
155
156static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
157 struct device *dev, int index)
158{
159 return ERR_PTR(-EINVAL);
160}
161
162static inline int devfreq_event_get_edev_count(struct device *dev)
163{
164 return -EINVAL;
165}
166
167static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
168 struct devfreq_event_desc *desc)
169{
170 return ERR_PTR(-EINVAL);
171}
172
173static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
174{
175 return -EINVAL;
176}
177
178static inline struct devfreq_event_dev *devm_devfreq_event_add_edev(
179 struct device *dev,
180 struct devfreq_event_desc *desc)
181{
182 return ERR_PTR(-EINVAL);
183}
184
185static inline void devm_devfreq_event_remove_edev(struct device *dev,
186 struct devfreq_event_dev *edev)
187{
188}
189
190static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
191{
192 return NULL;
193}
194#endif /* CONFIG_PM_DEVFREQ_EVENT */
195
196#endif /* __LINUX_DEVFREQ_EVENT_H__ */
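
A condensed sketch of how a driver might sit on the provider side of the new devfreq-event API: it fills in the two mandatory ops and registers a descriptor; a consumer would later call devfreq_event_get_event() and treat load_count / total_count as the utilization, as the header documents. Every driver-specific name below is hypothetical:

static int mycounter_set_event(struct devfreq_event_dev *edev)
{
	/* program the hardware counters for the next period */
	return 0;
}

static int mycounter_get_event(struct devfreq_event_dev *edev,
			       struct devfreq_event_data *edata)
{
	edata->load_count  = 0;	/* busy cycles read from hardware */
	edata->total_count = 1;	/* total cycles read from hardware */
	return 0;
}

static struct devfreq_event_ops mycounter_ops = {
	.set_event = mycounter_set_event,	/* mandatory */
	.get_event = mycounter_get_event,	/* mandatory */
};

static struct devfreq_event_desc mycounter_desc = {
	.name = "mycounter",
	.ops  = &mycounter_ops,
};

static int mycounter_probe(struct platform_device *pdev)
{
	struct devfreq_event_dev *edev;

	edev = devm_devfreq_event_add_edev(&pdev->dev, &mycounter_desc);
	return PTR_ERR_OR_ZERO(edev);
}
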
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ca6d2acc5eb7..2646aed1d3fe 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -48,6 +48,11 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
48typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); 48typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
49typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, 49typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
50 union map_info *map_context); 50 union map_info *map_context);
51typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
52 struct request *rq,
53 union map_info *map_context,
54 struct request **clone);
55typedef void (*dm_release_clone_request_fn) (struct request *clone);
51 56
52/* 57/*
53 * Returns: 58 * Returns:
@@ -143,6 +148,8 @@ struct target_type {
143 dm_dtr_fn dtr; 148 dm_dtr_fn dtr;
144 dm_map_fn map; 149 dm_map_fn map;
145 dm_map_request_fn map_rq; 150 dm_map_request_fn map_rq;
151 dm_clone_and_map_request_fn clone_and_map_rq;
152 dm_release_clone_request_fn release_clone_rq;
146 dm_endio_fn end_io; 153 dm_endio_fn end_io;
147 dm_request_endio_fn rq_end_io; 154 dm_request_endio_fn rq_end_io;
148 dm_presuspend_fn presuspend; 155 dm_presuspend_fn presuspend;
@@ -600,9 +607,6 @@ static inline unsigned long to_bytes(sector_t n)
600/*----------------------------------------------------------------- 607/*-----------------------------------------------------------------
601 * Helper for block layer and dm core operations 608 * Helper for block layer and dm core operations
602 *---------------------------------------------------------------*/ 609 *---------------------------------------------------------------*/
603void dm_dispatch_request(struct request *rq);
604void dm_requeue_unmapped_request(struct request *rq);
605void dm_kill_unmapped_request(struct request *rq, int error);
606int dm_underlying_device_busy(struct request_queue *q); 610int dm_underlying_device_busy(struct request_queue *q);
607 611
608#endif /* _LINUX_DEVICE_MAPPER_H */ 612#endif /* _LINUX_DEVICE_MAPPER_H */
diff --git a/include/linux/device.h b/include/linux/device.h
index fb506738f7b7..0eb8ee2dc6d1 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1038,22 +1038,22 @@ extern __printf(3, 4)
1038int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); 1038int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
1039 1039
1040extern __printf(3, 4) 1040extern __printf(3, 4)
1041int dev_printk(const char *level, const struct device *dev, 1041void dev_printk(const char *level, const struct device *dev,
1042 const char *fmt, ...); 1042 const char *fmt, ...);
1043extern __printf(2, 3) 1043extern __printf(2, 3)
1044int dev_emerg(const struct device *dev, const char *fmt, ...); 1044void dev_emerg(const struct device *dev, const char *fmt, ...);
1045extern __printf(2, 3) 1045extern __printf(2, 3)
1046int dev_alert(const struct device *dev, const char *fmt, ...); 1046void dev_alert(const struct device *dev, const char *fmt, ...);
1047extern __printf(2, 3) 1047extern __printf(2, 3)
1048int dev_crit(const struct device *dev, const char *fmt, ...); 1048void dev_crit(const struct device *dev, const char *fmt, ...);
1049extern __printf(2, 3) 1049extern __printf(2, 3)
1050int dev_err(const struct device *dev, const char *fmt, ...); 1050void dev_err(const struct device *dev, const char *fmt, ...);
1051extern __printf(2, 3) 1051extern __printf(2, 3)
1052int dev_warn(const struct device *dev, const char *fmt, ...); 1052void dev_warn(const struct device *dev, const char *fmt, ...);
1053extern __printf(2, 3) 1053extern __printf(2, 3)
1054int dev_notice(const struct device *dev, const char *fmt, ...); 1054void dev_notice(const struct device *dev, const char *fmt, ...);
1055extern __printf(2, 3) 1055extern __printf(2, 3)
1056int _dev_info(const struct device *dev, const char *fmt, ...); 1056void _dev_info(const struct device *dev, const char *fmt, ...);
1057 1057
1058#else 1058#else
1059 1059
@@ -1065,35 +1065,35 @@ static inline __printf(3, 4)
1065int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) 1065int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
1066{ return 0; } 1066{ return 0; }
1067 1067
1068static inline int __dev_printk(const char *level, const struct device *dev, 1068static inline void __dev_printk(const char *level, const struct device *dev,
1069 struct va_format *vaf) 1069 struct va_format *vaf)
1070{ return 0; } 1070{}
1071static inline __printf(3, 4) 1071static inline __printf(3, 4)
1072int dev_printk(const char *level, const struct device *dev, 1072void dev_printk(const char *level, const struct device *dev,
1073 const char *fmt, ...) 1073 const char *fmt, ...)
1074{ return 0; } 1074{}
1075 1075
1076static inline __printf(2, 3) 1076static inline __printf(2, 3)
1077int dev_emerg(const struct device *dev, const char *fmt, ...) 1077void dev_emerg(const struct device *dev, const char *fmt, ...)
1078{ return 0; } 1078{}
1079static inline __printf(2, 3) 1079static inline __printf(2, 3)
1080int dev_crit(const struct device *dev, const char *fmt, ...) 1080void dev_crit(const struct device *dev, const char *fmt, ...)
1081{ return 0; } 1081{}
1082static inline __printf(2, 3) 1082static inline __printf(2, 3)
1083int dev_alert(const struct device *dev, const char *fmt, ...) 1083void dev_alert(const struct device *dev, const char *fmt, ...)
1084{ return 0; } 1084{}
1085static inline __printf(2, 3) 1085static inline __printf(2, 3)
1086int dev_err(const struct device *dev, const char *fmt, ...) 1086void dev_err(const struct device *dev, const char *fmt, ...)
1087{ return 0; } 1087{}
1088static inline __printf(2, 3) 1088static inline __printf(2, 3)
1089int dev_warn(const struct device *dev, const char *fmt, ...) 1089void dev_warn(const struct device *dev, const char *fmt, ...)
1090{ return 0; } 1090{}
1091static inline __printf(2, 3) 1091static inline __printf(2, 3)
1092int dev_notice(const struct device *dev, const char *fmt, ...) 1092void dev_notice(const struct device *dev, const char *fmt, ...)
1093{ return 0; } 1093{}
1094static inline __printf(2, 3) 1094static inline __printf(2, 3)
1095int _dev_info(const struct device *dev, const char *fmt, ...) 1095void _dev_info(const struct device *dev, const char *fmt, ...)
1096{ return 0; } 1096{}
1097 1097
1098#endif 1098#endif
1099 1099
@@ -1119,7 +1119,6 @@ do { \
1119({ \ 1119({ \
1120 if (0) \ 1120 if (0) \
1121 dev_printk(KERN_DEBUG, dev, format, ##arg); \ 1121 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1122 0; \
1123}) 1122})
1124#endif 1123#endif
1125 1124
@@ -1156,7 +1155,7 @@ do { \
1156#define dev_info_once(dev, fmt, ...) \ 1155#define dev_info_once(dev, fmt, ...) \
1157 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) 1156 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
1158#define dev_dbg_once(dev, fmt, ...) \ 1157#define dev_dbg_once(dev, fmt, ...) \
1159 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) 1158 dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
1160 1159
1161#define dev_level_ratelimited(dev_level, dev, fmt, ...) \ 1160#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
1162do { \ 1161do { \
@@ -1215,7 +1214,6 @@ do { \
1215({ \ 1214({ \
1216 if (0) \ 1215 if (0) \
1217 dev_printk(KERN_DEBUG, dev, format, ##arg); \ 1216 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1218 0; \
1219}) 1217})
1220#endif 1218#endif
1221 1219
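
The conversion above makes the dev_printk() family return void, so callers can no longer test its return value. A minimal sketch of the resulting calling style (the foo_* names are made up for illustration):

#include <linux/device.h>
#include <linux/errno.h>

static int foo_configure_clock(struct device *dev, unsigned long rate)
{
	if (rate == 0) {
		/* dev_err() no longer returns a value; report the errno separately */
		dev_err(dev, "invalid clock rate %lu\n", rate);
		return -EINVAL;
	}

	dev_info(dev, "clock configured at %lu Hz\n", rate);
	return 0;
}
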
diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h
index 3713a7232dd8..c0d4d1e2a45c 100644
--- a/include/linux/dqblk_v1.h
+++ b/include/linux/dqblk_v1.h
@@ -5,9 +5,6 @@
5#ifndef _LINUX_DQBLK_V1_H 5#ifndef _LINUX_DQBLK_V1_H
6#define _LINUX_DQBLK_V1_H 6#define _LINUX_DQBLK_V1_H
7 7
8/* Root squash turned on */
9#define V1_DQF_RSQUASH 1
10
11/* Numbers of blocks needed for updates */ 8/* Numbers of blocks needed for updates */
12#define V1_INIT_ALLOC 1 9#define V1_INIT_ALLOC 1
13#define V1_INIT_REWRITE 1 10#define V1_INIT_REWRITE 1
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0238d612750e..cf7e431cbc73 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -848,7 +848,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
848} 848}
849 849
850static inline char * 850static inline char *
851efi_guid_unparse(efi_guid_t *guid, char *out) 851efi_guid_to_str(efi_guid_t *guid, char *out)
852{ 852{
853 sprintf(out, "%pUl", guid->b); 853 sprintf(out, "%pUl", guid->b);
854 return out; 854 return out;
@@ -875,6 +875,8 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
875#endif 875#endif
876extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); 876extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
877extern int efi_config_init(efi_config_table_type_t *arch_tables); 877extern int efi_config_init(efi_config_table_type_t *arch_tables);
878extern int efi_config_parse_tables(void *config_tables, int count, int sz,
879 efi_config_table_type_t *arch_tables);
878extern u64 efi_get_iobase (void); 880extern u64 efi_get_iobase (void);
879extern u32 efi_mem_type (unsigned long phys_addr); 881extern u32 efi_mem_type (unsigned long phys_addr);
880extern u64 efi_mem_attributes (unsigned long phys_addr); 882extern u64 efi_mem_attributes (unsigned long phys_addr);
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 9a33c5f7e126..7be22da321f3 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -79,6 +79,12 @@ struct enclosure_component_callbacks {
79 int (*set_locate)(struct enclosure_device *, 79 int (*set_locate)(struct enclosure_device *,
80 struct enclosure_component *, 80 struct enclosure_component *,
81 enum enclosure_component_setting); 81 enum enclosure_component_setting);
82 void (*get_power_status)(struct enclosure_device *,
83 struct enclosure_component *);
84 int (*set_power_status)(struct enclosure_device *,
85 struct enclosure_component *,
86 int);
87 int (*show_id)(struct enclosure_device *, char *buf);
82}; 88};
83 89
84 90
@@ -91,7 +97,9 @@ struct enclosure_component {
91 int fault; 97 int fault;
92 int active; 98 int active;
93 int locate; 99 int locate;
100 int slot;
94 enum enclosure_status status; 101 enum enclosure_status status;
102 int power_status;
95}; 103};
96 104
97struct enclosure_device { 105struct enclosure_device {
@@ -120,8 +128,9 @@ enclosure_register(struct device *, const char *, int,
120 struct enclosure_component_callbacks *); 128 struct enclosure_component_callbacks *);
121void enclosure_unregister(struct enclosure_device *); 129void enclosure_unregister(struct enclosure_device *);
122struct enclosure_component * 130struct enclosure_component *
123enclosure_component_register(struct enclosure_device *, unsigned int, 131enclosure_component_alloc(struct enclosure_device *, unsigned int,
124 enum enclosure_component_type, const char *); 132 enum enclosure_component_type, const char *);
133int enclosure_component_register(struct enclosure_component *);
125int enclosure_add_device(struct enclosure_device *enclosure, int component, 134int enclosure_add_device(struct enclosure_device *enclosure, int component,
126 struct device *dev); 135 struct device *dev);
127int enclosure_remove_device(struct enclosure_device *, struct device *); 136int enclosure_remove_device(struct enclosure_device *, struct device *);
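
Component registration in enclosure.h is now a two-step alloc-then-register sequence, and components carry the new slot and power_status fields. A rough usage sketch, assuming the usual ERR_PTR error convention for the allocator and the existing ENCLOSURE_COMPONENT_DEVICE type:

#include <linux/enclosure.h>
#include <linux/err.h>

static int example_add_slot(struct enclosure_device *edev, int slot,
			    const char *name)
{
	struct enclosure_component *ecomp;

	ecomp = enclosure_component_alloc(edev, slot,
					  ENCLOSURE_COMPONENT_DEVICE, name);
	if (IS_ERR(ecomp))
		return PTR_ERR(ecomp);

	ecomp->slot = slot;	/* new field, filled before registration */

	return enclosure_component_register(ecomp);
}
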
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 41c891d05f04..1d869d185a0d 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -52,6 +52,10 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
52#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) 52#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
53#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) 53#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
54 54
55struct sk_buff **eth_gro_receive(struct sk_buff **head,
56 struct sk_buff *skb);
57int eth_gro_complete(struct sk_buff *skb, int nhoff);
58
55/* Reserved Ethernet Addresses per IEEE 802.1Q */ 59/* Reserved Ethernet Addresses per IEEE 802.1Q */
56static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = 60static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
57{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; 61{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 41b223a59a63..fa05e04c5531 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -4,6 +4,7 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6struct dentry; 6struct dentry;
7struct iattr;
7struct inode; 8struct inode;
8struct super_block; 9struct super_block;
9struct vfsmount; 10struct vfsmount;
@@ -180,6 +181,21 @@ struct fid {
180 * get_name is not (which is possibly inconsistent) 181 * get_name is not (which is possibly inconsistent)
181 */ 182 */
182 183
184/* types of block ranges for multipage write mappings. */
185#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
186#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
187#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */
188#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
189
190#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
191
192struct iomap {
193 sector_t blkno; /* first sector of mapping */
194 loff_t offset; /* file offset of mapping, bytes */
195 u64 length; /* length of mapping, bytes */
196 int type; /* type of mapping */
197};
198
183struct export_operations { 199struct export_operations {
184 int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len, 200 int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
185 struct inode *parent); 201 struct inode *parent);
@@ -191,6 +207,13 @@ struct export_operations {
191 struct dentry *child); 207 struct dentry *child);
192 struct dentry * (*get_parent)(struct dentry *child); 208 struct dentry * (*get_parent)(struct dentry *child);
193 int (*commit_metadata)(struct inode *inode); 209 int (*commit_metadata)(struct inode *inode);
210
211 int (*get_uuid)(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
212 int (*map_blocks)(struct inode *inode, loff_t offset,
213 u64 len, struct iomap *iomap,
214 bool write, u32 *device_generation);
215 int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
216 int nr_iomaps, struct iattr *iattr);
194}; 217};
195 218
196extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, 219extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
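
The new ->map_blocks export operation is expected to describe a file range with the struct iomap defined above. A hypothetical filesystem hook might look like this; myfs_lookup_extent() is an imaginary helper standing in for the filesystem's own extent lookup:

#include <linux/exportfs.h>

/* stand-in for a real extent lookup; always reports a hole here */
static bool myfs_lookup_extent(struct inode *inode, loff_t offset, u64 len,
			       sector_t *blkno)
{
	return false;
}

static int myfs_map_blocks(struct inode *inode, loff_t offset, u64 len,
			   struct iomap *iomap, bool write,
			   u32 *device_generation)
{
	sector_t blkno;

	iomap->offset = offset;
	iomap->length = len;
	if (myfs_lookup_extent(inode, offset, len, &blkno)) {
		iomap->type = IOMAP_MAPPED;
		iomap->blkno = blkno;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->blkno = IOMAP_NULL_BLOCK;
	}
	*device_generation = 0;	/* single-device filesystem assumed */
	return 0;
}
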
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 87f14e90e984..a23556c32703 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -19,12 +19,16 @@
19#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */ 19#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
20#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */ 20#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
21#define F2FS_BLKSIZE 4096 /* support only 4KB block */ 21#define F2FS_BLKSIZE 4096 /* support only 4KB block */
22#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
22#define F2FS_MAX_EXTENSION 64 /* # of extension entries */ 23#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
23#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE) 24#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
24 25
25#define NULL_ADDR ((block_t)0) /* used as block_t addresses */ 26#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
26#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ 27#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
27 28
29#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
30#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
31
28/* 0, 1(node nid), 2(meta nid) are reserved node id */ 32/* 0, 1(node nid), 2(meta nid) are reserved node id */
29#define F2FS_RESERVED_NODE_NUM 3 33#define F2FS_RESERVED_NODE_NUM 3
30 34
@@ -87,6 +91,7 @@ struct f2fs_super_block {
87/* 91/*
88 * For checkpoint 92 * For checkpoint
89 */ 93 */
94#define CP_FASTBOOT_FLAG 0x00000020
90#define CP_FSCK_FLAG 0x00000010 95#define CP_FSCK_FLAG 0x00000010
91#define CP_ERROR_FLAG 0x00000008 96#define CP_ERROR_FLAG 0x00000008
92#define CP_COMPACT_SUM_FLAG 0x00000004 97#define CP_COMPACT_SUM_FLAG 0x00000004
@@ -224,6 +229,8 @@ enum {
224 OFFSET_BIT_SHIFT 229 OFFSET_BIT_SHIFT
225}; 230};
226 231
232#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */
233
227struct node_footer { 234struct node_footer {
228 __le32 nid; /* node id */ 235 __le32 nid; /* node id */
229 __le32 ino; /* inode number */ 236 __le32 ino; /* inode number */
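
The new F2FS_BYTES_TO_BLK/F2FS_BLK_TO_BYTES helpers are plain shifts by F2FS_BLKSIZE_BITS, so F2FS_BYTES_TO_BLK(8192) is 2 and F2FS_BLK_TO_BYTES(3) is 12288. A small illustrative wrapper:

#include <linux/f2fs_fs.h>
#include <linux/types.h>

/* round a byte count up to whole 4KB blocks */
static inline u64 example_size_to_blocks(u64 bytes)
{
	return F2FS_BYTES_TO_BLK(bytes + F2FS_BLKSIZE - 1);
}
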
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 09bb7a18d287..043f3283b71c 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -726,7 +726,9 @@ extern int fb_videomode_from_videomode(const struct videomode *vm,
726 struct fb_videomode *fbmode); 726 struct fb_videomode *fbmode);
727 727
728/* drivers/video/modedb.c */ 728/* drivers/video/modedb.c */
729#define VESA_MODEDB_SIZE 34 729#define VESA_MODEDB_SIZE 43
730#define DMT_SIZE 0x50
731
730extern void fb_var_to_videomode(struct fb_videomode *mode, 732extern void fb_var_to_videomode(struct fb_videomode *mode,
731 const struct fb_var_screeninfo *var); 733 const struct fb_var_screeninfo *var);
732extern void fb_videomode_to_var(struct fb_var_screeninfo *var, 734extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
@@ -777,9 +779,17 @@ struct fb_videomode {
777 u32 flag; 779 u32 flag;
778}; 780};
779 781
782struct dmt_videomode {
783 u32 dmt_id;
784 u32 std_2byte_code;
785 u32 cvt_3byte_code;
786 const struct fb_videomode *mode;
787};
788
780extern const char *fb_mode_option; 789extern const char *fb_mode_option;
781extern const struct fb_videomode vesa_modes[]; 790extern const struct fb_videomode vesa_modes[];
782extern const struct fb_videomode cea_modes[64]; 791extern const struct fb_videomode cea_modes[64];
792extern const struct dmt_videomode dmt_modes[];
783 793
784struct fb_modelist { 794struct fb_modelist {
785 struct list_head list; 795 struct list_head list;
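
dmt_modes[] exposes the VESA DMT table together with the 2-byte standard and 3-byte CVT codes. A lookup sketch, assuming the array has DMT_SIZE entries and that entries without a detailed timing leave ->mode NULL:

#include <linux/fb.h>

static const struct fb_videomode *example_find_dmt(u32 dmt_id)
{
	int i;

	for (i = 0; i < DMT_SIZE; i++)
		if (dmt_modes[i].dmt_id == dmt_id)
			return dmt_modes[i].mode;
	return NULL;
}
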
diff --git a/include/linux/fec.h b/include/linux/fec.h
index bcff455d1d53..1454a503622d 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -19,6 +19,7 @@
19struct fec_platform_data { 19struct fec_platform_data {
20 phy_interface_t phy; 20 phy_interface_t phy;
21 unsigned char mac[ETH_ALEN]; 21 unsigned char mac[ETH_ALEN];
22 void (*sleep_mode_enable)(int enabled);
22}; 23};
23 24
24#endif 25#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b49842fe203f..a9250b2a11ba 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -34,6 +34,7 @@
34#include <asm/byteorder.h> 34#include <asm/byteorder.h>
35#include <uapi/linux/fs.h> 35#include <uapi/linux/fs.h>
36 36
37struct backing_dev_info;
37struct export_operations; 38struct export_operations;
38struct hd_geometry; 39struct hd_geometry;
39struct iovec; 40struct iovec;
@@ -50,6 +51,7 @@ struct swap_info_struct;
50struct seq_file; 51struct seq_file;
51struct workqueue_struct; 52struct workqueue_struct;
52struct iov_iter; 53struct iov_iter;
54struct vm_fault;
53 55
54extern void __init inode_init(void); 56extern void __init inode_init(void);
55extern void __init inode_init_early(void); 57extern void __init inode_init_early(void);
@@ -135,7 +137,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
135#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) 137#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
136 138
137/* File was opened by fanotify and shouldn't generate fanotify events */ 139/* File was opened by fanotify and shouldn't generate fanotify events */
138#define FMODE_NONOTIFY ((__force fmode_t)0x1000000) 140#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
139 141
140/* 142/*
141 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector 143 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
@@ -360,8 +362,6 @@ struct address_space_operations {
360 int (*releasepage) (struct page *, gfp_t); 362 int (*releasepage) (struct page *, gfp_t);
361 void (*freepage)(struct page *); 363 void (*freepage)(struct page *);
362 ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset); 364 ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
363 int (*get_xip_mem)(struct address_space *, pgoff_t, int,
364 void **, unsigned long *);
365 /* 365 /*
366 * migrate the contents of a page to the specified target. If 366 * migrate the contents of a page to the specified target. If
367 * migrate_mode is MIGRATE_ASYNC, it must not block. 367 * migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -394,14 +394,12 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
394 loff_t pos, unsigned len, unsigned copied, 394 loff_t pos, unsigned len, unsigned copied,
395 struct page *page, void *fsdata); 395 struct page *page, void *fsdata);
396 396
397struct backing_dev_info;
398struct address_space { 397struct address_space {
399 struct inode *host; /* owner: inode, block_device */ 398 struct inode *host; /* owner: inode, block_device */
400 struct radix_tree_root page_tree; /* radix tree of all pages */ 399 struct radix_tree_root page_tree; /* radix tree of all pages */
401 spinlock_t tree_lock; /* and lock protecting it */ 400 spinlock_t tree_lock; /* and lock protecting it */
402 atomic_t i_mmap_writable;/* count VM_SHARED mappings */ 401 atomic_t i_mmap_writable;/* count VM_SHARED mappings */
403 struct rb_root i_mmap; /* tree of private and shared mappings */ 402 struct rb_root i_mmap; /* tree of private and shared mappings */
404 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
405 struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ 403 struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
406 /* Protected by tree_lock together with the radix tree */ 404 /* Protected by tree_lock together with the radix tree */
407 unsigned long nrpages; /* number of total pages */ 405 unsigned long nrpages; /* number of total pages */
@@ -409,7 +407,6 @@ struct address_space {
409 pgoff_t writeback_index;/* writeback starts here */ 407 pgoff_t writeback_index;/* writeback starts here */
410 const struct address_space_operations *a_ops; /* methods */ 408 const struct address_space_operations *a_ops; /* methods */
411 unsigned long flags; /* error bits/gfp mask */ 409 unsigned long flags; /* error bits/gfp mask */
412 struct backing_dev_info *backing_dev_info; /* device readahead, etc */
413 spinlock_t private_lock; /* for use by the address_space */ 410 spinlock_t private_lock; /* for use by the address_space */
414 struct list_head private_list; /* ditto */ 411 struct list_head private_list; /* ditto */
415 void *private_data; /* ditto */ 412 void *private_data; /* ditto */
@@ -493,8 +490,7 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
493 */ 490 */
494static inline int mapping_mapped(struct address_space *mapping) 491static inline int mapping_mapped(struct address_space *mapping)
495{ 492{
496 return !RB_EMPTY_ROOT(&mapping->i_mmap) || 493 return !RB_EMPTY_ROOT(&mapping->i_mmap);
497 !list_empty(&mapping->i_mmap_nonlinear);
498} 494}
499 495
500/* 496/*
@@ -625,7 +621,7 @@ struct inode {
625 atomic_t i_readcount; /* struct files open RO */ 621 atomic_t i_readcount; /* struct files open RO */
626#endif 622#endif
627 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 623 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
628 struct file_lock *i_flock; 624 struct file_lock_context *i_flctx;
629 struct address_space i_data; 625 struct address_space i_data;
630 struct list_head i_devices; 626 struct list_head i_devices;
631 union { 627 union {
@@ -875,6 +871,7 @@ static inline struct file *get_file(struct file *f)
875#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */ 871#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
876#define FL_UNLOCK_PENDING 512 /* Lease is being broken */ 872#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
877#define FL_OFDLCK 1024 /* lock is "owned" by struct file */ 873#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
874#define FL_LAYOUT 2048 /* outstanding pNFS layout */
878 875
879/* 876/*
880 * Special return value from posix_lock_file() and vfs_lock_file() for 877 * Special return value from posix_lock_file() and vfs_lock_file() for
@@ -885,6 +882,8 @@ static inline struct file *get_file(struct file *f)
885/* legacy typedef, should eventually be removed */ 882/* legacy typedef, should eventually be removed */
886typedef void *fl_owner_t; 883typedef void *fl_owner_t;
887 884
885struct file_lock;
886
888struct file_lock_operations { 887struct file_lock_operations {
889 void (*fl_copy_lock)(struct file_lock *, struct file_lock *); 888 void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
890 void (*fl_release_private)(struct file_lock *); 889 void (*fl_release_private)(struct file_lock *);
@@ -898,7 +897,7 @@ struct lock_manager_operations {
898 void (*lm_notify)(struct file_lock *); /* unblock callback */ 897 void (*lm_notify)(struct file_lock *); /* unblock callback */
899 int (*lm_grant)(struct file_lock *, int); 898 int (*lm_grant)(struct file_lock *, int);
900 bool (*lm_break)(struct file_lock *); 899 bool (*lm_break)(struct file_lock *);
901 int (*lm_change)(struct file_lock **, int, struct list_head *); 900 int (*lm_change)(struct file_lock *, int, struct list_head *);
902 void (*lm_setup)(struct file_lock *, void **); 901 void (*lm_setup)(struct file_lock *, void **);
903}; 902};
904 903
@@ -923,17 +922,17 @@ int locks_in_grace(struct net *);
923 * FIXME: should we create a separate "struct lock_request" to help distinguish 922 * FIXME: should we create a separate "struct lock_request" to help distinguish
924 * these two uses? 923 * these two uses?
925 * 924 *
926 * The i_flock list is ordered by: 925 * The various i_flctx lists are ordered by:
927 * 926 *
928 * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX 927 * 1) lock owner
929 * 2) lock owner 928 * 2) lock range start
930 * 3) lock range start 929 * 3) lock range end
931 * 4) lock range end
932 * 930 *
933 * Obviously, the last two criteria only matter for POSIX locks. 931 * Obviously, the last two criteria only matter for POSIX locks.
934 */ 932 */
935struct file_lock { 933struct file_lock {
936 struct file_lock *fl_next; /* singly linked list for this inode */ 934 struct file_lock *fl_next; /* singly linked list for this inode */
935 struct list_head fl_list; /* link into file_lock_context */
937 struct hlist_node fl_link; /* node in global lists */ 936 struct hlist_node fl_link; /* node in global lists */
938 struct list_head fl_block; /* circular list of blocked processes */ 937 struct list_head fl_block; /* circular list of blocked processes */
939 fl_owner_t fl_owner; 938 fl_owner_t fl_owner;
@@ -964,6 +963,16 @@ struct file_lock {
964 } fl_u; 963 } fl_u;
965}; 964};
966 965
966struct file_lock_context {
967 spinlock_t flc_lock;
968 struct list_head flc_flock;
969 struct list_head flc_posix;
970 struct list_head flc_lease;
971 int flc_flock_cnt;
972 int flc_posix_cnt;
973 int flc_lease_cnt;
974};
975
967/* The following constant reflects the upper bound of the file/locking space */ 976/* The following constant reflects the upper bound of the file/locking space */
968#ifndef OFFSET_MAX 977#ifndef OFFSET_MAX
969#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) 978#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
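
With i_flock replaced by i_flctx, per-inode lock state now lives on the three flc_* lists and is protected by flc_lock. A sketch of walking the lease list, using the same lockless emptiness check the break_lease() helpers further down rely on:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static int example_count_leases(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;
	int count = 0;

	/* lockless fast path */
	if (!ctx || list_empty_careful(&ctx->flc_lease))
		return 0;

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_lease, fl_list)
		count++;
	spin_unlock(&ctx->flc_lock);

	return count;
}
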
@@ -990,6 +999,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
990extern int fcntl_getlease(struct file *filp); 999extern int fcntl_getlease(struct file *filp);
991 1000
992/* fs/locks.c */ 1001/* fs/locks.c */
1002void locks_free_lock_context(struct file_lock_context *ctx);
993void locks_free_lock(struct file_lock *fl); 1003void locks_free_lock(struct file_lock *fl);
994extern void locks_init_lock(struct file_lock *); 1004extern void locks_init_lock(struct file_lock *);
995extern struct file_lock * locks_alloc_lock(void); 1005extern struct file_lock * locks_alloc_lock(void);
@@ -1010,7 +1020,7 @@ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int t
1010extern void lease_get_mtime(struct inode *, struct timespec *time); 1020extern void lease_get_mtime(struct inode *, struct timespec *time);
1011extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); 1021extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
1012extern int vfs_setlease(struct file *, long, struct file_lock **, void **); 1022extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
1013extern int lease_modify(struct file_lock **, int, struct list_head *); 1023extern int lease_modify(struct file_lock *, int, struct list_head *);
1014#else /* !CONFIG_FILE_LOCKING */ 1024#else /* !CONFIG_FILE_LOCKING */
1015static inline int fcntl_getlk(struct file *file, unsigned int cmd, 1025static inline int fcntl_getlk(struct file *file, unsigned int cmd,
1016 struct flock __user *user) 1026 struct flock __user *user)
@@ -1047,6 +1057,11 @@ static inline int fcntl_getlease(struct file *filp)
1047 return F_UNLCK; 1057 return F_UNLCK;
1048} 1058}
1049 1059
1060static inline void
1061locks_free_lock_context(struct file_lock_context *ctx)
1062{
1063}
1064
1050static inline void locks_init_lock(struct file_lock *fl) 1065static inline void locks_init_lock(struct file_lock *fl)
1051{ 1066{
1052 return; 1067 return;
@@ -1137,7 +1152,7 @@ static inline int vfs_setlease(struct file *filp, long arg,
1137 return -EINVAL; 1152 return -EINVAL;
1138} 1153}
1139 1154
1140static inline int lease_modify(struct file_lock **before, int arg, 1155static inline int lease_modify(struct file_lock *fl, int arg,
1141 struct list_head *dispose) 1156 struct list_head *dispose)
1142{ 1157{
1143 return -EINVAL; 1158 return -EINVAL;
@@ -1184,8 +1199,6 @@ struct mm_struct;
1184#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ 1199#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
1185#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ 1200#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
1186 1201
1187extern struct list_head super_blocks;
1188extern spinlock_t sb_lock;
1189 1202
1190/* Possible states of 'frozen' field */ 1203/* Possible states of 'frozen' field */
1191enum { 1204enum {
@@ -1502,6 +1515,26 @@ struct block_device_operations;
1502#define HAVE_COMPAT_IOCTL 1 1515#define HAVE_COMPAT_IOCTL 1
1503#define HAVE_UNLOCKED_IOCTL 1 1516#define HAVE_UNLOCKED_IOCTL 1
1504 1517
1518/*
1519 * These flags let !MMU mmap() govern direct device mapping vs immediate
1520 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
1521 *
1522 * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
1523 * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
1524 * NOMMU_MAP_READ: Can be mapped for reading
1525 * NOMMU_MAP_WRITE: Can be mapped for writing
1526 * NOMMU_MAP_EXEC: Can be mapped for execution
1527 */
1528#define NOMMU_MAP_COPY 0x00000001
1529#define NOMMU_MAP_DIRECT 0x00000008
1530#define NOMMU_MAP_READ VM_MAYREAD
1531#define NOMMU_MAP_WRITE VM_MAYWRITE
1532#define NOMMU_MAP_EXEC VM_MAYEXEC
1533
1534#define NOMMU_VMFLAGS \
1535 (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
1536
1537
1505struct iov_iter; 1538struct iov_iter;
1506 1539
1507struct file_operations { 1540struct file_operations {
@@ -1536,6 +1569,9 @@ struct file_operations {
1536 long (*fallocate)(struct file *file, int mode, loff_t offset, 1569 long (*fallocate)(struct file *file, int mode, loff_t offset,
1537 loff_t len); 1570 loff_t len);
1538 void (*show_fdinfo)(struct seq_file *m, struct file *f); 1571 void (*show_fdinfo)(struct seq_file *m, struct file *f);
1572#ifndef CONFIG_MMU
1573 unsigned (*mmap_capabilities)(struct file *);
1574#endif
1539}; 1575};
1540 1576
1541struct inode_operations { 1577struct inode_operations {
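
The !MMU hook added to file_operations lets a driver advertise its mapping abilities with the new NOMMU_MAP_* flags. A minimal sketch of a driver exposing directly mappable, read-only memory:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

#ifndef CONFIG_MMU
static unsigned example_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ;
}

static const struct file_operations example_fops = {
	.owner			= THIS_MODULE,
	.mmap_capabilities	= example_mmap_capabilities,
};
#endif
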
@@ -1618,8 +1654,10 @@ struct super_operations {
1618 struct dquot **(*get_dquots)(struct inode *); 1654 struct dquot **(*get_dquots)(struct inode *);
1619#endif 1655#endif
1620 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); 1656 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
1621 long (*nr_cached_objects)(struct super_block *, int); 1657 long (*nr_cached_objects)(struct super_block *,
1622 long (*free_cached_objects)(struct super_block *, long, int); 1658 struct shrink_control *);
1659 long (*free_cached_objects)(struct super_block *,
1660 struct shrink_control *);
1623}; 1661};
1624 1662
1625/* 1663/*
@@ -1638,6 +1676,11 @@ struct super_operations {
1638#define S_IMA 1024 /* Inode has an associated IMA struct */ 1676#define S_IMA 1024 /* Inode has an associated IMA struct */
1639#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ 1677#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
1640#define S_NOSEC 4096 /* no suid or xattr security attributes */ 1678#define S_NOSEC 4096 /* no suid or xattr security attributes */
1679#ifdef CONFIG_FS_DAX
1680#define S_DAX 8192 /* Direct Access, avoiding the page cache */
1681#else
1682#define S_DAX 0 /* Make all the DAX code disappear */
1683#endif
1641 1684
1642/* 1685/*
1643 * Note that nosuid etc flags are inode-specific: setting some file-system 1686 * Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1675,6 +1718,7 @@ struct super_operations {
1675#define IS_IMA(inode) ((inode)->i_flags & S_IMA) 1718#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
1676#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) 1719#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
1677#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) 1720#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
1721#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
1678 1722
1679#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ 1723#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
1680 (inode)->i_rdev == WHITEOUT_DEV) 1724 (inode)->i_rdev == WHITEOUT_DEV)
@@ -1959,7 +2003,7 @@ static inline int locks_verify_truncate(struct inode *inode,
1959 struct file *filp, 2003 struct file *filp,
1960 loff_t size) 2004 loff_t size)
1961{ 2005{
1962 if (inode->i_flock && mandatory_lock(inode)) 2006 if (inode->i_flctx && mandatory_lock(inode))
1963 return locks_mandatory_area( 2007 return locks_mandatory_area(
1964 FLOCK_VERIFY_WRITE, inode, filp, 2008 FLOCK_VERIFY_WRITE, inode, filp,
1965 size < inode->i_size ? size : inode->i_size, 2009 size < inode->i_size ? size : inode->i_size,
@@ -1973,11 +2017,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
1973{ 2017{
1974 /* 2018 /*
1975 * Since this check is lockless, we must ensure that any refcounts 2019 * Since this check is lockless, we must ensure that any refcounts
1976 * taken are done before checking inode->i_flock. Otherwise, we could 2020 * taken are done before checking i_flctx->flc_lease. Otherwise, we
1977 * end up racing with tasks trying to set a new lease on this file. 2021 * could end up racing with tasks trying to set a new lease on this
2022 * file.
1978 */ 2023 */
1979 smp_mb(); 2024 smp_mb();
1980 if (inode->i_flock) 2025 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
1981 return __break_lease(inode, mode, FL_LEASE); 2026 return __break_lease(inode, mode, FL_LEASE);
1982 return 0; 2027 return 0;
1983} 2028}
@@ -1986,11 +2031,12 @@ static inline int break_deleg(struct inode *inode, unsigned int mode)
1986{ 2031{
1987 /* 2032 /*
1988 * Since this check is lockless, we must ensure that any refcounts 2033 * Since this check is lockless, we must ensure that any refcounts
1989 * taken are done before checking inode->i_flock. Otherwise, we could 2034 * taken are done before checking i_flctx->flc_lease. Otherwise, we
1990 * end up racing with tasks trying to set a new lease on this file. 2035 * could end up racing with tasks trying to set a new lease on this
2036 * file.
1991 */ 2037 */
1992 smp_mb(); 2038 smp_mb();
1993 if (inode->i_flock) 2039 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
1994 return __break_lease(inode, mode, FL_DELEG); 2040 return __break_lease(inode, mode, FL_DELEG);
1995 return 0; 2041 return 0;
1996} 2042}
@@ -2017,6 +2063,16 @@ static inline int break_deleg_wait(struct inode **delegated_inode)
2017 return ret; 2063 return ret;
2018} 2064}
2019 2065
2066static inline int break_layout(struct inode *inode, bool wait)
2067{
2068 smp_mb();
2069 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
2070 return __break_lease(inode,
2071 wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
2072 FL_LAYOUT);
2073 return 0;
2074}
2075
2020#else /* !CONFIG_FILE_LOCKING */ 2076#else /* !CONFIG_FILE_LOCKING */
2021static inline int locks_mandatory_locked(struct file *file) 2077static inline int locks_mandatory_locked(struct file *file)
2022{ 2078{
@@ -2072,6 +2128,11 @@ static inline int break_deleg_wait(struct inode **delegated_inode)
2072 return 0; 2128 return 0;
2073} 2129}
2074 2130
2131static inline int break_layout(struct inode *inode, bool wait)
2132{
2133 return 0;
2134}
2135
2075#endif /* CONFIG_FILE_LOCKING */ 2136#endif /* CONFIG_FILE_LOCKING */
2076 2137
2077/* fs/open.c */ 2138/* fs/open.c */
@@ -2476,8 +2537,6 @@ extern int sb_min_blocksize(struct super_block *, int);
2476 2537
2477extern int generic_file_mmap(struct file *, struct vm_area_struct *); 2538extern int generic_file_mmap(struct file *, struct vm_area_struct *);
2478extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); 2539extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
2479extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
2480 unsigned long size, pgoff_t pgoff);
2481int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); 2540int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
2482extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); 2541extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
2483extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); 2542extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
@@ -2522,19 +2581,13 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
2522extern int generic_file_open(struct inode * inode, struct file * filp); 2581extern int generic_file_open(struct inode * inode, struct file * filp);
2523extern int nonseekable_open(struct inode * inode, struct file * filp); 2582extern int nonseekable_open(struct inode * inode, struct file * filp);
2524 2583
2525#ifdef CONFIG_FS_XIP 2584ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *,
2526extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len, 2585 loff_t, get_block_t, dio_iodone_t, int flags);
2527 loff_t *ppos); 2586int dax_clear_blocks(struct inode *, sector_t block, long size);
2528extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma); 2587int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
2529extern ssize_t xip_file_write(struct file *filp, const char __user *buf, 2588int dax_truncate_page(struct inode *, loff_t from, get_block_t);
2530 size_t len, loff_t *ppos); 2589int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
2531extern int xip_truncate_page(struct address_space *mapping, loff_t from); 2590#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
2532#else
2533static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
2534{
2535 return 0;
2536}
2537#endif
2538 2591
2539#ifdef CONFIG_BLOCK 2592#ifdef CONFIG_BLOCK
2540typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, 2593typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
@@ -2691,6 +2744,11 @@ extern int generic_show_options(struct seq_file *m, struct dentry *root);
2691extern void save_mount_options(struct super_block *sb, char *options); 2744extern void save_mount_options(struct super_block *sb, char *options);
2692extern void replace_mount_options(struct super_block *sb, char *options); 2745extern void replace_mount_options(struct super_block *sb, char *options);
2693 2746
2747static inline bool io_is_direct(struct file *filp)
2748{
2749 return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
2750}
2751
2694static inline ino_t parent_ino(struct dentry *dentry) 2752static inline ino_t parent_ino(struct dentry *dentry)
2695{ 2753{
2696 ino_t res; 2754 ino_t res;
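
io_is_direct() above folds DAX inodes into the O_DIRECT test, so an I/O path can pick the uncached route with a single check. Minimal sketch:

#include <linux/fs.h>

static bool example_use_uncached_path(struct kiocb *iocb)
{
	/* true for O_DIRECT opens and for S_DAX inodes alike */
	return io_is_direct(iocb->ki_filp);
}
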
diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
index f66525e72ccf..9dc4e0384bfb 100644
--- a/include/linux/fs_pin.h
+++ b/include/linux/fs_pin.h
@@ -1,17 +1,22 @@
1#include <linux/fs.h> 1#include <linux/wait.h>
2 2
3struct fs_pin { 3struct fs_pin {
4 atomic_long_t count; 4 wait_queue_head_t wait;
5 union { 5 int done;
6 struct { 6 struct hlist_node s_list;
7 struct hlist_node s_list; 7 struct hlist_node m_list;
8 struct hlist_node m_list;
9 };
10 struct rcu_head rcu;
11 };
12 void (*kill)(struct fs_pin *); 8 void (*kill)(struct fs_pin *);
13}; 9};
14 10
15void pin_put(struct fs_pin *); 11struct vfsmount;
12
13static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
14{
15 init_waitqueue_head(&p->wait);
16 p->kill = kill;
17}
18
16void pin_remove(struct fs_pin *); 19void pin_remove(struct fs_pin *);
20void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *);
17void pin_insert(struct fs_pin *, struct vfsmount *); 21void pin_insert(struct fs_pin *, struct vfsmount *);
22void pin_kill(struct fs_pin *);
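
fs_pin objects are now initialised with init_fs_pin() and carry a waitqueue instead of a refcount. A rough sketch of the intended shape, with the lifetime details (when exactly ->kill runs and who frees the object) left to the callers as in fs/fs_pin.c:

#include <linux/fs_pin.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct example_pin {
	struct fs_pin pin;
	/* per-object state tied to a vfsmount would live here */
};

/* invoked via pin_kill() when the pinned mount is being torn down */
static void example_kill(struct fs_pin *p)
{
	struct example_pin *ep = container_of(p, struct example_pin, pin);

	pin_remove(p);		/* unhash from the s_list/m_list */
	kfree(ep);
}

static void example_attach(struct vfsmount *mnt)
{
	struct example_pin *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return;
	init_fs_pin(&ep->pin, example_kill);
	pin_insert(&ep->pin, mnt);
}
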
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 1c804b057fb1..7ee1774edee5 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
101 new_dir_mask |= FS_ISDIR; 101 new_dir_mask |= FS_ISDIR;
102 } 102 }
103 103
104 fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); 104 fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
105 fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); 105 fs_cookie);
106 fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
107 fs_cookie);
106 108
107 if (target) 109 if (target)
108 fsnotify_link_count(target); 110 fsnotify_link_count(target);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 0bebb5c348b8..c674ee8f7fca 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -44,6 +44,10 @@ const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
44const char *ftrace_print_hex_seq(struct trace_seq *p, 44const char *ftrace_print_hex_seq(struct trace_seq *p,
45 const unsigned char *buf, int len); 45 const unsigned char *buf, int len);
46 46
47const char *ftrace_print_array_seq(struct trace_seq *p,
48 const void *buf, int buf_len,
49 size_t el_size);
50
47struct trace_iterator; 51struct trace_iterator;
48struct trace_event; 52struct trace_event;
49 53
@@ -595,7 +599,7 @@ extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
595 char *filter_str); 599 char *filter_str);
596extern void ftrace_profile_free_filter(struct perf_event *event); 600extern void ftrace_profile_free_filter(struct perf_event *event);
597extern void *perf_trace_buf_prepare(int size, unsigned short type, 601extern void *perf_trace_buf_prepare(int size, unsigned short type,
598 struct pt_regs *regs, int *rctxp); 602 struct pt_regs **regs, int *rctxp);
599 603
600static inline void 604static inline void
601perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, 605perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 55b685719d52..09460d6d6682 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -11,6 +11,10 @@ extern void genl_unlock(void);
11extern int lockdep_genl_is_held(void); 11extern int lockdep_genl_is_held(void);
12#endif 12#endif
13 13
14/* for synchronisation between af_netlink and genetlink */
15extern atomic_t genl_sk_destructing_cnt;
16extern wait_queue_head_t genl_sk_destructing_waitq;
17
14/** 18/**
15 * rcu_dereference_genl - rcu_dereference with debug checking 19 * rcu_dereference_genl - rcu_dereference with debug checking
16 * @p: The pointer to read, prior to dereferencing 20 * @p: The pointer to read, prior to dereferencing
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b840e3b2770d..51bd1e72a917 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -334,18 +334,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
334} 334}
335extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, 335extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
336 struct vm_area_struct *vma, unsigned long addr, 336 struct vm_area_struct *vma, unsigned long addr,
337 int node); 337 int node, bool hugepage);
338#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
339 alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
338#else 340#else
339#define alloc_pages(gfp_mask, order) \ 341#define alloc_pages(gfp_mask, order) \
340 alloc_pages_node(numa_node_id(), gfp_mask, order) 342 alloc_pages_node(numa_node_id(), gfp_mask, order)
341#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \ 343#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
344 alloc_pages(gfp_mask, order)
345#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
342 alloc_pages(gfp_mask, order) 346 alloc_pages(gfp_mask, order)
343#endif 347#endif
344#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 348#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
345#define alloc_page_vma(gfp_mask, vma, addr) \ 349#define alloc_page_vma(gfp_mask, vma, addr) \
346 alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) 350 alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
347#define alloc_page_vma_node(gfp_mask, vma, addr, node) \ 351#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
348 alloc_pages_vma(gfp_mask, 0, vma, addr, node) 352 alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
349 353
350extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order); 354extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
351extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, 355extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
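
alloc_hugepage_vma() wraps alloc_pages_vma() with hugepage=true so THP allocations can prefer the local node. Illustrative only; GFP_TRANSHUGE and HPAGE_PMD_ORDER come from gfp.h and huge_mm.h rather than this hunk:

#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>

static struct page *example_alloc_thp(struct vm_area_struct *vma,
				      unsigned long addr)
{
	return alloc_hugepage_vma(GFP_TRANSHUGE, vma, addr, HPAGE_PMD_ORDER);
}
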
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index fd85cb120ee0..45afc2dee560 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -340,31 +340,32 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
340 * etc. 340 * etc.
341 */ 341 */
342#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags) 342#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
343#define gpiod_get(varargs...) __gpiod_get(varargs, 0) 343#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
344#define __gpiod_get_index(dev, con_id, index, flags, ...) \ 344#define __gpiod_get_index(dev, con_id, index, flags, ...) \
345 __gpiod_get_index(dev, con_id, index, flags) 345 __gpiod_get_index(dev, con_id, index, flags)
346#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0) 346#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
347#define __gpiod_get_optional(dev, con_id, flags, ...) \ 347#define __gpiod_get_optional(dev, con_id, flags, ...) \
348 __gpiod_get_optional(dev, con_id, flags) 348 __gpiod_get_optional(dev, con_id, flags)
349#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0) 349#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
350#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \ 350#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
351 __gpiod_get_index_optional(dev, con_id, index, flags) 351 __gpiod_get_index_optional(dev, con_id, index, flags)
352#define gpiod_get_index_optional(varargs...) \ 352#define gpiod_get_index_optional(varargs...) \
353 __gpiod_get_index_optional(varargs, 0) 353 __gpiod_get_index_optional(varargs, GPIOD_ASIS)
354#define __devm_gpiod_get(dev, con_id, flags, ...) \ 354#define __devm_gpiod_get(dev, con_id, flags, ...) \
355 __devm_gpiod_get(dev, con_id, flags) 355 __devm_gpiod_get(dev, con_id, flags)
356#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0) 356#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
357#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \ 357#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
358 __devm_gpiod_get_index(dev, con_id, index, flags) 358 __devm_gpiod_get_index(dev, con_id, index, flags)
359#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0) 359#define devm_gpiod_get_index(varargs...) \
360 __devm_gpiod_get_index(varargs, GPIOD_ASIS)
360#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \ 361#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
361 __devm_gpiod_get_optional(dev, con_id, flags) 362 __devm_gpiod_get_optional(dev, con_id, flags)
362#define devm_gpiod_get_optional(varargs...) \ 363#define devm_gpiod_get_optional(varargs...) \
363 __devm_gpiod_get_optional(varargs, 0) 364 __devm_gpiod_get_optional(varargs, GPIOD_ASIS)
364#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \ 365#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
365 __devm_gpiod_get_index_optional(dev, con_id, index, flags) 366 __devm_gpiod_get_index_optional(dev, con_id, index, flags)
366#define devm_gpiod_get_index_optional(varargs...) \ 367#define devm_gpiod_get_index_optional(varargs...) \
367 __devm_gpiod_get_index_optional(varargs, 0) 368 __devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
368 369
369#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) 370#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
370 371
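
The gpiod_get() wrappers now default an omitted flags argument to GPIOD_ASIS rather than the bare 0. Both calling forms below stay valid; "reset" is a made-up consumer name and GPIOD_OUT_LOW is the usual explicit-flags variant:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static struct gpio_desc *example_get_reset(struct device *dev)
{
	/* two-argument form: flags now default to GPIOD_ASIS */
	return gpiod_get(dev, "reset");
}

static struct gpio_desc *example_get_reset_low(struct device *dev)
{
	/* explicit flags still work exactly as before */
	return gpiod_get(dev, "reset", GPIOD_OUT_LOW);
}
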
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index cbb5790a35cd..e9744202fa29 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -25,6 +25,7 @@
25#define __LINUX_HDMI_H_ 25#define __LINUX_HDMI_H_
26 26
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/device.h>
28 29
29enum hdmi_infoframe_type { 30enum hdmi_infoframe_type {
30 HDMI_INFOFRAME_TYPE_VENDOR = 0x81, 31 HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
@@ -52,12 +53,18 @@ enum hdmi_colorspace {
52 HDMI_COLORSPACE_RGB, 53 HDMI_COLORSPACE_RGB,
53 HDMI_COLORSPACE_YUV422, 54 HDMI_COLORSPACE_YUV422,
54 HDMI_COLORSPACE_YUV444, 55 HDMI_COLORSPACE_YUV444,
56 HDMI_COLORSPACE_YUV420,
57 HDMI_COLORSPACE_RESERVED4,
58 HDMI_COLORSPACE_RESERVED5,
59 HDMI_COLORSPACE_RESERVED6,
60 HDMI_COLORSPACE_IDO_DEFINED,
55}; 61};
56 62
57enum hdmi_scan_mode { 63enum hdmi_scan_mode {
58 HDMI_SCAN_MODE_NONE, 64 HDMI_SCAN_MODE_NONE,
59 HDMI_SCAN_MODE_OVERSCAN, 65 HDMI_SCAN_MODE_OVERSCAN,
60 HDMI_SCAN_MODE_UNDERSCAN, 66 HDMI_SCAN_MODE_UNDERSCAN,
67 HDMI_SCAN_MODE_RESERVED,
61}; 68};
62 69
63enum hdmi_colorimetry { 70enum hdmi_colorimetry {
@@ -71,6 +78,7 @@ enum hdmi_picture_aspect {
71 HDMI_PICTURE_ASPECT_NONE, 78 HDMI_PICTURE_ASPECT_NONE,
72 HDMI_PICTURE_ASPECT_4_3, 79 HDMI_PICTURE_ASPECT_4_3,
73 HDMI_PICTURE_ASPECT_16_9, 80 HDMI_PICTURE_ASPECT_16_9,
81 HDMI_PICTURE_ASPECT_RESERVED,
74}; 82};
75 83
76enum hdmi_active_aspect { 84enum hdmi_active_aspect {
@@ -92,12 +100,18 @@ enum hdmi_extended_colorimetry {
92 HDMI_EXTENDED_COLORIMETRY_S_YCC_601, 100 HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
93 HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601, 101 HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
94 HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB, 102 HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
103
104 /* The following EC values are only defined in CEA-861-F. */
105 HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
106 HDMI_EXTENDED_COLORIMETRY_BT2020,
107 HDMI_EXTENDED_COLORIMETRY_RESERVED,
95}; 108};
96 109
97enum hdmi_quantization_range { 110enum hdmi_quantization_range {
98 HDMI_QUANTIZATION_RANGE_DEFAULT, 111 HDMI_QUANTIZATION_RANGE_DEFAULT,
99 HDMI_QUANTIZATION_RANGE_LIMITED, 112 HDMI_QUANTIZATION_RANGE_LIMITED,
100 HDMI_QUANTIZATION_RANGE_FULL, 113 HDMI_QUANTIZATION_RANGE_FULL,
114 HDMI_QUANTIZATION_RANGE_RESERVED,
101}; 115};
102 116
103/* non-uniform picture scaling */ 117/* non-uniform picture scaling */
@@ -114,7 +128,7 @@ enum hdmi_ycc_quantization_range {
114}; 128};
115 129
116enum hdmi_content_type { 130enum hdmi_content_type {
117 HDMI_CONTENT_TYPE_NONE, 131 HDMI_CONTENT_TYPE_GRAPHICS,
118 HDMI_CONTENT_TYPE_PHOTO, 132 HDMI_CONTENT_TYPE_PHOTO,
119 HDMI_CONTENT_TYPE_CINEMA, 133 HDMI_CONTENT_TYPE_CINEMA,
120 HDMI_CONTENT_TYPE_GAME, 134 HDMI_CONTENT_TYPE_GAME,
@@ -194,6 +208,7 @@ enum hdmi_audio_coding_type {
194 HDMI_AUDIO_CODING_TYPE_MLP, 208 HDMI_AUDIO_CODING_TYPE_MLP,
195 HDMI_AUDIO_CODING_TYPE_DST, 209 HDMI_AUDIO_CODING_TYPE_DST,
196 HDMI_AUDIO_CODING_TYPE_WMA_PRO, 210 HDMI_AUDIO_CODING_TYPE_WMA_PRO,
211 HDMI_AUDIO_CODING_TYPE_CXT,
197}; 212};
198 213
199enum hdmi_audio_sample_size { 214enum hdmi_audio_sample_size {
@@ -215,10 +230,25 @@ enum hdmi_audio_sample_frequency {
215}; 230};
216 231
217enum hdmi_audio_coding_type_ext { 232enum hdmi_audio_coding_type_ext {
218 HDMI_AUDIO_CODING_TYPE_EXT_STREAM, 233 /* Refer to Audio Coding Type (CT) field in Data Byte 1 */
234 HDMI_AUDIO_CODING_TYPE_EXT_CT,
235
236 /*
237 * The next three CXT values are defined in CEA-861-E only.
238 * They do not exist in older versions, and in CEA-861-F they are
239 * defined as 'Not in use'.
240 */
219 HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC, 241 HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
220 HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2, 242 HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
221 HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND, 243 HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
244
245 /* The following CXT values are only defined in CEA-861-F. */
246 HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC,
247 HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2,
248 HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC,
249 HDMI_AUDIO_CODING_TYPE_EXT_DRA,
250 HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND,
251 HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10,
222}; 252};
223 253
224struct hdmi_audio_infoframe { 254struct hdmi_audio_infoframe {
@@ -299,5 +329,8 @@ union hdmi_infoframe {
299 329
300ssize_t 330ssize_t
301hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size); 331hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
332int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
333void hdmi_infoframe_log(const char *level, struct device *dev,
334 union hdmi_infoframe *frame);
302 335
303#endif /* _DRM_HDMI_H */ 336#endif /* _DRM_HDMI_H */
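
hdmi_infoframe_unpack() is the inverse of hdmi_infoframe_pack(), and hdmi_infoframe_log() pretty-prints the result, which is why hdmi.h now pulls in device.h. A small sketch for dumping a frame read back from hardware:

#include <linux/hdmi.h>

static int example_dump_infoframe(struct device *dev, void *buf)
{
	union hdmi_infoframe frame;
	int ret;

	ret = hdmi_infoframe_unpack(&frame, buf);
	if (ret < 0)
		return ret;

	hdmi_infoframe_log(KERN_DEBUG, dev, &frame);
	return 0;
}
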
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 06c4607744f6..efc7787a41a8 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -574,7 +574,9 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
574#define HID_GLOBAL_STACK_SIZE 4 574#define HID_GLOBAL_STACK_SIZE 4
575#define HID_COLLECTION_STACK_SIZE 4 575#define HID_COLLECTION_STACK_SIZE 4
576 576
577#define HID_SCAN_FLAG_MT_WIN_8 0x00000001 577#define HID_SCAN_FLAG_MT_WIN_8 BIT(0)
578#define HID_SCAN_FLAG_VENDOR_SPECIFIC BIT(1)
579#define HID_SCAN_FLAG_GD_POINTER BIT(2)
578 580
579struct hid_parser { 581struct hid_parser {
580 struct hid_global global; 582 struct hid_global global;
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index bb9840fd1e18..464f33814a94 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -250,17 +250,29 @@ void host1x_job_unpin(struct host1x_job *job);
250struct host1x_device; 250struct host1x_device;
251 251
252struct host1x_driver { 252struct host1x_driver {
253 struct device_driver driver;
254
253 const struct of_device_id *subdevs; 255 const struct of_device_id *subdevs;
254 struct list_head list; 256 struct list_head list;
255 const char *name;
256 257
257 int (*probe)(struct host1x_device *device); 258 int (*probe)(struct host1x_device *device);
258 int (*remove)(struct host1x_device *device); 259 int (*remove)(struct host1x_device *device);
260 void (*shutdown)(struct host1x_device *device);
259}; 261};
260 262
261int host1x_driver_register(struct host1x_driver *driver); 263static inline struct host1x_driver *
264to_host1x_driver(struct device_driver *driver)
265{
266 return container_of(driver, struct host1x_driver, driver);
267}
268
269int host1x_driver_register_full(struct host1x_driver *driver,
270 struct module *owner);
262void host1x_driver_unregister(struct host1x_driver *driver); 271void host1x_driver_unregister(struct host1x_driver *driver);
263 272
273#define host1x_driver_register(driver) \
274 host1x_driver_register_full(driver, THIS_MODULE)
275
264struct host1x_device { 276struct host1x_device {
265 struct host1x_driver *driver; 277 struct host1x_driver *driver;
266 struct list_head list; 278 struct list_head list;
@@ -272,6 +284,8 @@ struct host1x_device {
272 284
273 struct mutex clients_lock; 285 struct mutex clients_lock;
274 struct list_head clients; 286 struct list_head clients;
287
288 bool registered;
275}; 289};
276 290
277static inline struct host1x_device *to_host1x_device(struct device *dev) 291static inline struct host1x_device *to_host1x_device(struct device *dev)
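
host1x drivers now embed a struct device_driver (where the name lives) and register through the host1x_driver_register() macro, which supplies THIS_MODULE to host1x_driver_register_full(). A skeletal driver, with the subdevs table omitted for brevity:

#include <linux/host1x.h>
#include <linux/init.h>
#include <linux/module.h>

static int example_probe(struct host1x_device *dev)
{
	return 0;
}

static int example_remove(struct host1x_device *dev)
{
	return 0;
}

static struct host1x_driver example_driver = {
	.driver = {
		.name = "example",
	},
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	return host1x_driver_register(&example_driver);
}
module_init(example_init);
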
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index a036d058a249..05f6df1fdf5b 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -170,6 +170,7 @@ enum hrtimer_base_type {
170 * @clock_was_set: Indicates that clock was set from irq context. 170 * @clock_was_set: Indicates that clock was set from irq context.
171 * @expires_next: absolute time of the next event which was scheduled 171 * @expires_next: absolute time of the next event which was scheduled
172 * via clock_set_next_event() 172 * via clock_set_next_event()
173 * @in_hrtirq: hrtimer_interrupt() is currently executing
173 * @hres_active: State of high resolution mode 174 * @hres_active: State of high resolution mode
174 * @hang_detected: The last hrtimer interrupt detected a hang 175 * @hang_detected: The last hrtimer interrupt detected a hang
175 * @nr_events: Total number of hrtimer interrupt events 176 * @nr_events: Total number of hrtimer interrupt events
@@ -185,6 +186,7 @@ struct hrtimer_cpu_base {
185 unsigned int clock_was_set; 186 unsigned int clock_was_set;
186#ifdef CONFIG_HIGH_RES_TIMERS 187#ifdef CONFIG_HIGH_RES_TIMERS
187 ktime_t expires_next; 188 ktime_t expires_next;
189 int in_hrtirq;
188 int hres_active; 190 int hres_active;
189 int hang_detected; 191 int hang_detected;
190 unsigned long nr_events; 192 unsigned long nr_events;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ad9051bab267..f10b20f05159 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -157,6 +157,13 @@ static inline int hpage_nr_pages(struct page *page)
157extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 157extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
158 unsigned long addr, pmd_t pmd, pmd_t *pmdp); 158 unsigned long addr, pmd_t pmd, pmd_t *pmdp);
159 159
160extern struct page *huge_zero_page;
161
162static inline bool is_huge_zero_page(struct page *page)
163{
164 return ACCESS_ONCE(huge_zero_page) == page;
165}
166
160#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 167#else /* CONFIG_TRANSPARENT_HUGEPAGE */
161#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) 168#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
162#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) 169#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -206,6 +213,11 @@ static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_str
206 return 0; 213 return 0;
207} 214}
208 215
216static inline bool is_huge_zero_page(struct page *page)
217{
218 return false;
219}
220
209#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 221#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
210 222
211#endif /* _LINUX_HUGE_MM_H */ 223#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 431b7fc605c9..7b5785032049 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -86,7 +86,7 @@ void free_huge_page(struct page *page);
86pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); 86pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
87#endif 87#endif
88 88
89extern unsigned long hugepages_treat_as_movable; 89extern int hugepages_treat_as_movable;
90extern int sysctl_hugetlb_shm_group; 90extern int sysctl_hugetlb_shm_group;
91extern struct list_head huge_boot_pages; 91extern struct list_head huge_boot_pages;
92 92
@@ -99,9 +99,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
99struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, 99struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
100 int write); 100 int write);
101struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, 101struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
102 pmd_t *pmd, int write); 102 pmd_t *pmd, int flags);
103struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, 103struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
104 pud_t *pud, int write); 104 pud_t *pud, int flags);
105int pmd_huge(pmd_t pmd); 105int pmd_huge(pmd_t pmd);
106int pud_huge(pud_t pmd); 106int pud_huge(pud_t pmd);
107unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 107unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -133,8 +133,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
133static inline void hugetlb_show_meminfo(void) 133static inline void hugetlb_show_meminfo(void)
134{ 134{
135} 135}
136#define follow_huge_pmd(mm, addr, pmd, write) NULL 136#define follow_huge_pmd(mm, addr, pmd, flags) NULL
137#define follow_huge_pud(mm, addr, pud, write) NULL 137#define follow_huge_pud(mm, addr, pud, flags) NULL
138#define prepare_hugepage_range(file, addr, len) (-EINVAL) 138#define prepare_hugepage_range(file, addr, len) (-EINVAL)
139#define pmd_huge(x) 0 139#define pmd_huge(x) 0
140#define pud_huge(x) 0 140#define pud_huge(x) 0
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 914bb08cd738..eb7b414d232b 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -12,8 +12,10 @@
12#ifndef LINUX_HWRANDOM_H_ 12#ifndef LINUX_HWRANDOM_H_
13#define LINUX_HWRANDOM_H_ 13#define LINUX_HWRANDOM_H_
14 14
15#include <linux/completion.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/kref.h>
17 19
18/** 20/**
19 * struct hwrng - Hardware Random Number Generator driver 21 * struct hwrng - Hardware Random Number Generator driver
@@ -44,6 +46,8 @@ struct hwrng {
44 46
45 /* internal. */ 47 /* internal. */
46 struct list_head list; 48 struct list_head list;
49 struct kref ref;
50 struct completion cleanup_done;
47}; 51};
48 52
49/** Register a new Hardware Random Number Generator driver. */ 53/** Register a new Hardware Random Number Generator driver. */
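Note (illustrative, not part of the patch): the new ref/cleanup_done fields enable the usual kref-plus-completion teardown, where the last reference holder wakes an unregister path that waits before the hwrng can be freed. A minimal sketch of that pattern; the example_* helpers are assumptions rather than the core's actual API, and ref/cleanup_done are assumed to have been initialised with kref_init()/init_completion() at registration time:

static void example_hwrng_release(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	complete(&rng->cleanup_done);		/* last user is gone */
}

static void example_hwrng_unregister(struct hwrng *rng)
{
	/* ... unlink rng so no new users can take a reference ... */
	kref_put(&rng->ref, example_hwrng_release);
	wait_for_completion(&rng->cleanup_done);
	/* the driver may now free the structure backing rng */
}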
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 476c685ca6f9..5a2ba674795e 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -57,6 +57,18 @@ struct hv_multipage_buffer {
57 u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT]; 57 u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
58}; 58};
59 59
60/*
61 * Multiple-page buffer array; the pfn array is variable size:
62 * The number of entries in the PFN array is determined by
63 * "len" and "offset".
64 */
65struct hv_mpb_array {
66 /* Length and Offset determines the # of pfns in the array */
67 u32 len;
68 u32 offset;
69 u64 pfn_array[];
70};
71
60/* 0x18 includes the proprietary packet header */ 72/* 0x18 includes the proprietary packet header */
61#define MAX_PAGE_BUFFER_PACKET (0x18 + \ 73#define MAX_PAGE_BUFFER_PACKET (0x18 + \
62 (sizeof(struct hv_page_buffer) * \ 74 (sizeof(struct hv_page_buffer) * \
@@ -722,7 +734,12 @@ struct vmbus_channel {
722 */ 734 */
723 void (*sc_creation_callback)(struct vmbus_channel *new_sc); 735 void (*sc_creation_callback)(struct vmbus_channel *new_sc);
724 736
725 spinlock_t sc_lock; 737 /*
738 * The spinlock to protect the structure. It is being used to protect
739 * test-and-set access to various attributes of the structure as well
740 * as all sc_list operations.
741 */
742 spinlock_t lock;
726 /* 743 /*
727 * All Sub-channels of a primary channel are linked here. 744 * All Sub-channels of a primary channel are linked here.
728 */ 745 */
@@ -814,6 +831,18 @@ struct vmbus_channel_packet_multipage_buffer {
814 struct hv_multipage_buffer range; 831 struct hv_multipage_buffer range;
815} __packed; 832} __packed;
816 833
834/* The format must be the same as struct vmdata_gpa_direct */
835struct vmbus_packet_mpb_array {
836 u16 type;
837 u16 dataoffset8;
838 u16 length8;
839 u16 flags;
840 u64 transactionid;
841 u32 reserved;
842 u32 rangecount; /* Always 1 in this case */
843 struct hv_mpb_array range;
844} __packed;
845
817 846
818extern int vmbus_open(struct vmbus_channel *channel, 847extern int vmbus_open(struct vmbus_channel *channel,
819 u32 send_ringbuffersize, 848 u32 send_ringbuffersize,
@@ -845,6 +874,13 @@ extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
845 u32 bufferlen, 874 u32 bufferlen,
846 u64 requestid); 875 u64 requestid);
847 876
877extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
878 struct vmbus_packet_mpb_array *mpb,
879 u32 desc_size,
880 void *buffer,
881 u32 bufferlen,
882 u64 requestid);
883
848extern int vmbus_establish_gpadl(struct vmbus_channel *channel, 884extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
849 void *kbuffer, 885 void *kbuffer,
850 u32 size, 886 u32 size,
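Note (illustrative, not part of the patch): per the comment above, the number of PFN entries in struct hv_mpb_array follows from len and offset. A minimal sketch of building a single-range descriptor for vmbus_sendpacket_mpb_desc(); the pfn-count calculation, the assumption that buf is physically contiguous (e.g. kmalloc'd), and the assumption that the send path fills the remaining header fields (type, dataoffset8, length8, flags, transactionid) are all mine. Needs <linux/slab.h>, <linux/mm.h> and <linux/io.h>:

static struct vmbus_packet_mpb_array *
example_build_mpb(void *buf, u32 len, gfp_t gfp, u32 *desc_size)
{
	u32 offset = offset_in_page(buf);
	u32 pfn_count = PFN_UP(offset + len);	/* pages spanned by buf..buf+len */
	u64 pfn = virt_to_phys(buf) >> PAGE_SHIFT;
	struct vmbus_packet_mpb_array *desc;
	u32 i;

	*desc_size = sizeof(*desc) + pfn_count * sizeof(u64);
	desc = kzalloc(*desc_size, gfp);
	if (!desc)
		return NULL;

	desc->rangecount = 1;			/* "Always 1 in this case" */
	desc->range.offset = offset;
	desc->range.len = len;
	for (i = 0; i < pfn_count; i++)
		desc->range.pfn_array[i] = pfn + i;

	/* hand desc and *desc_size to vmbus_sendpacket_mpb_desc() */
	return desc;
}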
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e3a1721c8354..7c7695940ddd 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -228,7 +228,9 @@ struct i2c_client {
228 struct device dev; /* the device structure */ 228 struct device dev; /* the device structure */
229 int irq; /* irq issued by device */ 229 int irq; /* irq issued by device */
230 struct list_head detected; 230 struct list_head detected;
231#if IS_ENABLED(CONFIG_I2C_SLAVE)
231 i2c_slave_cb_t slave_cb; /* callback for slave mode */ 232 i2c_slave_cb_t slave_cb; /* callback for slave mode */
233#endif
232}; 234};
233#define to_i2c_client(d) container_of(d, struct i2c_client, dev) 235#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
234 236
@@ -253,6 +255,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
253 255
254/* I2C slave support */ 256/* I2C slave support */
255 257
258#if IS_ENABLED(CONFIG_I2C_SLAVE)
256enum i2c_slave_event { 259enum i2c_slave_event {
257 I2C_SLAVE_REQ_READ_START, 260 I2C_SLAVE_REQ_READ_START,
258 I2C_SLAVE_REQ_READ_END, 261 I2C_SLAVE_REQ_READ_END,
@@ -269,6 +272,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
269{ 272{
270 return client->slave_cb(client, event, val); 273 return client->slave_cb(client, event, val);
271} 274}
275#endif
272 276
273/** 277/**
274 * struct i2c_board_info - template for device creation 278 * struct i2c_board_info - template for device creation
@@ -404,8 +408,10 @@ struct i2c_algorithm {
404 /* To determine what the adapter supports */ 408 /* To determine what the adapter supports */
405 u32 (*functionality) (struct i2c_adapter *); 409 u32 (*functionality) (struct i2c_adapter *);
406 410
411#if IS_ENABLED(CONFIG_I2C_SLAVE)
407 int (*reg_slave)(struct i2c_client *client); 412 int (*reg_slave)(struct i2c_client *client);
408 int (*unreg_slave)(struct i2c_client *client); 413 int (*unreg_slave)(struct i2c_client *client);
414#endif
409}; 415};
410 416
411/** 417/**
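Note (illustrative, not part of the patch): with CONFIG_I2C_SLAVE enabled, adapters implement reg_slave()/unreg_slave() and deliver bus events through i2c_slave_event(), which calls the client's slave_cb. A minimal sketch of such a callback; the parameter types follow the i2c_slave_cb_t typedef, which is not shown in this hunk and is assumed here, and only the two read events visible above are handled:

#if IS_ENABLED(CONFIG_I2C_SLAVE)
static int example_slave_cb(struct i2c_client *client,
			    enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_REQ_READ_START:
		*val = 0x42;		/* first byte the master will read */
		break;
	case I2C_SLAVE_REQ_READ_END:
		/* byte consumed; advance whatever state the backend keeps */
		break;
	default:
		break;
	}
	return 0;
}
#endif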
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
deleted file mode 100644
index d23c3c20b201..000000000000
--- a/include/linux/i2o.h
+++ /dev/null
@@ -1,988 +0,0 @@
1/*
2 * I2O kernel space accessible structures/APIs
3 *
4 * (c) Copyright 1999, 2000 Red Hat Software
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 *************************************************************************
12 *
13 * This header file defined the I2O APIs/structures for use by
14 * the I2O kernel modules.
15 *
16 */
17
18#ifndef _I2O_H
19#define _I2O_H
20
21#include <linux/i2o-dev.h>
22
23/* How many different OSM's are we allowing */
24#define I2O_MAX_DRIVERS 8
25
26#include <linux/pci.h>
27#include <linux/bug.h>
28#include <linux/dma-mapping.h>
29#include <linux/string.h>
30#include <linux/slab.h>
31#include <linux/workqueue.h> /* work_struct */
32#include <linux/mempool.h>
33#include <linux/mutex.h>
34#include <linux/scatterlist.h>
35#include <linux/semaphore.h> /* Needed for MUTEX init macros */
36
37#include <asm/io.h>
38
39/* message queue empty */
40#define I2O_QUEUE_EMPTY 0xffffffff
41
42/*
43 * Cache strategies
44 */
45
46/* The NULL strategy leaves everything up to the controller. This tends to be a
47 * pessimal but functional choice.
48 */
49#define CACHE_NULL 0
50/* Prefetch data when reading. We continually attempt to load the next 32 sectors
51 * into the controller cache.
52 */
53#define CACHE_PREFETCH 1
54/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
55 * into the controller cache. When an I/O is less <= 8K we assume its probably
56 * not sequential and don't prefetch (default)
57 */
58#define CACHE_SMARTFETCH 2
59/* Data is written to the cache and then out on to the disk. The I/O must be
60 * physically on the medium before the write is acknowledged (default without
61 * NVRAM)
62 */
63#define CACHE_WRITETHROUGH 17
64/* Data is written to the cache and then out on to the disk. The controller
65 * is permitted to write back the cache any way it wants. (default if battery
66 * backed NVRAM is present). It can be useful to set this for swap regardless of
67 * battery state.
68 */
69#define CACHE_WRITEBACK 18
70/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
71 * write large I/O's directly to disk bypassing the cache to avoid the extra
72 * memory copy hits. Small writes are writeback cached
73 */
74#define CACHE_SMARTBACK 19
75/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
76 * write large I/O's directly to disk bypassing the cache to avoid the extra
77 * memory copy hits. Small writes are writethrough cached. Suitable for devices
78 * lacking battery backup
79 */
80#define CACHE_SMARTTHROUGH 20
81
82/*
83 * Ioctl structures
84 */
85
86#define BLKI2OGRSTRAT _IOR('2', 1, int)
87#define BLKI2OGWSTRAT _IOR('2', 2, int)
88#define BLKI2OSRSTRAT _IOW('2', 3, int)
89#define BLKI2OSWSTRAT _IOW('2', 4, int)
90
91/*
92 * I2O Function codes
93 */
94
95/*
96 * Executive Class
97 */
98#define I2O_CMD_ADAPTER_ASSIGN 0xB3
99#define I2O_CMD_ADAPTER_READ 0xB2
100#define I2O_CMD_ADAPTER_RELEASE 0xB5
101#define I2O_CMD_BIOS_INFO_SET 0xA5
102#define I2O_CMD_BOOT_DEVICE_SET 0xA7
103#define I2O_CMD_CONFIG_VALIDATE 0xBB
104#define I2O_CMD_CONN_SETUP 0xCA
105#define I2O_CMD_DDM_DESTROY 0xB1
106#define I2O_CMD_DDM_ENABLE 0xD5
107#define I2O_CMD_DDM_QUIESCE 0xC7
108#define I2O_CMD_DDM_RESET 0xD9
109#define I2O_CMD_DDM_SUSPEND 0xAF
110#define I2O_CMD_DEVICE_ASSIGN 0xB7
111#define I2O_CMD_DEVICE_RELEASE 0xB9
112#define I2O_CMD_HRT_GET 0xA8
113#define I2O_CMD_ADAPTER_CLEAR 0xBE
114#define I2O_CMD_ADAPTER_CONNECT 0xC9
115#define I2O_CMD_ADAPTER_RESET 0xBD
116#define I2O_CMD_LCT_NOTIFY 0xA2
117#define I2O_CMD_OUTBOUND_INIT 0xA1
118#define I2O_CMD_PATH_ENABLE 0xD3
119#define I2O_CMD_PATH_QUIESCE 0xC5
120#define I2O_CMD_PATH_RESET 0xD7
121#define I2O_CMD_STATIC_MF_CREATE 0xDD
122#define I2O_CMD_STATIC_MF_RELEASE 0xDF
123#define I2O_CMD_STATUS_GET 0xA0
124#define I2O_CMD_SW_DOWNLOAD 0xA9
125#define I2O_CMD_SW_UPLOAD 0xAB
126#define I2O_CMD_SW_REMOVE 0xAD
127#define I2O_CMD_SYS_ENABLE 0xD1
128#define I2O_CMD_SYS_MODIFY 0xC1
129#define I2O_CMD_SYS_QUIESCE 0xC3
130#define I2O_CMD_SYS_TAB_SET 0xA3
131
132/*
133 * Utility Class
134 */
135#define I2O_CMD_UTIL_NOP 0x00
136#define I2O_CMD_UTIL_ABORT 0x01
137#define I2O_CMD_UTIL_CLAIM 0x09
138#define I2O_CMD_UTIL_RELEASE 0x0B
139#define I2O_CMD_UTIL_PARAMS_GET 0x06
140#define I2O_CMD_UTIL_PARAMS_SET 0x05
141#define I2O_CMD_UTIL_EVT_REGISTER 0x13
142#define I2O_CMD_UTIL_EVT_ACK 0x14
143#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
144#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
145#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
146#define I2O_CMD_UTIL_LOCK 0x17
147#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
148#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
149
150/*
151 * SCSI Host Bus Adapter Class
152 */
153#define I2O_CMD_SCSI_EXEC 0x81
154#define I2O_CMD_SCSI_ABORT 0x83
155#define I2O_CMD_SCSI_BUSRESET 0x27
156
157/*
158 * Bus Adapter Class
159 */
160#define I2O_CMD_BUS_ADAPTER_RESET 0x85
161#define I2O_CMD_BUS_RESET 0x87
162#define I2O_CMD_BUS_SCAN 0x89
163#define I2O_CMD_BUS_QUIESCE 0x8b
164
165/*
166 * Random Block Storage Class
167 */
168#define I2O_CMD_BLOCK_READ 0x30
169#define I2O_CMD_BLOCK_WRITE 0x31
170#define I2O_CMD_BLOCK_CFLUSH 0x37
171#define I2O_CMD_BLOCK_MLOCK 0x49
172#define I2O_CMD_BLOCK_MUNLOCK 0x4B
173#define I2O_CMD_BLOCK_MMOUNT 0x41
174#define I2O_CMD_BLOCK_MEJECT 0x43
175#define I2O_CMD_BLOCK_POWER 0x70
176
177#define I2O_CMD_PRIVATE 0xFF
178
179/* Command status values */
180
181#define I2O_CMD_IN_PROGRESS 0x01
182#define I2O_CMD_REJECTED 0x02
183#define I2O_CMD_FAILED 0x03
184#define I2O_CMD_COMPLETED 0x04
185
186/* I2O API function return values */
187
188#define I2O_RTN_NO_ERROR 0
189#define I2O_RTN_NOT_INIT 1
190#define I2O_RTN_FREE_Q_EMPTY 2
191#define I2O_RTN_TCB_ERROR 3
192#define I2O_RTN_TRANSACTION_ERROR 4
193#define I2O_RTN_ADAPTER_ALREADY_INIT 5
194#define I2O_RTN_MALLOC_ERROR 6
195#define I2O_RTN_ADPTR_NOT_REGISTERED 7
196#define I2O_RTN_MSG_REPLY_TIMEOUT 8
197#define I2O_RTN_NO_STATUS 9
198#define I2O_RTN_NO_FIRM_VER 10
199#define I2O_RTN_NO_LINK_SPEED 11
200
201/* Reply message status defines for all messages */
202
203#define I2O_REPLY_STATUS_SUCCESS 0x00
204#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
205#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
206#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
207#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
208#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
209#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
210#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
211#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
212#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
213#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
214#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
215
216/* Status codes and Error Information for Parameter functions */
217
218#define I2O_PARAMS_STATUS_SUCCESS 0x00
219#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
220#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
221#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
222#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
223#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
224#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
225#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
226#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
227#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
228#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
229#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
230#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
231#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
232#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
233#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
234#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
235
236/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
237 * messages: Table 3-2 Detailed Status Codes.*/
238
239#define I2O_DSC_SUCCESS 0x0000
240#define I2O_DSC_BAD_KEY 0x0002
241#define I2O_DSC_TCL_ERROR 0x0003
242#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
243#define I2O_DSC_NO_SUCH_PAGE 0x0005
244#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
245#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
246#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
247#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
248#define I2O_DSC_DEVICE_LOCKED 0x000B
249#define I2O_DSC_DEVICE_RESET 0x000C
250#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
251#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
252#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
253#define I2O_DSC_INVALID_OFFSET 0x0010
254#define I2O_DSC_INVALID_PARAMETER 0x0011
255#define I2O_DSC_INVALID_REQUEST 0x0012
256#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
257#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
258#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
259#define I2O_DSC_MISSING_PARAMETER 0x0016
260#define I2O_DSC_TIMEOUT 0x0017
261#define I2O_DSC_UNKNOWN_ERROR 0x0018
262#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
263#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
264#define I2O_DSC_DEVICE_BUSY 0x001B
265#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
266
267/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
268 Status Codes.*/
269
270#define I2O_BSA_DSC_SUCCESS 0x0000
271#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
272#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
273#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
274#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
275#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
276#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
277#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
278#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
279#define I2O_BSA_DSC_BUS_FAILURE 0x0009
280#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
281#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
282#define I2O_BSA_DSC_DEVICE_RESET 0x000C
283#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
284#define I2O_BSA_DSC_TIMEOUT 0x000E
285
286/* FailureStatusCodes, Table 3-3 Message Failure Codes */
287
288#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
289#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
290#define I2O_FSC_TRANSPORT_CONGESTION 0x83
291#define I2O_FSC_TRANSPORT_FAILURE 0x84
292#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
293#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
294#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
295#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
296#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
297#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
298#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
299#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
300#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
301#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
302#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
303#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF
304
305/* Device Claim Types */
306#define I2O_CLAIM_PRIMARY 0x01000000
307#define I2O_CLAIM_MANAGEMENT 0x02000000
308#define I2O_CLAIM_AUTHORIZED 0x03000000
309#define I2O_CLAIM_SECONDARY 0x04000000
310
311/* Message header defines for VersionOffset */
312#define I2OVER15 0x0001
313#define I2OVER20 0x0002
314
315/* Default is 1.5 */
316#define I2OVERSION I2OVER15
317
318#define SGL_OFFSET_0 I2OVERSION
319#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
320#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
321#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
322#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
323#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
324#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
325#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
326#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
327#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
328#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
329
330/* Transaction Reply Lists (TRL) Control Word structure */
331#define TRL_SINGLE_FIXED_LENGTH 0x00
332#define TRL_SINGLE_VARIABLE_LENGTH 0x40
333#define TRL_MULTIPLE_FIXED_LENGTH 0x80
334
335 /* msg header defines for MsgFlags */
336#define MSG_STATIC 0x0100
337#define MSG_64BIT_CNTXT 0x0200
338#define MSG_MULTI_TRANS 0x1000
339#define MSG_FAIL 0x2000
340#define MSG_FINAL 0x4000
341#define MSG_REPLY 0x8000
342
343 /* minimum size msg */
344#define THREE_WORD_MSG_SIZE 0x00030000
345#define FOUR_WORD_MSG_SIZE 0x00040000
346#define FIVE_WORD_MSG_SIZE 0x00050000
347#define SIX_WORD_MSG_SIZE 0x00060000
348#define SEVEN_WORD_MSG_SIZE 0x00070000
349#define EIGHT_WORD_MSG_SIZE 0x00080000
350#define NINE_WORD_MSG_SIZE 0x00090000
351#define TEN_WORD_MSG_SIZE 0x000A0000
352#define ELEVEN_WORD_MSG_SIZE 0x000B0000
353#define I2O_MESSAGE_SIZE(x) ((x)<<16)
354
355/* special TID assignments */
356#define ADAPTER_TID 0
357#define HOST_TID 1
358
359/* outbound queue defines */
360#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
361#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
362
363/* inbound queue definitions */
364#define I2O_MSG_INPOOL_MIN 32
365#define I2O_INBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
366
367#define I2O_POST_WAIT_OK 0
368#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
369
370#define I2O_CONTEXT_LIST_MIN_LENGTH 15
371#define I2O_CONTEXT_LIST_USED 0x01
372#define I2O_CONTEXT_LIST_DELETED 0x02
373
374/* timeouts */
375#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
376#define I2O_TIMEOUT_MESSAGE_GET 5
377#define I2O_TIMEOUT_RESET 30
378#define I2O_TIMEOUT_STATUS_GET 5
379#define I2O_TIMEOUT_LCT_GET 360
380#define I2O_TIMEOUT_SCSI_SCB_ABORT 240
381
382/* retries */
383#define I2O_HRT_GET_TRIES 3
384#define I2O_LCT_GET_TRIES 3
385
386/* defines for max_sectors and max_phys_segments */
387#define I2O_MAX_SECTORS 1024
388#define I2O_MAX_SECTORS_LIMITED 128
389#define I2O_MAX_PHYS_SEGMENTS BLK_MAX_SEGMENTS
390
391/*
392 * Message structures
393 */
394struct i2o_message {
395 union {
396 struct {
397 u8 version_offset;
398 u8 flags;
399 u16 size;
400 u32 target_tid:12;
401 u32 init_tid:12;
402 u32 function:8;
403 u32 icntxt; /* initiator context */
404 u32 tcntxt; /* transaction context */
405 } s;
406 u32 head[4];
407 } u;
408 /* List follows */
409 u32 body[0];
410};
411
412/* MFA and I2O message used by mempool */
413struct i2o_msg_mfa {
414 u32 mfa; /* MFA returned by the controller */
415 struct i2o_message msg; /* I2O message */
416};
417
418/*
419 * Each I2O device entity has one of these. There is one per device.
420 */
421struct i2o_device {
422 i2o_lct_entry lct_data; /* Device LCT information */
423
424 struct i2o_controller *iop; /* Controlling IOP */
425 struct list_head list; /* node in IOP devices list */
426
427 struct device device;
428
429 struct mutex lock; /* device lock */
430};
431
432/*
433 * Event structure provided to the event handling function
434 */
435struct i2o_event {
436 struct work_struct work;
437 struct i2o_device *i2o_dev; /* I2O device pointer from which the
438 event reply was initiated */
439 u16 size; /* Size of data in 32-bit words */
440 u32 tcntxt; /* Transaction context used at
441 registration */
442 u32 event_indicator; /* Event indicator from reply */
443 u32 data[0]; /* Event data from reply */
444};
445
446/*
447 * I2O classes which could be handled by the OSM
448 */
449struct i2o_class_id {
450 u16 class_id:12;
451};
452
453/*
454 * I2O driver structure for OSMs
455 */
456struct i2o_driver {
457 char *name; /* OSM name */
458 int context; /* Low 8 bits of the transaction info */
459 struct i2o_class_id *classes; /* I2O classes that this OSM handles */
460
461 /* Message reply handler */
462 int (*reply) (struct i2o_controller *, u32, struct i2o_message *);
463
464 /* Event handler */
465 work_func_t event;
466
467 struct workqueue_struct *event_queue; /* Event queue */
468
469 struct device_driver driver;
470
471 /* notification of changes */
472 void (*notify_controller_add) (struct i2o_controller *);
473 void (*notify_controller_remove) (struct i2o_controller *);
474 void (*notify_device_add) (struct i2o_device *);
475 void (*notify_device_remove) (struct i2o_device *);
476
477 struct semaphore lock;
478};
479
480/*
481 * Contains DMA mapped address information
482 */
483struct i2o_dma {
484 void *virt;
485 dma_addr_t phys;
486 size_t len;
487};
488
489/*
490 * Contains slab cache and mempool information
491 */
492struct i2o_pool {
493 char *name;
494 struct kmem_cache *slab;
495 mempool_t *mempool;
496};
497
498/*
499 * Contains IO mapped address information
500 */
501struct i2o_io {
502 void __iomem *virt;
503 unsigned long phys;
504 unsigned long len;
505};
506
507/*
508 * Context queue entry, used for 32-bit context on 64-bit systems
509 */
510struct i2o_context_list_element {
511 struct list_head list;
512 u32 context;
513 void *ptr;
514 unsigned long timestamp;
515};
516
517/*
518 * Each I2O controller has one of these objects
519 */
520struct i2o_controller {
521 char name[16];
522 int unit;
523 int type;
524
525 struct pci_dev *pdev; /* PCI device */
526
527 unsigned int promise:1; /* Promise controller */
528 unsigned int adaptec:1; /* DPT / Adaptec controller */
529 unsigned int raptor:1; /* split bar */
530 unsigned int no_quiesce:1; /* dont quiesce before reset */
531 unsigned int short_req:1; /* use small block sizes */
532 unsigned int limit_sectors:1; /* limit number of sectors / request */
533 unsigned int pae_support:1; /* controller has 64-bit SGL support */
534
535 struct list_head devices; /* list of I2O devices */
536 struct list_head list; /* Controller list */
537
538 void __iomem *in_port; /* Inbout port address */
539 void __iomem *out_port; /* Outbound port address */
540 void __iomem *irq_status; /* Interrupt status register address */
541 void __iomem *irq_mask; /* Interrupt mask register address */
542
543 struct i2o_dma status; /* IOP status block */
544
545 struct i2o_dma hrt; /* HW Resource Table */
546 i2o_lct *lct; /* Logical Config Table */
547 struct i2o_dma dlct; /* Temp LCT */
548 struct mutex lct_lock; /* Lock for LCT updates */
549 struct i2o_dma status_block; /* IOP status block */
550
551 struct i2o_io base; /* controller messaging unit */
552 struct i2o_io in_queue; /* inbound message queue Host->IOP */
553 struct i2o_dma out_queue; /* outbound message queue IOP->Host */
554
555 struct i2o_pool in_msg; /* mempool for inbound messages */
556
557 unsigned int battery:1; /* Has a battery backup */
558 unsigned int io_alloc:1; /* An I/O resource was allocated */
559 unsigned int mem_alloc:1; /* A memory resource was allocated */
560
561 struct resource io_resource; /* I/O resource allocated to the IOP */
562 struct resource mem_resource; /* Mem resource allocated to the IOP */
563
564 struct device device;
565 struct i2o_device *exec; /* Executive */
566#if BITS_PER_LONG == 64
567 spinlock_t context_list_lock; /* lock for context_list */
568 atomic_t context_list_counter; /* needed for unique contexts */
569 struct list_head context_list; /* list of context id's
570 and pointers */
571#endif
572 spinlock_t lock; /* lock for controller
573 configuration */
574 void *driver_data[I2O_MAX_DRIVERS]; /* storage for drivers */
575};
576
577/*
578 * I2O System table entry
579 *
580 * The system table contains information about all the IOPs in the
581 * system. It is sent to all IOPs so that they can create peer2peer
582 * connections between them.
583 */
584struct i2o_sys_tbl_entry {
585 u16 org_id;
586 u16 reserved1;
587 u32 iop_id:12;
588 u32 reserved2:20;
589 u16 seg_num:12;
590 u16 i2o_version:4;
591 u8 iop_state;
592 u8 msg_type;
593 u16 frame_size;
594 u16 reserved3;
595 u32 last_changed;
596 u32 iop_capabilities;
597 u32 inbound_low;
598 u32 inbound_high;
599};
600
601struct i2o_sys_tbl {
602 u8 num_entries;
603 u8 version;
604 u16 reserved1;
605 u32 change_ind;
606 u32 reserved2;
607 u32 reserved3;
608 struct i2o_sys_tbl_entry iops[0];
609};
610
611extern struct list_head i2o_controllers;
612
613/* Message functions */
614extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
615extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
616 unsigned long, struct i2o_dma *);
617
618/* IOP functions */
619extern int i2o_status_get(struct i2o_controller *);
620
621extern int i2o_event_register(struct i2o_device *, struct i2o_driver *, int,
622 u32);
623extern struct i2o_device *i2o_iop_find_device(struct i2o_controller *, u16);
624extern struct i2o_controller *i2o_find_iop(int);
625
626/* Functions needed for handling 64-bit pointers in 32-bit context */
627#if BITS_PER_LONG == 64
628extern u32 i2o_cntxt_list_add(struct i2o_controller *, void *);
629extern void *i2o_cntxt_list_get(struct i2o_controller *, u32);
630extern u32 i2o_cntxt_list_remove(struct i2o_controller *, void *);
631extern u32 i2o_cntxt_list_get_ptr(struct i2o_controller *, void *);
632
633static inline u32 i2o_ptr_low(void *ptr)
634{
635 return (u32) (u64) ptr;
636};
637
638static inline u32 i2o_ptr_high(void *ptr)
639{
640 return (u32) ((u64) ptr >> 32);
641};
642
643static inline u32 i2o_dma_low(dma_addr_t dma_addr)
644{
645 return (u32) (u64) dma_addr;
646};
647
648static inline u32 i2o_dma_high(dma_addr_t dma_addr)
649{
650 return (u32) ((u64) dma_addr >> 32);
651};
652#else
653static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
654{
655 return (u32) ptr;
656};
657
658static inline void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
659{
660 return (void *)context;
661};
662
663static inline u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr)
664{
665 return (u32) ptr;
666};
667
668static inline u32 i2o_cntxt_list_get_ptr(struct i2o_controller *c, void *ptr)
669{
670 return (u32) ptr;
671};
672
673static inline u32 i2o_ptr_low(void *ptr)
674{
675 return (u32) ptr;
676};
677
678static inline u32 i2o_ptr_high(void *ptr)
679{
680 return 0;
681};
682
683static inline u32 i2o_dma_low(dma_addr_t dma_addr)
684{
685 return (u32) dma_addr;
686};
687
688static inline u32 i2o_dma_high(dma_addr_t dma_addr)
689{
690 return 0;
691};
692#endif
693
694extern u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size);
695extern dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
696 size_t size,
697 enum dma_data_direction direction,
698 u32 ** sg_ptr);
699extern int i2o_dma_map_sg(struct i2o_controller *c,
700 struct scatterlist *sg, int sg_count,
701 enum dma_data_direction direction,
702 u32 ** sg_ptr);
703extern int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len);
704extern void i2o_dma_free(struct device *dev, struct i2o_dma *addr);
705extern int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
706 size_t len);
707extern int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
708 size_t size, int min_nr);
709extern void i2o_pool_free(struct i2o_pool *pool);
710/* I2O driver (OSM) functions */
711extern int i2o_driver_register(struct i2o_driver *);
712extern void i2o_driver_unregister(struct i2o_driver *);
713
714/**
715 * i2o_driver_notify_controller_add - Send notification of added controller
716 * @drv: I2O driver
717 * @c: I2O controller
718 *
719 * Send notification of added controller to a single registered driver.
720 */
721static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv,
722 struct i2o_controller *c)
723{
724 if (drv->notify_controller_add)
725 drv->notify_controller_add(c);
726};
727
728/**
729 * i2o_driver_notify_controller_remove - Send notification of removed controller
730 * @drv: I2O driver
731 * @c: I2O controller
732 *
733 * Send notification of removed controller to a single registered driver.
734 */
735static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv,
736 struct i2o_controller *c)
737{
738 if (drv->notify_controller_remove)
739 drv->notify_controller_remove(c);
740};
741
742/**
743 * i2o_driver_notify_device_add - Send notification of added device
744 * @drv: I2O driver
745 * @i2o_dev: the added i2o_device
746 *
747 * Send notification of added device to a single registered driver.
748 */
749static inline void i2o_driver_notify_device_add(struct i2o_driver *drv,
750 struct i2o_device *i2o_dev)
751{
752 if (drv->notify_device_add)
753 drv->notify_device_add(i2o_dev);
754};
755
756/**
757 * i2o_driver_notify_device_remove - Send notification of removed device
758 * @drv: I2O driver
759 * @i2o_dev: the added i2o_device
760 *
761 * Send notification of removed device to a single registered driver.
762 */
763static inline void i2o_driver_notify_device_remove(struct i2o_driver *drv,
764 struct i2o_device *i2o_dev)
765{
766 if (drv->notify_device_remove)
767 drv->notify_device_remove(i2o_dev);
768};
769
770extern void i2o_driver_notify_controller_add_all(struct i2o_controller *);
771extern void i2o_driver_notify_controller_remove_all(struct i2o_controller *);
772extern void i2o_driver_notify_device_add_all(struct i2o_device *);
773extern void i2o_driver_notify_device_remove_all(struct i2o_device *);
774
775/* I2O device functions */
776extern int i2o_device_claim(struct i2o_device *);
777extern int i2o_device_claim_release(struct i2o_device *);
778
779/* Exec OSM functions */
780extern int i2o_exec_lct_get(struct i2o_controller *);
781
782/* device / driver / kobject conversion functions */
783#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
784#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
785#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
786
787/**
788 * i2o_out_to_virt - Turn an I2O message to a virtual address
789 * @c: controller
790 * @m: message engine value
791 *
792 * Turn a receive message from an I2O controller bus address into
793 * a Linux virtual address. The shared page frame is a linear block
794 * so we simply have to shift the offset. This function does not
795 * work for sender side messages as they are ioremap objects
796 * provided by the I2O controller.
797 */
798static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
799 u32 m)
800{
801 BUG_ON(m < c->out_queue.phys
802 || m >= c->out_queue.phys + c->out_queue.len);
803
804 return c->out_queue.virt + (m - c->out_queue.phys);
805};
806
807/**
808 * i2o_msg_in_to_virt - Turn an I2O message to a virtual address
809 * @c: controller
810 * @m: message engine value
811 *
812 * Turn a send message from an I2O controller bus address into
813 * a Linux virtual address. The shared page frame is a linear block
814 * so we simply have to shift the offset. This function does not
815 * work for receive side messages as they are kmalloc objects
816 * in a different pool.
817 */
818static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
819 i2o_controller *c,
820 u32 m)
821{
822 return c->in_queue.virt + m;
823};
824
825/**
826 * i2o_msg_get - obtain an I2O message from the IOP
827 * @c: I2O controller
828 *
829 * This function tries to get a message frame. If no message frame is
830 * available do not wait until one is available (see also i2o_msg_get_wait).
831 * The returned pointer to the message frame is not in I/O memory, it is
832 * allocated from a mempool. But because a MFA is allocated from the
833 * controller too it is guaranteed that i2o_msg_post() will never fail.
834 *
835 * On a success a pointer to the message frame is returned. If the message
836 * queue is empty -EBUSY is returned and if no memory is available -ENOMEM
837 * is returned.
838 */
839static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
840{
841 struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC);
842 if (!mmsg)
843 return ERR_PTR(-ENOMEM);
844
845 mmsg->mfa = readl(c->in_port);
846 if (unlikely(mmsg->mfa >= c->in_queue.len)) {
847 u32 mfa = mmsg->mfa;
848
849 mempool_free(mmsg, c->in_msg.mempool);
850
851 if (mfa == I2O_QUEUE_EMPTY)
852 return ERR_PTR(-EBUSY);
853 return ERR_PTR(-EFAULT);
854 }
855
856 return &mmsg->msg;
857};
858
859/**
860 * i2o_msg_post - Post I2O message to I2O controller
861 * @c: I2O controller to which the message should be send
862 * @msg: message returned by i2o_msg_get()
863 *
864 * Post the message to the I2O controller and return immediately.
865 */
866static inline void i2o_msg_post(struct i2o_controller *c,
867 struct i2o_message *msg)
868{
869 struct i2o_msg_mfa *mmsg;
870
871 mmsg = container_of(msg, struct i2o_msg_mfa, msg);
872 memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg,
873 (le32_to_cpu(msg->u.head[0]) >> 16) << 2);
874 writel(mmsg->mfa, c->in_port);
875 mempool_free(mmsg, c->in_msg.mempool);
876};
877
878/**
879 * i2o_msg_post_wait - Post and wait a message and wait until return
880 * @c: controller
881 * @msg: message to post
882 * @timeout: time in seconds to wait
883 *
884 * This API allows an OSM to post a message and then be told whether or
885 * not the system received a successful reply. If the message times out
886 * then the value '-ETIMEDOUT' is returned.
887 *
888 * Returns 0 on success or negative error code on failure.
889 */
890static inline int i2o_msg_post_wait(struct i2o_controller *c,
891 struct i2o_message *msg,
892 unsigned long timeout)
893{
894 return i2o_msg_post_wait_mem(c, msg, timeout, NULL);
895};
896
897/**
898 * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller
899 * @c: I2O controller from which the MFA was fetched
900 * @mfa: MFA which should be returned
901 *
902 * This function must be used for preserved messages, because i2o_msg_nop()
903 * also returns the allocated memory back to the msg_pool mempool.
904 */
905static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa)
906{
907 struct i2o_message __iomem *msg;
908 u32 nop[3] = {
909 THREE_WORD_MSG_SIZE | SGL_OFFSET_0,
910 I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
911 0x00000000
912 };
913
914 msg = i2o_msg_in_to_virt(c, mfa);
915 memcpy_toio(msg, nop, sizeof(nop));
916 writel(mfa, c->in_port);
917};
918
919/**
920 * i2o_msg_nop - Returns a message which is not used
921 * @c: I2O controller from which the message was created
922 * @msg: message which should be returned
923 *
924 * If you fetch a message via i2o_msg_get, and can't use it, you must
925 * return the message with this function. Otherwise the MFA is lost as well
926 * as the allocated memory from the mempool.
927 */
928static inline void i2o_msg_nop(struct i2o_controller *c,
929 struct i2o_message *msg)
930{
931 struct i2o_msg_mfa *mmsg;
932 mmsg = container_of(msg, struct i2o_msg_mfa, msg);
933
934 i2o_msg_nop_mfa(c, mmsg->mfa);
935 mempool_free(mmsg, c->in_msg.mempool);
936};
937
938/**
939 * i2o_flush_reply - Flush reply from I2O controller
940 * @c: I2O controller
941 * @m: the message identifier
942 *
943 * The I2O controller must be informed that the reply message is not needed
944 * anymore. If you forget to flush the reply, the message frame can't be
945 * used by the controller anymore and is therefore lost.
946 */
947static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
948{
949 writel(m, c->out_port);
950};
951
952/*
953 * Endian handling wrapped into the macro - keeps the core code
954 * cleaner.
955 */
956
957#define i2o_raw_writel(val, mem) __raw_writel(cpu_to_le32(val), mem)
958
959extern int i2o_parm_field_get(struct i2o_device *, int, int, void *, int);
960extern int i2o_parm_table_get(struct i2o_device *, int, int, int, void *, int,
961 void *, int);
962
963/* debugging and troubleshooting/diagnostic helpers. */
964#define osm_printk(level, format, arg...) \
965 printk(level "%s: " format, OSM_NAME , ## arg)
966
967#ifdef DEBUG
968#define osm_debug(format, arg...) \
969 osm_printk(KERN_DEBUG, format , ## arg)
970#else
971#define osm_debug(format, arg...) \
972 do { } while (0)
973#endif
974
975#define osm_err(format, arg...) \
976 osm_printk(KERN_ERR, format , ## arg)
977#define osm_info(format, arg...) \
978 osm_printk(KERN_INFO, format , ## arg)
979#define osm_warn(format, arg...) \
980 osm_printk(KERN_WARNING, format , ## arg)
981
982/* debugging functions */
983extern void i2o_report_status(const char *, const char *, struct i2o_message *);
984extern void i2o_dump_message(struct i2o_message *);
985extern void i2o_dump_hrt(struct i2o_controller *c);
986extern void i2o_debug_state(struct i2o_controller *c);
987
988#endif /* _I2O_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 4f4eea8a6288..b9c7897dc566 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1017,6 +1017,15 @@ struct ieee80211_mmie {
1017 u8 mic[8]; 1017 u8 mic[8];
1018} __packed; 1018} __packed;
1019 1019
1020/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */
1021struct ieee80211_mmie_16 {
1022 u8 element_id;
1023 u8 length;
1024 __le16 key_id;
1025 u8 sequence_number[6];
1026 u8 mic[16];
1027} __packed;
1028
1020struct ieee80211_vendor_ie { 1029struct ieee80211_vendor_ie {
1021 u8 element_id; 1030 u8 element_id;
1022 u8 len; 1031 u8 len;
@@ -1994,9 +2003,15 @@ enum ieee80211_key_len {
1994 WLAN_KEY_LEN_WEP40 = 5, 2003 WLAN_KEY_LEN_WEP40 = 5,
1995 WLAN_KEY_LEN_WEP104 = 13, 2004 WLAN_KEY_LEN_WEP104 = 13,
1996 WLAN_KEY_LEN_CCMP = 16, 2005 WLAN_KEY_LEN_CCMP = 16,
2006 WLAN_KEY_LEN_CCMP_256 = 32,
1997 WLAN_KEY_LEN_TKIP = 32, 2007 WLAN_KEY_LEN_TKIP = 32,
1998 WLAN_KEY_LEN_AES_CMAC = 16, 2008 WLAN_KEY_LEN_AES_CMAC = 16,
1999 WLAN_KEY_LEN_SMS4 = 32, 2009 WLAN_KEY_LEN_SMS4 = 32,
2010 WLAN_KEY_LEN_GCMP = 16,
2011 WLAN_KEY_LEN_GCMP_256 = 32,
2012 WLAN_KEY_LEN_BIP_CMAC_256 = 32,
2013 WLAN_KEY_LEN_BIP_GMAC_128 = 16,
2014 WLAN_KEY_LEN_BIP_GMAC_256 = 32,
2000}; 2015};
2001 2016
2002#define IEEE80211_WEP_IV_LEN 4 2017#define IEEE80211_WEP_IV_LEN 4
@@ -2004,9 +2019,16 @@ enum ieee80211_key_len {
2004#define IEEE80211_CCMP_HDR_LEN 8 2019#define IEEE80211_CCMP_HDR_LEN 8
2005#define IEEE80211_CCMP_MIC_LEN 8 2020#define IEEE80211_CCMP_MIC_LEN 8
2006#define IEEE80211_CCMP_PN_LEN 6 2021#define IEEE80211_CCMP_PN_LEN 6
2022#define IEEE80211_CCMP_256_HDR_LEN 8
2023#define IEEE80211_CCMP_256_MIC_LEN 16
2024#define IEEE80211_CCMP_256_PN_LEN 6
2007#define IEEE80211_TKIP_IV_LEN 8 2025#define IEEE80211_TKIP_IV_LEN 8
2008#define IEEE80211_TKIP_ICV_LEN 4 2026#define IEEE80211_TKIP_ICV_LEN 4
2009#define IEEE80211_CMAC_PN_LEN 6 2027#define IEEE80211_CMAC_PN_LEN 6
2028#define IEEE80211_GMAC_PN_LEN 6
2029#define IEEE80211_GCMP_HDR_LEN 8
2030#define IEEE80211_GCMP_MIC_LEN 16
2031#define IEEE80211_GCMP_PN_LEN 6
2010 2032
2011/* Public action codes */ 2033/* Public action codes */
2012enum ieee80211_pub_actioncode { 2034enum ieee80211_pub_actioncode {
@@ -2230,6 +2252,11 @@ enum ieee80211_sa_query_action {
2230#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 2252#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
2231#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 2253#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
2232#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08 2254#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
2255#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09
2256#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A
2257#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
2258#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
2259#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
2233 2260
2234#define WLAN_CIPHER_SUITE_SMS4 0x00147201 2261#define WLAN_CIPHER_SUITE_SMS4 0x00147201
2235 2262
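Note (illustrative, not part of the patch): the new WLAN_CIPHER_SUITE_* selectors pair with the key lengths added to enum ieee80211_key_len above. A minimal lookup sketch using only constants from this header; the function name is hypothetical and <linux/errno.h> is assumed for EINVAL:

static int example_cipher_key_len(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_CCMP_256:
		return WLAN_KEY_LEN_CCMP_256;		/* 32 */
	case WLAN_CIPHER_SUITE_GCMP:
		return WLAN_KEY_LEN_GCMP;		/* 16 */
	case WLAN_CIPHER_SUITE_GCMP_256:
		return WLAN_KEY_LEN_GCMP_256;		/* 32 */
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		return WLAN_KEY_LEN_BIP_GMAC_128;	/* 16 */
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		return WLAN_KEY_LEN_BIP_GMAC_256;	/* 32 */
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		return WLAN_KEY_LEN_BIP_CMAC_256;	/* 32 */
	default:
		return -EINVAL;
	}
}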
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 0a8ce762a47f..a57bca2ea97e 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -50,24 +50,6 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use
50typedef int br_should_route_hook_t(struct sk_buff *skb); 50typedef int br_should_route_hook_t(struct sk_buff *skb);
51extern br_should_route_hook_t __rcu *br_should_route_hook; 51extern br_should_route_hook_t __rcu *br_should_route_hook;
52 52
53#if IS_ENABLED(CONFIG_BRIDGE)
54int br_fdb_external_learn_add(struct net_device *dev,
55 const unsigned char *addr, u16 vid);
56int br_fdb_external_learn_del(struct net_device *dev,
57 const unsigned char *addr, u16 vid);
58#else
59static inline int br_fdb_external_learn_add(struct net_device *dev,
60 const unsigned char *addr, u16 vid)
61{
62 return 0;
63}
64static inline int br_fdb_external_learn_del(struct net_device *dev,
65 const unsigned char *addr, u16 vid)
66{
67 return 0;
68}
69#endif
70
71#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) 53#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
72int br_multicast_list_adjacent(struct net_device *dev, 54int br_multicast_list_adjacent(struct net_device *dev,
73 struct list_head *br_ip_list); 55 struct list_head *br_ip_list);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 515a35e2a48a..b11b28a30b9e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -78,9 +78,9 @@ static inline bool is_vlan_dev(struct net_device *dev)
78 return dev->priv_flags & IFF_802_1Q_VLAN; 78 return dev->priv_flags & IFF_802_1Q_VLAN;
79} 79}
80 80
81#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) 81#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
82#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) 82#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
83#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) 83#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
84 84
85/** 85/**
86 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats 86 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -376,7 +376,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
376static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) 376static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
377{ 377{
378 skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, 378 skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
379 vlan_tx_tag_get(skb)); 379 skb_vlan_tag_get(skb));
380 if (likely(skb)) 380 if (likely(skb))
381 skb->vlan_tci = 0; 381 skb->vlan_tci = 0;
382 return skb; 382 return skb;
@@ -393,7 +393,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
393 */ 393 */
394static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) 394static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
395{ 395{
396 if (vlan_tx_tag_present(skb)) 396 if (skb_vlan_tag_present(skb))
397 skb = __vlan_hwaccel_push_inside(skb); 397 skb = __vlan_hwaccel_push_inside(skb);
398 return skb; 398 return skb;
399} 399}
@@ -442,8 +442,8 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
442static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, 442static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
443 u16 *vlan_tci) 443 u16 *vlan_tci)
444{ 444{
445 if (vlan_tx_tag_present(skb)) { 445 if (skb_vlan_tag_present(skb)) {
446 *vlan_tci = vlan_tx_tag_get(skb); 446 *vlan_tci = skb_vlan_tag_get(skb);
447 return 0; 447 return 0;
448 } else { 448 } else {
449 *vlan_tci = 0; 449 *vlan_tci = 0;
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
472/** 472/**
473 * vlan_get_protocol - get protocol EtherType. 473 * vlan_get_protocol - get protocol EtherType.
474 * @skb: skbuff to query 474 * @skb: skbuff to query
475 * @type: first vlan protocol
476 * @depth: buffer to store length of eth and vlan tags in bytes
475 * 477 *
476 * Returns the EtherType of the packet, regardless of whether it is 478 * Returns the EtherType of the packet, regardless of whether it is
477 * vlan encapsulated (normal or hardware accelerated) or not. 479 * vlan encapsulated (normal or hardware accelerated) or not.
478 */ 480 */
479static inline __be16 vlan_get_protocol(const struct sk_buff *skb) 481static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
482 int *depth)
480{ 483{
481 __be16 protocol = 0; 484 unsigned int vlan_depth = skb->mac_len;
482 485
483 if (vlan_tx_tag_present(skb) || 486 /* if type is 802.1Q/AD then the header should already be
484 skb->protocol != cpu_to_be16(ETH_P_8021Q)) 487 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
485 protocol = skb->protocol; 488 * ETH_HLEN otherwise
486 else { 489 */
487 __be16 proto, *protop; 490 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
488 protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, 491 if (vlan_depth) {
489 h_vlan_encapsulated_proto), 492 if (WARN_ON(vlan_depth < VLAN_HLEN))
490 sizeof(proto), &proto); 493 return 0;
491 if (likely(protop)) 494 vlan_depth -= VLAN_HLEN;
492 protocol = *protop; 495 } else {
496 vlan_depth = ETH_HLEN;
497 }
498 do {
499 struct vlan_hdr *vh;
500
501 if (unlikely(!pskb_may_pull(skb,
502 vlan_depth + VLAN_HLEN)))
503 return 0;
504
505 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
506 type = vh->h_vlan_encapsulated_proto;
507 vlan_depth += VLAN_HLEN;
508 } while (type == htons(ETH_P_8021Q) ||
509 type == htons(ETH_P_8021AD));
493 } 510 }
494 511
495 return protocol; 512 if (depth)
513 *depth = vlan_depth;
514
515 return type;
516}
517
518/**
519 * vlan_get_protocol - get protocol EtherType.
520 * @skb: skbuff to query
521 *
522 * Returns the EtherType of the packet, regardless of whether it is
523 * vlan encapsulated (normal or hardware accelerated) or not.
524 */
525static inline __be16 vlan_get_protocol(struct sk_buff *skb)
526{
527 return __vlan_get_protocol(skb, skb->protocol, NULL);
496} 528}
497 529
498static inline void vlan_set_encap_proto(struct sk_buff *skb, 530static inline void vlan_set_encap_proto(struct sk_buff *skb,
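Note (illustrative, not part of the patch): __vlan_get_protocol() walks any number of stacked 802.1Q/802.1AD tags and optionally reports how many bytes of Ethernet plus VLAN headers it skipped. A minimal caller sketch; it assumes skb->data points at the Ethernet header and skb->mac_len is already set, so the reported depth is the network-header offset, and the function name is hypothetical:

static __be16 example_set_network_header(struct sk_buff *skb)
{
	int depth = 0;
	__be16 proto = __vlan_get_protocol(skb, skb->protocol, &depth);

	if (!proto)
		return 0;		/* truncated or malformed VLAN stack */

	skb_set_network_header(skb, depth);	/* eth + n * VLAN_HLEN bytes */
	return proto;
}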
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 519392763393..b65850a41127 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -25,9 +25,7 @@ struct iio_buffer;
25 * available. 25 * available.
26 * @request_update: if a parameter change has been marked, update underlying 26 * @request_update: if a parameter change has been marked, update underlying
27 * storage. 27 * storage.
28 * @get_bytes_per_datum:get current bytes per datum
29 * @set_bytes_per_datum:set number of bytes per datum 28 * @set_bytes_per_datum:set number of bytes per datum
30 * @get_length: get number of datums in buffer
31 * @set_length: set number of datums in buffer 29 * @set_length: set number of datums in buffer
32 * @release: called when the last reference to the buffer is dropped, 30 * @release: called when the last reference to the buffer is dropped,
33 * should free all resources allocated by the buffer. 31 * should free all resources allocated by the buffer.
@@ -49,9 +47,7 @@ struct iio_buffer_access_funcs {
49 47
50 int (*request_update)(struct iio_buffer *buffer); 48 int (*request_update)(struct iio_buffer *buffer);
51 49
52 int (*get_bytes_per_datum)(struct iio_buffer *buffer);
53 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); 50 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
54 int (*get_length)(struct iio_buffer *buffer);
55 int (*set_length)(struct iio_buffer *buffer, int length); 51 int (*set_length)(struct iio_buffer *buffer, int length);
56 52
57 void (*release)(struct iio_buffer *buffer); 53 void (*release)(struct iio_buffer *buffer);
@@ -85,10 +81,11 @@ struct iio_buffer {
85 bool scan_timestamp; 81 bool scan_timestamp;
86 const struct iio_buffer_access_funcs *access; 82 const struct iio_buffer_access_funcs *access;
87 struct list_head scan_el_dev_attr_list; 83 struct list_head scan_el_dev_attr_list;
84 struct attribute_group buffer_group;
88 struct attribute_group scan_el_group; 85 struct attribute_group scan_el_group;
89 wait_queue_head_t pollq; 86 wait_queue_head_t pollq;
90 bool stufftoread; 87 bool stufftoread;
91 const struct attribute_group *attrs; 88 const struct attribute **attrs;
92 struct list_head demux_list; 89 struct list_head demux_list;
93 void *demux_bounce; 90 void *demux_bounce;
94 struct list_head buffer_list; 91 struct list_head buffer_list;
@@ -117,15 +114,6 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
117 struct iio_buffer *buffer, int bit); 114 struct iio_buffer *buffer, int bit);
118 115
119/** 116/**
120 * iio_scan_mask_set() - set particular bit in the scan mask
121 * @indio_dev IIO device structure
122 * @buffer: the buffer whose scan mask we are interested in
123 * @bit: the bit to be set.
124 **/
125int iio_scan_mask_set(struct iio_dev *indio_dev,
126 struct iio_buffer *buffer, int bit);
127
128/**
129 * iio_push_to_buffers() - push to a registered buffer. 117 * iio_push_to_buffers() - push to a registered buffer.
130 * @indio_dev: iio_dev structure for device. 118 * @indio_dev: iio_dev structure for device.
131 * @data: Full scan. 119 * @data: Full scan.
@@ -159,56 +147,6 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
159 147
160int iio_update_demux(struct iio_dev *indio_dev); 148int iio_update_demux(struct iio_dev *indio_dev);
161 149
162/**
163 * iio_buffer_register() - register the buffer with IIO core
164 * @indio_dev: device with the buffer to be registered
165 * @channels: the channel descriptions used to construct buffer
166 * @num_channels: the number of channels
167 **/
168int iio_buffer_register(struct iio_dev *indio_dev,
169 const struct iio_chan_spec *channels,
170 int num_channels);
171
172/**
173 * iio_buffer_unregister() - unregister the buffer from IIO core
174 * @indio_dev: the device with the buffer to be unregistered
175 **/
176void iio_buffer_unregister(struct iio_dev *indio_dev);
177
178/**
179 * iio_buffer_read_length() - attr func to get number of datums in the buffer
180 **/
181ssize_t iio_buffer_read_length(struct device *dev,
182 struct device_attribute *attr,
183 char *buf);
184/**
185 * iio_buffer_write_length() - attr func to set number of datums in the buffer
186 **/
187ssize_t iio_buffer_write_length(struct device *dev,
188 struct device_attribute *attr,
189 const char *buf,
190 size_t len);
191/**
192 * iio_buffer_store_enable() - attr to turn the buffer on
193 **/
194ssize_t iio_buffer_store_enable(struct device *dev,
195 struct device_attribute *attr,
196 const char *buf,
197 size_t len);
198/**
199 * iio_buffer_show_enable() - attr to see if the buffer is on
200 **/
201ssize_t iio_buffer_show_enable(struct device *dev,
202 struct device_attribute *attr,
203 char *buf);
204#define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
205 iio_buffer_read_length, \
206 iio_buffer_write_length)
207
208#define IIO_BUFFER_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
209 iio_buffer_show_enable, \
210 iio_buffer_store_enable)
211
212bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, 150bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
213 const unsigned long *mask); 151 const unsigned long *mask);
214 152
@@ -232,16 +170,6 @@ static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
232 170
233#else /* CONFIG_IIO_BUFFER */ 171#else /* CONFIG_IIO_BUFFER */
234 172
235static inline int iio_buffer_register(struct iio_dev *indio_dev,
236 const struct iio_chan_spec *channels,
237 int num_channels)
238{
239 return 0;
240}
241
242static inline void iio_buffer_unregister(struct iio_dev *indio_dev)
243{}
244
245static inline void iio_buffer_get(struct iio_buffer *buffer) {} 173static inline void iio_buffer_get(struct iio_buffer *buffer) {}
246static inline void iio_buffer_put(struct iio_buffer *buffer) {} 174static inline void iio_buffer_put(struct iio_buffer *buffer) {}
247 175
diff --git a/include/linux/iio/common/ssp_sensors.h b/include/linux/iio/common/ssp_sensors.h
new file mode 100644
index 000000000000..f4d1b0edb432
--- /dev/null
+++ b/include/linux/iio/common/ssp_sensors.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#ifndef _SSP_SENSORS_H_
16#define _SSP_SENSORS_H_
17
18#include <linux/iio/iio.h>
19
20#define SSP_TIME_SIZE 4
21#define SSP_ACCELEROMETER_SIZE 6
22#define SSP_GYROSCOPE_SIZE 6
23#define SSP_BIO_HRM_RAW_SIZE 8
24#define SSP_BIO_HRM_RAW_FAC_SIZE 36
25#define SSP_BIO_HRM_LIB_SIZE 8
26
27/**
28 * enum ssp_sensor_type - SSP sensor type
29 */
30enum ssp_sensor_type {
31 SSP_ACCELEROMETER_SENSOR = 0,
32 SSP_GYROSCOPE_SENSOR,
33 SSP_GEOMAGNETIC_UNCALIB_SENSOR,
34 SSP_GEOMAGNETIC_RAW,
35 SSP_GEOMAGNETIC_SENSOR,
36 SSP_PRESSURE_SENSOR,
37 SSP_GESTURE_SENSOR,
38 SSP_PROXIMITY_SENSOR,
39 SSP_TEMPERATURE_HUMIDITY_SENSOR,
40 SSP_LIGHT_SENSOR,
41 SSP_PROXIMITY_RAW,
42 SSP_ORIENTATION_SENSOR,
43 SSP_STEP_DETECTOR,
44 SSP_SIG_MOTION_SENSOR,
45 SSP_GYRO_UNCALIB_SENSOR,
46 SSP_GAME_ROTATION_VECTOR,
47 SSP_ROTATION_VECTOR,
48 SSP_STEP_COUNTER,
49 SSP_BIO_HRM_RAW,
50 SSP_BIO_HRM_RAW_FAC,
51 SSP_BIO_HRM_LIB,
52 SSP_SENSOR_MAX,
53};
54
55struct ssp_data;
56
57/**
58 * struct ssp_sensor_data - Sensor object
59 * @process_data: Callback to feed sensor data.
60 * @type: Used sensor type.
61 * @buffer: Received data buffer.
62 */
63struct ssp_sensor_data {
64 int (*process_data)(struct iio_dev *indio_dev, void *buf,
65 int64_t timestamp);
66 enum ssp_sensor_type type;
67 u8 *buffer;
68};
69
70void ssp_register_consumer(struct iio_dev *indio_dev,
71 enum ssp_sensor_type type);
72
73int ssp_enable_sensor(struct ssp_data *data, enum ssp_sensor_type type,
74 u32 delay);
75
76int ssp_disable_sensor(struct ssp_data *data, enum ssp_sensor_type type);
77
78u32 ssp_get_sensor_delay(struct ssp_data *data, enum ssp_sensor_type);
79
80int ssp_change_delay(struct ssp_data *data, enum ssp_sensor_type type,
81 u32 delay);
82#endif /* _SSP_SENSORS_H_ */
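Note (illustrative, not part of the patch): the new header is the consumer-side API for the Samsung SSP sensor hub; individual IIO sensor drivers register themselves for a sensor type and then enable or disable it. A minimal sketch; the function names and the delay value of 20 are assumptions (the delay units are whatever the SSP firmware contract defines):

static int example_ssp_accel_start(struct ssp_data *data,
				   struct iio_dev *indio_dev)
{
	ssp_register_consumer(indio_dev, SSP_ACCELEROMETER_SENSOR);
	return ssp_enable_sensor(data, SSP_ACCELEROMETER_SENSOR, 20);
}

static int example_ssp_accel_stop(struct ssp_data *data)
{
	return ssp_disable_sensor(data, SSP_ACCELEROMETER_SENSOR);
}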
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 651f9a0e2765..26fb8f6342bb 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -151,6 +151,16 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
151int iio_read_channel_processed(struct iio_channel *chan, int *val); 151int iio_read_channel_processed(struct iio_channel *chan, int *val);
152 152
153/** 153/**
154 * iio_write_channel_raw() - write to a given channel
155 * @chan: The channel being queried.
156 * @val: Value being written.
157 *
 158 * Note that raw writes to IIO channels are in DAC counts and hence
 159 * the scale will need to be applied if standard units are required.
160 */
161int iio_write_channel_raw(struct iio_channel *chan, int val);
162
163/**
154 * iio_get_channel_type() - get the type of a channel 164 * iio_get_channel_type() - get the type of a channel
155 * @channel: The channel being queried. 165 * @channel: The channel being queried.
156 * @type: The type of the channel. 166 * @type: The type of the channel.
@@ -191,7 +201,7 @@ int iio_read_channel_scale(struct iio_channel *chan, int *val,
 191 * The scale factor makes it possible to increase the precision of the returned value. For 201 * The scale factor makes it possible to increase the precision of the returned value. For
192 * a scale factor of 1 the function will return the result in the normal IIO 202 * a scale factor of 1 the function will return the result in the normal IIO
193 * unit for the channel type. E.g. millivolt for voltage channels, if you want 203 * unit for the channel type. E.g. millivolt for voltage channels, if you want
194 * nanovolts instead pass 1000 as the scale factor. 204 * nanovolts instead pass 1000000 as the scale factor.
195 */ 205 */
196int iio_convert_raw_to_processed(struct iio_channel *chan, int raw, 206int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
197 int *processed, unsigned int scale); 207 int *processed, unsigned int scale);
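
A hedged sketch of the new write path (set_output_mv is an invented helper, error handling trimmed): raw values are DAC counts, so a caller that thinks in millivolts first fetches the channel scale:

static int set_output_mv(struct iio_channel *chan, int mv)
{
	int scale_int, scale_frac, ret;

	ret = iio_read_channel_scale(chan, &scale_int, &scale_frac);
	if (ret < 0)
		return ret;
	if (scale_int <= 0)
		return -EINVAL;

	/* crude integer-only conversion; ignores the fractional part */
	return iio_write_channel_raw(chan, mv / scale_int);
}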
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 3642ce7ef512..80d855061064 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -38,6 +38,11 @@ enum iio_chan_info_enum {
38 IIO_CHAN_INFO_HARDWAREGAIN, 38 IIO_CHAN_INFO_HARDWAREGAIN,
39 IIO_CHAN_INFO_HYSTERESIS, 39 IIO_CHAN_INFO_HYSTERESIS,
40 IIO_CHAN_INFO_INT_TIME, 40 IIO_CHAN_INFO_INT_TIME,
41 IIO_CHAN_INFO_ENABLE,
42 IIO_CHAN_INFO_CALIBHEIGHT,
43 IIO_CHAN_INFO_CALIBWEIGHT,
44 IIO_CHAN_INFO_DEBOUNCE_COUNT,
45 IIO_CHAN_INFO_DEBOUNCE_TIME,
41}; 46};
42 47
43enum iio_shared_by { 48enum iio_shared_by {
@@ -284,10 +289,11 @@ static inline s64 iio_get_time_ns(void)
284/* Device operating modes */ 289/* Device operating modes */
285#define INDIO_DIRECT_MODE 0x01 290#define INDIO_DIRECT_MODE 0x01
286#define INDIO_BUFFER_TRIGGERED 0x02 291#define INDIO_BUFFER_TRIGGERED 0x02
292#define INDIO_BUFFER_SOFTWARE 0x04
287#define INDIO_BUFFER_HARDWARE 0x08 293#define INDIO_BUFFER_HARDWARE 0x08
288 294
289#define INDIO_ALL_BUFFER_MODES \ 295#define INDIO_ALL_BUFFER_MODES \
290 (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE) 296 (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)
291 297
292#define INDIO_MAX_RAW_ELEMENTS 4 298#define INDIO_MAX_RAW_ELEMENTS 4
293 299
@@ -591,7 +597,8 @@ void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
591static inline bool iio_buffer_enabled(struct iio_dev *indio_dev) 597static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
592{ 598{
593 return indio_dev->currentmode 599 return indio_dev->currentmode
594 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE); 600 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
601 INDIO_BUFFER_SOFTWARE);
595} 602}
596 603
597/** 604/**
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 25eeac762e84..1683bc710d14 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -5,7 +5,10 @@
5#include <linux/iio/iio.h> 5#include <linux/iio/iio.h>
6#include <linux/iio/buffer.h> 6#include <linux/iio/buffer.h>
7 7
8struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev); 8struct iio_buffer *iio_kfifo_allocate(void);
9void iio_kfifo_free(struct iio_buffer *r); 9void iio_kfifo_free(struct iio_buffer *r);
10 10
11struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
12void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
13
11#endif 14#endif
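
With the device-managed variants, buffer setup in a driver's probe() collapses to something like the following hedged sketch (my_setup_buffer is an invented helper; the surrounding probe plumbing is omitted):

static int my_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = devm_iio_kfifo_allocate(dev);
	if (!buffer)
		return -ENOMEM;

	iio_device_attach_buffer(indio_dev, buffer);
	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
	return 0;
}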
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 4a2af8adf874..580ed5bdb3fa 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -30,6 +30,11 @@ enum iio_chan_type {
30 IIO_CCT, 30 IIO_CCT,
31 IIO_PRESSURE, 31 IIO_PRESSURE,
32 IIO_HUMIDITYRELATIVE, 32 IIO_HUMIDITYRELATIVE,
33 IIO_ACTIVITY,
34 IIO_STEPS,
35 IIO_ENERGY,
36 IIO_DISTANCE,
37 IIO_VELOCITY,
33}; 38};
34 39
35enum iio_modifier { 40enum iio_modifier {
@@ -59,7 +64,12 @@ enum iio_modifier {
59 IIO_MOD_NORTH_MAGN, 64 IIO_MOD_NORTH_MAGN,
60 IIO_MOD_NORTH_TRUE, 65 IIO_MOD_NORTH_TRUE,
61 IIO_MOD_NORTH_MAGN_TILT_COMP, 66 IIO_MOD_NORTH_MAGN_TILT_COMP,
62 IIO_MOD_NORTH_TRUE_TILT_COMP 67 IIO_MOD_NORTH_TRUE_TILT_COMP,
68 IIO_MOD_RUNNING,
69 IIO_MOD_JOGGING,
70 IIO_MOD_WALKING,
71 IIO_MOD_STILL,
72 IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z,
63}; 73};
64 74
65enum iio_event_type { 75enum iio_event_type {
@@ -68,6 +78,7 @@ enum iio_event_type {
68 IIO_EV_TYPE_ROC, 78 IIO_EV_TYPE_ROC,
69 IIO_EV_TYPE_THRESH_ADAPTIVE, 79 IIO_EV_TYPE_THRESH_ADAPTIVE,
70 IIO_EV_TYPE_MAG_ADAPTIVE, 80 IIO_EV_TYPE_MAG_ADAPTIVE,
81 IIO_EV_TYPE_CHANGE,
71}; 82};
72 83
73enum iio_event_info { 84enum iio_event_info {
@@ -81,6 +92,7 @@ enum iio_event_direction {
81 IIO_EV_DIR_EITHER, 92 IIO_EV_DIR_EITHER,
82 IIO_EV_DIR_RISING, 93 IIO_EV_DIR_RISING,
83 IIO_EV_DIR_FALLING, 94 IIO_EV_DIR_FALLING,
95 IIO_EV_DIR_NONE,
84}; 96};
85 97
86#define IIO_VAL_INT 1 98#define IIO_VAL_INT 1
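
Taken together, the new channel type, activity modifiers and the CHANGE event let a pedometer-style driver describe an "activity" channel. A hedged sketch of such a channel spec (my_activity_* are invented names, not from any in-tree driver):

static const struct iio_event_spec my_activity_events[] = {
	{
		.type = IIO_EV_TYPE_CHANGE,
		.dir = IIO_EV_DIR_NONE,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
	},
};

static const struct iio_chan_spec my_activity_channels[] = {
	{
		.type = IIO_ACTIVITY,
		.modified = 1,
		.channel2 = IIO_MOD_RUNNING,
		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
		.event_spec = my_activity_events,
		.num_event_specs = ARRAY_SIZE(my_activity_events),
	},
};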
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 3037fc085e8e..696d22312b31 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -175,6 +175,13 @@ extern struct task_group root_task_group;
175# define INIT_NUMA_BALANCING(tsk) 175# define INIT_NUMA_BALANCING(tsk)
176#endif 176#endif
177 177
178#ifdef CONFIG_KASAN
179# define INIT_KASAN(tsk) \
180 .kasan_depth = 1,
181#else
182# define INIT_KASAN(tsk)
183#endif
184
178/* 185/*
179 * INIT_TASK is used to set up the first task table, touch at 186 * INIT_TASK is used to set up the first task table, touch at
180 * your own risk!. Base=0, limit=0x1fffff (=2MB) 187 * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -193,6 +200,9 @@ extern struct task_group root_task_group;
193 .nr_cpus_allowed= NR_CPUS, \ 200 .nr_cpus_allowed= NR_CPUS, \
194 .mm = NULL, \ 201 .mm = NULL, \
195 .active_mm = &init_mm, \ 202 .active_mm = &init_mm, \
203 .restart_block = { \
204 .fn = do_no_restart_syscall, \
205 }, \
196 .se = { \ 206 .se = { \
197 .group_node = LIST_HEAD_INIT(tsk.se.group_node), \ 207 .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
198 }, \ 208 }, \
@@ -247,6 +257,7 @@ extern struct task_group root_task_group;
247 INIT_RT_MUTEXES(tsk) \ 257 INIT_RT_MUTEXES(tsk) \
248 INIT_VTIME(tsk) \ 258 INIT_VTIME(tsk) \
249 INIT_NUMA_BALANCING(tsk) \ 259 INIT_NUMA_BALANCING(tsk) \
260 INIT_KASAN(tsk) \
250} 261}
251 262
252 263
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index f583ff639776..d7188de4db96 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -119,7 +119,8 @@ struct input_mt_pos {
119}; 119};
120 120
121int input_mt_assign_slots(struct input_dev *dev, int *slots, 121int input_mt_assign_slots(struct input_dev *dev, int *slots,
122 const struct input_mt_pos *pos, int num_pos); 122 const struct input_mt_pos *pos, int num_pos,
123 int dmax);
123 124
124int input_mt_get_slot_by_key(struct input_dev *dev, int key); 125int input_mt_get_slot_by_key(struct input_dev *dev, int key);
125 126
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
new file mode 100644
index 000000000000..1c30014ed176
--- /dev/null
+++ b/include/linux/iopoll.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef _LINUX_IOPOLL_H
16#define _LINUX_IOPOLL_H
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/hrtimer.h>
21#include <linux/delay.h>
22#include <linux/errno.h>
23#include <linux/io.h>
24
25/**
26 * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
27 * @op: accessor function (takes @addr as its only argument)
28 * @addr: Address to poll
29 * @val: Variable to read the value into
30 * @cond: Break condition (usually involving @val)
31 * @sleep_us: Maximum time to sleep between reads in us (0
32 * tight-loops). Should be less than ~20ms since usleep_range
33 * is used (see Documentation/timers/timers-howto.txt).
34 * @timeout_us: Timeout in us, 0 means never timeout
35 *
36 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
37 * case, the last read value at @addr is stored in @val. Must not
38 * be called from atomic context if sleep_us or timeout_us are used.
39 *
40 * When available, you'll probably want to use one of the specialized
41 * macros defined below rather than this macro directly.
42 */
43#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
44({ \
45 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
46 might_sleep_if(sleep_us); \
47 for (;;) { \
48 (val) = op(addr); \
49 if (cond) \
50 break; \
51 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
52 (val) = op(addr); \
53 break; \
54 } \
55 if (sleep_us) \
56 usleep_range((sleep_us >> 2) + 1, sleep_us); \
57 } \
58 (cond) ? 0 : -ETIMEDOUT; \
59})
60
61/**
62 * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
63 * @op: accessor function (takes @addr as its only argument)
64 * @addr: Address to poll
65 * @val: Variable to read the value into
66 * @cond: Break condition (usually involving @val)
67 * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
68 * be less than ~10us since udelay is used (see
69 * Documentation/timers/timers-howto.txt).
70 * @timeout_us: Timeout in us, 0 means never timeout
71 *
72 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
73 * case, the last read value at @addr is stored in @val.
74 *
75 * When available, you'll probably want to use one of the specialized
76 * macros defined below rather than this macro directly.
77 */
78#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
79({ \
80 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
81 for (;;) { \
82 (val) = op(addr); \
83 if (cond) \
84 break; \
85 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
86 (val) = op(addr); \
87 break; \
88 } \
89 if (delay_us) \
90 udelay(delay_us); \
91 } \
92 (cond) ? 0 : -ETIMEDOUT; \
93})
94
95
96#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
97 readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
98
99#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
100 readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
101
102#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \
103 readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us)
104
105#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
106 readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
107
108#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
109 readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us)
110
111#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
112 readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
113
114#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
115 readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us)
116
117#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
118 readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us)
119
120#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
121 readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us)
122
123#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
124 readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us)
125
126#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
127 readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us)
128
129#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
130 readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us)
131
132#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
133 readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us)
134
135#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
136 readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us)
137
138#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
139 readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us)
140
141#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
142 readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us)
143
144#endif /* _LINUX_IOPOLL_H */
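
As a rough usage sketch (not part of the patch; MY_STATUS_REG and MY_STATUS_READY are invented register/bit names), a driver would typically wrap one of these helpers like this:

static int my_wait_ready(void __iomem *base)
{
	u32 status;

	/* poll roughly every 10 us, give up after 10 ms; this variant may
	 * sleep, so it must not be called from atomic context */
	return readl_poll_timeout(base + MY_STATUS_REG, status,
				  status & MY_STATUS_READY, 10, 10000);
}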
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 19e81d5ccb6d..3920a19d8194 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -16,9 +16,6 @@
16#include <linux/rbtree.h> 16#include <linux/rbtree.h>
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18 18
19/* IO virtual address start page frame number */
20#define IOVA_START_PFN (1)
21
22/* iova structure */ 19/* iova structure */
23struct iova { 20struct iova {
24 struct rb_node node; 21 struct rb_node node;
@@ -31,6 +28,8 @@ struct iova_domain {
31 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ 28 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
32 struct rb_root rbroot; /* iova domain rbtree root */ 29 struct rb_root rbroot; /* iova domain rbtree root */
33 struct rb_node *cached32_node; /* Save last alloced node */ 30 struct rb_node *cached32_node; /* Save last alloced node */
31 unsigned long granule; /* pfn granularity for this domain */
32 unsigned long start_pfn; /* Lower limit for this domain */
34 unsigned long dma_32bit_pfn; 33 unsigned long dma_32bit_pfn;
35}; 34};
36 35
@@ -39,6 +38,39 @@ static inline unsigned long iova_size(struct iova *iova)
39 return iova->pfn_hi - iova->pfn_lo + 1; 38 return iova->pfn_hi - iova->pfn_lo + 1;
40} 39}
41 40
41static inline unsigned long iova_shift(struct iova_domain *iovad)
42{
43 return __ffs(iovad->granule);
44}
45
46static inline unsigned long iova_mask(struct iova_domain *iovad)
47{
48 return iovad->granule - 1;
49}
50
51static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
52{
53 return iova & iova_mask(iovad);
54}
55
56static inline size_t iova_align(struct iova_domain *iovad, size_t size)
57{
58 return ALIGN(size, iovad->granule);
59}
60
61static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
62{
63 return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
64}
65
66static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
67{
68 return iova >> iova_shift(iovad);
69}
70
71int iommu_iova_cache_init(void);
72void iommu_iova_cache_destroy(void);
73
42struct iova *alloc_iova_mem(void); 74struct iova *alloc_iova_mem(void);
43void free_iova_mem(struct iova *iova); 75void free_iova_mem(struct iova *iova);
44void free_iova(struct iova_domain *iovad, unsigned long pfn); 76void free_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -49,7 +81,8 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
49struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, 81struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
50 unsigned long pfn_hi); 82 unsigned long pfn_hi);
51void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); 83void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
52void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); 84void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
85 unsigned long start_pfn, unsigned long pfn_32bit);
53struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); 86struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
54void put_iova_domain(struct iova_domain *iovad); 87void put_iova_domain(struct iova_domain *iovad);
55struct iova *split_and_remove_iova(struct iova_domain *iovad, 88struct iova *split_and_remove_iova(struct iova_domain *iovad,
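
A hedged sketch of a caller adapted to the reworked API: the removed IOVA_START_PFN constant becomes an explicit start_pfn argument, and the new helpers convert between byte sizes/addresses and pfns (the my_* names are invented):

static void my_domain_setup(struct iova_domain *iovad)
{
	/* 4 KiB granule, allocations start at pfn 1, 32-bit pfn limit */
	init_iova_domain(iovad, SZ_4K, 1, DMA_BIT_MASK(32) >> PAGE_SHIFT);
}

static dma_addr_t my_alloc_range(struct iova_domain *iovad, size_t size)
{
	unsigned long pages = iova_align(iovad, size) >> iova_shift(iovad);
	struct iova *iova;

	iova = alloc_iova(iovad, pages,
			  DMA_BIT_MASK(32) >> iova_shift(iovad), true);
	return iova ? iova_dma_addr(iovad, iova) : 0;
}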
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c694e7baa621..4d5169f5d7d1 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -52,6 +52,7 @@ struct ipv6_devconf {
52 __s32 force_tllao; 52 __s32 force_tllao;
53 __s32 ndisc_notify; 53 __s32 ndisc_notify;
54 __s32 suppress_frag_ndisc; 54 __s32 suppress_frag_ndisc;
55 __s32 accept_ra_mtu;
55 void *sysctl; 56 void *sysctl;
56}; 57};
57 58
@@ -124,6 +125,12 @@ struct ipv6_mc_socklist;
124struct ipv6_ac_socklist; 125struct ipv6_ac_socklist;
125struct ipv6_fl_socklist; 126struct ipv6_fl_socklist;
126 127
128struct inet6_cork {
129 struct ipv6_txoptions *opt;
130 u8 hop_limit;
131 u8 tclass;
132};
133
127/** 134/**
128 * struct ipv6_pinfo - ipv6 private area 135 * struct ipv6_pinfo - ipv6 private area
129 * 136 *
@@ -216,11 +223,7 @@ struct ipv6_pinfo {
216 struct ipv6_txoptions *opt; 223 struct ipv6_txoptions *opt;
217 struct sk_buff *pktoptions; 224 struct sk_buff *pktoptions;
218 struct sk_buff *rxpmtu; 225 struct sk_buff *rxpmtu;
219 struct { 226 struct inet6_cork cork;
220 struct ipv6_txoptions *opt;
221 u8 hop_limit;
222 u8 tclass;
223 } cork;
224}; 227};
225 228
226/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */ 229/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1e8b0cf30792..800544bc7bfd 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -33,6 +33,7 @@
33#define GICD_SETSPI_SR 0x0050 33#define GICD_SETSPI_SR 0x0050
34#define GICD_CLRSPI_SR 0x0058 34#define GICD_CLRSPI_SR 0x0058
35#define GICD_SEIR 0x0068 35#define GICD_SEIR 0x0068
36#define GICD_IGROUPR 0x0080
36#define GICD_ISENABLER 0x0100 37#define GICD_ISENABLER 0x0100
37#define GICD_ICENABLER 0x0180 38#define GICD_ICENABLER 0x0180
38#define GICD_ISPENDR 0x0200 39#define GICD_ISPENDR 0x0200
@@ -41,14 +42,37 @@
41#define GICD_ICACTIVER 0x0380 42#define GICD_ICACTIVER 0x0380
42#define GICD_IPRIORITYR 0x0400 43#define GICD_IPRIORITYR 0x0400
43#define GICD_ICFGR 0x0C00 44#define GICD_ICFGR 0x0C00
45#define GICD_IGRPMODR 0x0D00
46#define GICD_NSACR 0x0E00
44#define GICD_IROUTER 0x6000 47#define GICD_IROUTER 0x6000
48#define GICD_IDREGS 0xFFD0
45#define GICD_PIDR2 0xFFE8 49#define GICD_PIDR2 0xFFE8
46 50
51/*
52 * Those registers are actually from GICv2, but the spec demands that they
53 * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3).
54 */
55#define GICD_ITARGETSR 0x0800
56#define GICD_SGIR 0x0F00
57#define GICD_CPENDSGIR 0x0F10
58#define GICD_SPENDSGIR 0x0F20
59
47#define GICD_CTLR_RWP (1U << 31) 60#define GICD_CTLR_RWP (1U << 31)
61#define GICD_CTLR_DS (1U << 6)
48#define GICD_CTLR_ARE_NS (1U << 4) 62#define GICD_CTLR_ARE_NS (1U << 4)
49#define GICD_CTLR_ENABLE_G1A (1U << 1) 63#define GICD_CTLR_ENABLE_G1A (1U << 1)
50#define GICD_CTLR_ENABLE_G1 (1U << 0) 64#define GICD_CTLR_ENABLE_G1 (1U << 0)
51 65
66/*
67 * In systems with a single security state (what we emulate in KVM)
68 * the meaning of the interrupt group enable bits is slightly different
69 */
70#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
71#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
72
73#define GICD_TYPER_LPIS (1U << 17)
74#define GICD_TYPER_MBIS (1U << 16)
75
52#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) 76#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
53#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) 77#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
54#define GICD_TYPER_LPIS (1U << 17) 78#define GICD_TYPER_LPIS (1U << 17)
@@ -60,6 +84,8 @@
60#define GIC_PIDR2_ARCH_GICv3 0x30 84#define GIC_PIDR2_ARCH_GICv3 0x30
61#define GIC_PIDR2_ARCH_GICv4 0x40 85#define GIC_PIDR2_ARCH_GICv4 0x40
62 86
87#define GIC_V3_DIST_SIZE 0x10000
88
63/* 89/*
64 * Re-Distributor registers, offsets from RD_base 90 * Re-Distributor registers, offsets from RD_base
65 */ 91 */
@@ -78,6 +104,7 @@
78#define GICR_SYNCR 0x00C0 104#define GICR_SYNCR 0x00C0
79#define GICR_MOVLPIR 0x0100 105#define GICR_MOVLPIR 0x0100
80#define GICR_MOVALLR 0x0110 106#define GICR_MOVALLR 0x0110
107#define GICR_IDREGS GICD_IDREGS
81#define GICR_PIDR2 GICD_PIDR2 108#define GICR_PIDR2 GICD_PIDR2
82 109
83#define GICR_CTLR_ENABLE_LPIS (1UL << 0) 110#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
@@ -104,6 +131,7 @@
104/* 131/*
105 * Re-Distributor registers, offsets from SGI_base 132 * Re-Distributor registers, offsets from SGI_base
106 */ 133 */
134#define GICR_IGROUPR0 GICD_IGROUPR
107#define GICR_ISENABLER0 GICD_ISENABLER 135#define GICR_ISENABLER0 GICD_ISENABLER
108#define GICR_ICENABLER0 GICD_ICENABLER 136#define GICR_ICENABLER0 GICD_ICENABLER
109#define GICR_ISPENDR0 GICD_ISPENDR 137#define GICR_ISPENDR0 GICD_ISPENDR
@@ -112,11 +140,15 @@
112#define GICR_ICACTIVER0 GICD_ICACTIVER 140#define GICR_ICACTIVER0 GICD_ICACTIVER
113#define GICR_IPRIORITYR0 GICD_IPRIORITYR 141#define GICR_IPRIORITYR0 GICD_IPRIORITYR
114#define GICR_ICFGR0 GICD_ICFGR 142#define GICR_ICFGR0 GICD_ICFGR
143#define GICR_IGRPMODR0 GICD_IGRPMODR
144#define GICR_NSACR GICD_NSACR
115 145
116#define GICR_TYPER_PLPIS (1U << 0) 146#define GICR_TYPER_PLPIS (1U << 0)
117#define GICR_TYPER_VLPIS (1U << 1) 147#define GICR_TYPER_VLPIS (1U << 1)
118#define GICR_TYPER_LAST (1U << 4) 148#define GICR_TYPER_LAST (1U << 4)
119 149
150#define GIC_V3_REDIST_SIZE 0x20000
151
120#define LPI_PROP_GROUP1 (1 << 1) 152#define LPI_PROP_GROUP1 (1 << 1)
121#define LPI_PROP_ENABLED (1 << 0) 153#define LPI_PROP_ENABLED (1 << 0)
122 154
@@ -248,6 +280,18 @@
248#define ICC_SRE_EL2_SRE (1 << 0) 280#define ICC_SRE_EL2_SRE (1 << 0)
249#define ICC_SRE_EL2_ENABLE (1 << 3) 281#define ICC_SRE_EL2_ENABLE (1 << 3)
250 282
283#define ICC_SGI1R_TARGET_LIST_SHIFT 0
284#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
285#define ICC_SGI1R_AFFINITY_1_SHIFT 16
286#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
287#define ICC_SGI1R_SGI_ID_SHIFT 24
288#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
289#define ICC_SGI1R_AFFINITY_2_SHIFT 32
 290#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
291#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
292#define ICC_SGI1R_AFFINITY_3_SHIFT 48
 293#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
294
251/* 295/*
252 * System register definitions 296 * System register definitions
253 */ 297 */
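
The new ICC_SGI1R_* field definitions are meant for composing the SGI generation register. A hedged sketch of how a value could be assembled (illustrative only, not the kernel's actual SGI-sending code; my_compose_sgi1r is an invented helper):

static u64 my_compose_sgi1r(u8 aff3, u8 aff2, u8 aff1, u8 sgi, u16 targets)
{
	return ((u64)aff3 << ICC_SGI1R_AFFINITY_3_SHIFT) |
	       ((u64)aff2 << ICC_SGI1R_AFFINITY_2_SHIFT) |
	       ((u64)sgi  << ICC_SGI1R_SGI_ID_SHIFT)     |
	       ((u64)aff1 << ICC_SGI1R_AFFINITY_1_SHIFT) |
	       targets;
}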
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
index e06b370cfc0d..2e3d1afeb674 100644
--- a/include/linux/irqchip/irq-omap-intc.h
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -18,9 +18,7 @@
18#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H 18#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
19#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H 19#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
20 20
21void omap2_init_irq(void);
22void omap3_init_irq(void); 21void omap3_init_irq(void);
23void ti81xx_init_irq(void);
24 22
25int omap_irq_pending(void); 23int omap_irq_pending(void);
26void omap_intc_save_context(void); 24void omap_intc_save_context(void);
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 31229e0be90b..d32615280be9 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -956,15 +956,6 @@ void __log_wait_for_space(journal_t *journal);
956extern void __journal_drop_transaction(journal_t *, transaction_t *); 956extern void __journal_drop_transaction(journal_t *, transaction_t *);
957extern int cleanup_journal_tail(journal_t *); 957extern int cleanup_journal_tail(journal_t *);
958 958
959/* Debugging code only: */
960
961#define jbd_ENOSYS() \
962do { \
963 printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
964 current->state = TASK_UNINTERRUPTIBLE; \
965 schedule(); \
966} while (1)
967
968/* 959/*
969 * is_journal_abort 960 * is_journal_abort
970 * 961 *
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 704b9a599b26..20e7f78041c8 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1251,15 +1251,6 @@ void __jbd2_log_wait_for_space(journal_t *journal);
1251extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); 1251extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
1252extern int jbd2_cleanup_journal_tail(journal_t *); 1252extern int jbd2_cleanup_journal_tail(journal_t *);
1253 1253
1254/* Debugging code only: */
1255
1256#define jbd_ENOSYS() \
1257do { \
1258 printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
1259 current->state = TASK_UNINTERRUPTIBLE; \
1260 schedule(); \
1261} while (1)
1262
1263/* 1254/*
1264 * is_journal_abort 1255 * is_journal_abort
1265 * 1256 *
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
new file mode 100644
index 000000000000..72ba725ddf9c
--- /dev/null
+++ b/include/linux/kasan.h
@@ -0,0 +1,89 @@
1#ifndef _LINUX_KASAN_H
2#define _LINUX_KASAN_H
3
4#include <linux/types.h>
5
6struct kmem_cache;
7struct page;
8
9#ifdef CONFIG_KASAN
10
11#define KASAN_SHADOW_SCALE_SHIFT 3
12#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
13
14#include <asm/kasan.h>
15#include <linux/sched.h>
16
17static inline void *kasan_mem_to_shadow(const void *addr)
18{
19 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
20 + KASAN_SHADOW_OFFSET;
21}
22
23/* Enable reporting bugs after kasan_disable_current() */
24static inline void kasan_enable_current(void)
25{
26 current->kasan_depth++;
27}
28
29/* Disable reporting bugs for current task */
30static inline void kasan_disable_current(void)
31{
32 current->kasan_depth--;
33}
34
35void kasan_unpoison_shadow(const void *address, size_t size);
36
37void kasan_alloc_pages(struct page *page, unsigned int order);
38void kasan_free_pages(struct page *page, unsigned int order);
39
40void kasan_poison_slab(struct page *page);
41void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
42void kasan_poison_object_data(struct kmem_cache *cache, void *object);
43
44void kasan_kmalloc_large(const void *ptr, size_t size);
45void kasan_kfree_large(const void *ptr);
46void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
47void kasan_krealloc(const void *object, size_t new_size);
48
49void kasan_slab_alloc(struct kmem_cache *s, void *object);
50void kasan_slab_free(struct kmem_cache *s, void *object);
51
52#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
53
54int kasan_module_alloc(void *addr, size_t size);
55void kasan_module_free(void *addr);
56
57#else /* CONFIG_KASAN */
58
59#define MODULE_ALIGN 1
60
61static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
62
63static inline void kasan_enable_current(void) {}
64static inline void kasan_disable_current(void) {}
65
66static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
67static inline void kasan_free_pages(struct page *page, unsigned int order) {}
68
69static inline void kasan_poison_slab(struct page *page) {}
70static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
71 void *object) {}
72static inline void kasan_poison_object_data(struct kmem_cache *cache,
73 void *object) {}
74
75static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
76static inline void kasan_kfree_large(const void *ptr) {}
77static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
78 size_t size) {}
79static inline void kasan_krealloc(const void *object, size_t new_size) {}
80
81static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
82static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
83
84static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
85static inline void kasan_module_free(void *addr) {}
86
87#endif /* CONFIG_KASAN */
88
89#endif /* LINUX_KASAN_H */
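
A hedged sketch of the enable/disable pair in use: suppressing nested reports while deliberately reading memory that may already be poisoned, for instance from an error-report path (my_dump_object is an invented helper):

static void my_dump_object(const void *object, size_t size)
{
	kasan_disable_current();
	print_hex_dump(KERN_INFO, "obj: ", DUMP_PREFIX_OFFSET, 16, 1,
		       object, size, false);
	kasan_enable_current();
}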
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 290db1269c4c..75ae2e2631fc 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,11 +13,54 @@
13 * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> 13 * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
14 */ 14 */
15 15
 16/* Shifted versions of the command enable bits are used if the command
 17 * has no arguments (see kdb_check_flags). This allows commands, such as
 18 * go, to have different permissions depending upon whether they are called
 19 * with an argument.
20 */
21#define KDB_ENABLE_NO_ARGS_SHIFT 10
22
16typedef enum { 23typedef enum {
17 KDB_REPEAT_NONE = 0, /* Do not repeat this command */ 24 KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
18 KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ 25 KDB_ENABLE_MEM_READ = (1 << 1),
19 KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ 26 KDB_ENABLE_MEM_WRITE = (1 << 2),
20} kdb_repeat_t; 27 KDB_ENABLE_REG_READ = (1 << 3),
28 KDB_ENABLE_REG_WRITE = (1 << 4),
29 KDB_ENABLE_INSPECT = (1 << 5),
30 KDB_ENABLE_FLOW_CTRL = (1 << 6),
31 KDB_ENABLE_SIGNAL = (1 << 7),
32 KDB_ENABLE_REBOOT = (1 << 8),
33 /* User exposed values stop here, all remaining flags are
 34 * exclusively used to describe a command's behaviour.
35 */
36
37 KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
38 KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
39
40 KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
41 KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
42 << KDB_ENABLE_NO_ARGS_SHIFT,
43 KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
44 << KDB_ENABLE_NO_ARGS_SHIFT,
45 KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
46 << KDB_ENABLE_NO_ARGS_SHIFT,
47 KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
48 << KDB_ENABLE_NO_ARGS_SHIFT,
49 KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
50 << KDB_ENABLE_NO_ARGS_SHIFT,
51 KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
52 << KDB_ENABLE_NO_ARGS_SHIFT,
53 KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
54 << KDB_ENABLE_NO_ARGS_SHIFT,
55 KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
56 << KDB_ENABLE_NO_ARGS_SHIFT,
57 KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
58 << KDB_ENABLE_NO_ARGS_SHIFT,
59 KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
60
61 KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
62 KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
63} kdb_cmdflags_t;
21 64
22typedef int (*kdb_func_t)(int, const char **); 65typedef int (*kdb_func_t)(int, const char **);
23 66
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
62#define KDB_BADLENGTH (-19) 105#define KDB_BADLENGTH (-19)
63#define KDB_NOBP (-20) 106#define KDB_NOBP (-20)
64#define KDB_BADADDR (-21) 107#define KDB_BADADDR (-21)
108#define KDB_NOPERM (-22)
65 109
66/* 110/*
67 * kdb_diemsg 111 * kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
146 190
147/* Dynamic kdb shell command registration */ 191/* Dynamic kdb shell command registration */
148extern int kdb_register(char *, kdb_func_t, char *, char *, short); 192extern int kdb_register(char *, kdb_func_t, char *, char *, short);
149extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, 193extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
150 short, kdb_repeat_t); 194 short, kdb_cmdflags_t);
151extern int kdb_unregister(char *); 195extern int kdb_unregister(char *);
152#else /* ! CONFIG_KGDB_KDB */ 196#else /* ! CONFIG_KGDB_KDB */
153static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } 197static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
154static inline void kdb_init(int level) {} 198static inline void kdb_init(int level) {}
155static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, 199static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
156 char *help, short minlen) { return 0; } 200 char *help, short minlen) { return 0; }
157static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage, 201static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
158 char *help, short minlen, 202 char *help, short minlen,
159 kdb_repeat_t repeat) { return 0; } 203 kdb_cmdflags_t flags) { return 0; }
160static inline int kdb_unregister(char *cmd) { return 0; } 204static inline int kdb_unregister(char *cmd) { return 0; }
161#endif /* CONFIG_KGDB_KDB */ 205#endif /* CONFIG_KGDB_KDB */
162enum { 206enum {
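
A hedged sketch of the converted registration call (the "mydump" command and kdb_mydump handler are invented): a command that only needs read access to memory and may be repeated without its arguments would be registered as:

static int kdb_mydump(int argc, const char **argv)
{
	/* ... inspect device state ... */
	return 0;
}

	kdb_register_flags("mydump", kdb_mydump, "<addr>",
			   "Dump my device state", 0,
			   KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);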
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5449d2f4a1ef..d6d630d31ef3 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -176,7 +176,7 @@ extern int _cond_resched(void);
176 */ 176 */
177# define might_sleep() \ 177# define might_sleep() \
178 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) 178 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
179# define sched_annotate_sleep() __set_current_state(TASK_RUNNING) 179# define sched_annotate_sleep() (current->task_state_change = 0)
180#else 180#else
181 static inline void ___might_sleep(const char *file, int line, 181 static inline void ___might_sleep(const char *file, int line,
182 int preempt_offset) { } 182 int preempt_offset) { }
@@ -471,6 +471,7 @@ extern enum system_states {
471#define TAINT_OOT_MODULE 12 471#define TAINT_OOT_MODULE 12
472#define TAINT_UNSIGNED_MODULE 13 472#define TAINT_UNSIGNED_MODULE 13
473#define TAINT_SOFTLOCKUP 14 473#define TAINT_SOFTLOCKUP 14
474#define TAINT_LIVEPATCH 15
474 475
475extern const char hex_asc[]; 476extern const char hex_asc[];
476#define hex_asc_lo(x) hex_asc[((x) & 0x0f)] 477#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
@@ -799,9 +800,6 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
799 const typeof( ((type *)0)->member ) *__mptr = (ptr); \ 800 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
800 (type *)( (char *)__mptr - offsetof(type,member) );}) 801 (type *)( (char *)__mptr - offsetof(type,member) );})
801 802
802/* Trap pasters of __FUNCTION__ at compile-time */
803#define __FUNCTION__ (__func__)
804
805/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ 803/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
806#ifdef CONFIG_FTRACE_MCOUNT_RECORD 804#ifdef CONFIG_FTRACE_MCOUNT_RECORD
807# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD 805# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index d4e01b358341..71ecdab1671b 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -43,7 +43,6 @@ enum kernfs_node_flag {
43 KERNFS_HAS_SEQ_SHOW = 0x0040, 43 KERNFS_HAS_SEQ_SHOW = 0x0040,
44 KERNFS_HAS_MMAP = 0x0080, 44 KERNFS_HAS_MMAP = 0x0080,
45 KERNFS_LOCKDEP = 0x0100, 45 KERNFS_LOCKDEP = 0x0100,
46 KERNFS_STATIC_NAME = 0x0200,
47 KERNFS_SUICIDAL = 0x0400, 46 KERNFS_SUICIDAL = 0x0400,
48 KERNFS_SUICIDED = 0x0800, 47 KERNFS_SUICIDED = 0x0800,
49}; 48};
@@ -291,7 +290,6 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
291 umode_t mode, loff_t size, 290 umode_t mode, loff_t size,
292 const struct kernfs_ops *ops, 291 const struct kernfs_ops *ops,
293 void *priv, const void *ns, 292 void *priv, const void *ns,
294 bool name_is_static,
295 struct lock_class_key *key); 293 struct lock_class_key *key);
296struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, 294struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
297 const char *name, 295 const char *name,
@@ -369,8 +367,7 @@ kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
369static inline struct kernfs_node * 367static inline struct kernfs_node *
370__kernfs_create_file(struct kernfs_node *parent, const char *name, 368__kernfs_create_file(struct kernfs_node *parent, const char *name,
371 umode_t mode, loff_t size, const struct kernfs_ops *ops, 369 umode_t mode, loff_t size, const struct kernfs_ops *ops,
372 void *priv, const void *ns, bool name_is_static, 370 void *priv, const void *ns, struct lock_class_key *key)
373 struct lock_class_key *key)
374{ return ERR_PTR(-ENOSYS); } 371{ return ERR_PTR(-ENOSYS); }
375 372
376static inline struct kernfs_node * 373static inline struct kernfs_node *
@@ -439,7 +436,7 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
439 key = (struct lock_class_key *)&ops->lockdep_key; 436 key = (struct lock_class_key *)&ops->lockdep_key;
440#endif 437#endif
441 return __kernfs_create_file(parent, name, mode, size, ops, priv, ns, 438 return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
442 false, key); 439 key);
443} 440}
444 441
445static inline struct kernfs_node * 442static inline struct kernfs_node *
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 9d957b7ae095..e60a745ac198 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -1,6 +1,19 @@
1#ifndef LINUX_KEXEC_H 1#ifndef LINUX_KEXEC_H
2#define LINUX_KEXEC_H 2#define LINUX_KEXEC_H
3 3
4#define IND_DESTINATION_BIT 0
5#define IND_INDIRECTION_BIT 1
6#define IND_DONE_BIT 2
7#define IND_SOURCE_BIT 3
8
9#define IND_DESTINATION (1 << IND_DESTINATION_BIT)
10#define IND_INDIRECTION (1 << IND_INDIRECTION_BIT)
11#define IND_DONE (1 << IND_DONE_BIT)
12#define IND_SOURCE (1 << IND_SOURCE_BIT)
13#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
14
15#if !defined(__ASSEMBLY__)
16
4#include <uapi/linux/kexec.h> 17#include <uapi/linux/kexec.h>
5 18
6#ifdef CONFIG_KEXEC 19#ifdef CONFIG_KEXEC
@@ -64,10 +77,6 @@
64 */ 77 */
65 78
66typedef unsigned long kimage_entry_t; 79typedef unsigned long kimage_entry_t;
67#define IND_DESTINATION 0x1
68#define IND_INDIRECTION 0x2
69#define IND_DONE 0x4
70#define IND_SOURCE 0x8
71 80
72struct kexec_segment { 81struct kexec_segment {
73 /* 82 /*
@@ -122,8 +131,6 @@ struct kimage {
122 kimage_entry_t *entry; 131 kimage_entry_t *entry;
123 kimage_entry_t *last_entry; 132 kimage_entry_t *last_entry;
124 133
125 unsigned long destination;
126
127 unsigned long start; 134 unsigned long start;
128 struct page *control_code_page; 135 struct page *control_code_page;
129 struct page *swap_page; 136 struct page *swap_page;
@@ -313,4 +320,7 @@ struct task_struct;
313static inline void crash_kexec(struct pt_regs *regs) { } 320static inline void crash_kexec(struct pt_regs *regs) { }
314static inline int kexec_should_crash(struct task_struct *p) { return 0; } 321static inline int kexec_should_crash(struct task_struct *p) { return 0; }
315#endif /* CONFIG_KEXEC */ 322#endif /* CONFIG_KEXEC */
323
 324#endif /* !defined(__ASSEMBLY__) */ 
325
316#endif /* LINUX_KEXEC_H */ 326#endif /* LINUX_KEXEC_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 5297f9fa0ef2..1ab54754a86d 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -308,7 +308,8 @@ struct optimized_kprobe {
308/* Architecture dependent functions for direct jump optimization */ 308/* Architecture dependent functions for direct jump optimization */
309extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn); 309extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
310extern int arch_check_optimized_kprobe(struct optimized_kprobe *op); 310extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
311extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op); 311extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
312 struct kprobe *orig);
312extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op); 313extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
313extern void arch_optimize_kprobes(struct list_head *oplist); 314extern void arch_optimize_kprobes(struct list_head *oplist);
314extern void arch_unoptimize_kprobes(struct list_head *oplist, 315extern void arch_unoptimize_kprobes(struct list_head *oplist,
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index c9d645ad98ff..5fc3d1083071 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -166,7 +166,17 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
166} 166}
167 167
168#if BITS_PER_LONG < 64 168#if BITS_PER_LONG < 64
169extern u64 ktime_divns(const ktime_t kt, s64 div); 169extern u64 __ktime_divns(const ktime_t kt, s64 div);
170static inline u64 ktime_divns(const ktime_t kt, s64 div)
171{
172 if (__builtin_constant_p(div) && !(div >> 32)) {
173 u64 ns = kt.tv64;
174 do_div(ns, div);
175 return ns;
176 } else {
177 return __ktime_divns(kt, div);
178 }
179}
170#else /* BITS_PER_LONG < 64 */ 180#else /* BITS_PER_LONG < 64 */
171# define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) 181# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
172#endif 182#endif
@@ -186,6 +196,11 @@ static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
186 return ktime_to_us(ktime_sub(later, earlier)); 196 return ktime_to_us(ktime_sub(later, earlier));
187} 197}
188 198
199static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
200{
201 return ktime_to_ms(ktime_sub(later, earlier));
202}
203
189static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) 204static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
190{ 205{
191 return ktime_add_ns(kt, usec * NSEC_PER_USEC); 206 return ktime_add_ns(kt, usec * NSEC_PER_USEC);
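
A hedged sketch of the new millisecond-delta helper (my_measure is an invented function); on 32-bit, constant divisors such as the one hidden in ktime_to_ms() also benefit from the new do_div() fast path:

static void my_measure(void (*fn)(void))
{
	ktime_t start = ktime_get();

	fn();
	pr_info("took %lld ms\n", ktime_ms_delta(ktime_get(), start));
}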
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 26f106022c88..d12b2104d19b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -33,10 +33,6 @@
33 33
34#include <asm/kvm_host.h> 34#include <asm/kvm_host.h>
35 35
36#ifndef KVM_MMIO_SIZE
37#define KVM_MMIO_SIZE 8
38#endif
39
40/* 36/*
41 * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used 37 * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
42 * in kvm, other bits are visible for userspace which are defined in 38 * in kvm, other bits are visible for userspace which are defined in
@@ -200,17 +196,6 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
200int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); 196int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
201#endif 197#endif
202 198
203/*
204 * Carry out a gup that requires IO. Allow the mm to relinquish the mmap
205 * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL
206 * controls whether we retry the gup one more time to completion in that case.
207 * Typically this is called after a FAULT_FLAG_RETRY_NOWAIT in the main tdp
208 * handler.
209 */
210int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
211 unsigned long addr, bool write_fault,
212 struct page **pagep);
213
214enum { 199enum {
215 OUTSIDE_GUEST_MODE, 200 OUTSIDE_GUEST_MODE,
216 IN_GUEST_MODE, 201 IN_GUEST_MODE,
@@ -611,6 +596,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
611 596
612int kvm_get_dirty_log(struct kvm *kvm, 597int kvm_get_dirty_log(struct kvm *kvm,
613 struct kvm_dirty_log *log, int *is_dirty); 598 struct kvm_dirty_log *log, int *is_dirty);
599
600int kvm_get_dirty_log_protect(struct kvm *kvm,
601 struct kvm_dirty_log *log, bool *is_dirty);
602
603void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
604 struct kvm_memory_slot *slot,
605 gfn_t gfn_offset,
606 unsigned long mask);
607
614int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 608int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
615 struct kvm_dirty_log *log); 609 struct kvm_dirty_log *log);
616 610
@@ -652,7 +646,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
652void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); 646void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
653struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); 647struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
654int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); 648int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
655int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); 649void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
656void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); 650void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
657 651
658int kvm_arch_hardware_enable(void); 652int kvm_arch_hardware_enable(void);
@@ -1042,6 +1036,8 @@ void kvm_unregister_device_ops(u32 type);
1042 1036
1043extern struct kvm_device_ops kvm_mpic_ops; 1037extern struct kvm_device_ops kvm_mpic_ops;
1044extern struct kvm_device_ops kvm_xics_ops; 1038extern struct kvm_device_ops kvm_xics_ops;
1039extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
1040extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
1045 1041
1046#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 1042#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
1047 1043
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
new file mode 100644
index 000000000000..5ba2facd7a51
--- /dev/null
+++ b/include/linux/led-class-flash.h
@@ -0,0 +1,207 @@
1/*
2 * LED Flash class interface
3 *
4 * Copyright (C) 2015 Samsung Electronics Co., Ltd.
5 * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12#ifndef __LINUX_FLASH_LEDS_H_INCLUDED
13#define __LINUX_FLASH_LEDS_H_INCLUDED
14
15#include <linux/leds.h>
16#include <uapi/linux/v4l2-controls.h>
17
18struct device_node;
19struct led_classdev_flash;
20
21/*
22 * Supported led fault bits - must be kept in synch
23 * with V4L2_FLASH_FAULT bits.
24 */
25#define LED_FAULT_OVER_VOLTAGE (1 << 0)
26#define LED_FAULT_TIMEOUT (1 << 1)
27#define LED_FAULT_OVER_TEMPERATURE (1 << 2)
28#define LED_FAULT_SHORT_CIRCUIT (1 << 3)
29#define LED_FAULT_OVER_CURRENT (1 << 4)
30#define LED_FAULT_INDICATOR (1 << 5)
31#define LED_FAULT_UNDER_VOLTAGE (1 << 6)
32#define LED_FAULT_INPUT_VOLTAGE (1 << 7)
33#define LED_FAULT_LED_OVER_TEMPERATURE (1 << 8)
34#define LED_NUM_FLASH_FAULTS 9
35
36#define LED_FLASH_MAX_SYSFS_GROUPS 7
37
38struct led_flash_ops {
39 /* set flash brightness */
40 int (*flash_brightness_set)(struct led_classdev_flash *fled_cdev,
41 u32 brightness);
42 /* get flash brightness */
43 int (*flash_brightness_get)(struct led_classdev_flash *fled_cdev,
44 u32 *brightness);
45 /* set flash strobe state */
46 int (*strobe_set)(struct led_classdev_flash *fled_cdev, bool state);
47 /* get flash strobe state */
48 int (*strobe_get)(struct led_classdev_flash *fled_cdev, bool *state);
49 /* set flash timeout */
50 int (*timeout_set)(struct led_classdev_flash *fled_cdev, u32 timeout);
51 /* get the flash LED fault */
52 int (*fault_get)(struct led_classdev_flash *fled_cdev, u32 *fault);
53};
54
55/*
56 * Current value of a flash setting along
57 * with its constraints.
58 */
59struct led_flash_setting {
60 /* maximum allowed value */
61 u32 min;
62 /* maximum allowed value */
63 u32 max;
64 /* step value */
65 u32 step;
66 /* current value */
67 u32 val;
68};
69
70struct led_classdev_flash {
71 /* led class device */
72 struct led_classdev led_cdev;
73
74 /* flash led specific ops */
75 const struct led_flash_ops *ops;
76
77 /* flash brightness value in microamperes along with its constraints */
78 struct led_flash_setting brightness;
79
80 /* flash timeout value in microseconds along with its constraints */
81 struct led_flash_setting timeout;
82
83 /* LED Flash class sysfs groups */
84 const struct attribute_group *sysfs_groups[LED_FLASH_MAX_SYSFS_GROUPS];
85
86 /* LEDs available for flash strobe synchronization */
87 struct led_classdev_flash **sync_leds;
88
89 /* Number of LEDs available for flash strobe synchronization */
90 int num_sync_leds;
91
92 /*
93 * The identifier of the sub-led to synchronize the flash strobe with.
94 * Identifiers start from 1, which reflects the first element from the
95 * sync_leds array. 0 means that the flash strobe should not be
96 * synchronized.
97 */
98 u32 sync_led_id;
99};
100
101static inline struct led_classdev_flash *lcdev_to_flcdev(
102 struct led_classdev *lcdev)
103{
104 return container_of(lcdev, struct led_classdev_flash, led_cdev);
105}
106
107/**
108 * led_classdev_flash_register - register a new object of led_classdev class
109 * with support for flash LEDs
110 * @parent: the flash LED to register
111 * @fled_cdev: the led_classdev_flash structure for this device
112 *
113 * Returns: 0 on success or negative error value on failure
114 */
115extern int led_classdev_flash_register(struct device *parent,
116 struct led_classdev_flash *fled_cdev);
117
118/**
119 * led_classdev_flash_unregister - unregisters an object of led_classdev class
120 * with support for flash LEDs
121 * @fled_cdev: the flash LED to unregister
122 *
 123 * Unregister an object previously registered via led_classdev_flash_register().
124 */
125extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
126
127/**
128 * led_set_flash_strobe - setup flash strobe
129 * @fled_cdev: the flash LED to set strobe on
130 * @state: 1 - strobe flash, 0 - stop flash strobe
131 *
132 * Strobe the flash LED.
133 *
134 * Returns: 0 on success or negative error value on failure
135 */
136static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
137 bool state)
138{
139 return fled_cdev->ops->strobe_set(fled_cdev, state);
140}
141
142/**
143 * led_get_flash_strobe - get flash strobe status
144 * @fled_cdev: the flash LED to query
145 * @state: 1 - flash is strobing, 0 - flash is off
146 *
147 * Check whether the flash is strobing at the moment.
148 *
149 * Returns: 0 on success or negative error value on failure
150 */
151static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
152 bool *state)
153{
154 if (fled_cdev->ops->strobe_get)
155 return fled_cdev->ops->strobe_get(fled_cdev, state);
156
157 return -EINVAL;
158}
159
160/**
161 * led_set_flash_brightness - set flash LED brightness
162 * @fled_cdev: the flash LED to set
163 * @brightness: the brightness to set it to
164 *
165 * Set a flash LED's brightness.
166 *
167 * Returns: 0 on success or negative error value on failure
168 */
169extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
170 u32 brightness);
171
172/**
173 * led_update_flash_brightness - update flash LED brightness
174 * @fled_cdev: the flash LED to query
175 *
176 * Get a flash LED's current brightness and update led_flash->brightness
177 * member with the obtained value.
178 *
179 * Returns: 0 on success or negative error value on failure
180 */
181extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
182
183/**
184 * led_set_flash_timeout - set flash LED timeout
185 * @fled_cdev: the flash LED to set
186 * @timeout: the flash timeout to set it to
187 *
188 * Set the flash strobe duration.
189 *
190 * Returns: 0 on success or negative error value on failure
191 */
192extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
193 u32 timeout);
194
195/**
196 * led_get_flash_fault - get the flash LED fault
197 * @fled_cdev: the flash LED to query
198 * @fault: bitmask containing flash faults
199 *
200 * Get the flash LED fault.
201 *
202 * Returns: 0 on success or negative error value on failure
203 */
204extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev,
205 u32 *fault);
206
207#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
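
A hedged sketch of a driver wiring up the new class (the my_* names are invented and error handling is trimmed); only strobe_set is shown, the other ops would be filled in the same way:

static int my_strobe_set(struct led_classdev_flash *fled_cdev, bool state)
{
	/* write the strobe bit in the hypothetical chip register here */
	return 0;
}

static const struct led_flash_ops my_flash_ops = {
	.strobe_set = my_strobe_set,
};

static int my_probe(struct platform_device *pdev)
{
	struct led_classdev_flash *fled;

	fled = devm_kzalloc(&pdev->dev, sizeof(*fled), GFP_KERNEL);
	if (!fled)
		return -ENOMEM;

	fled->ops = &my_flash_ops;
	fled->led_cdev.name = "my:flash";
	fled->led_cdev.flags |= LED_DEV_CAP_FLASH;

	return led_classdev_flash_register(&pdev->dev, fled);
}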
diff --git a/include/linux/leds.h b/include/linux/leds.h
index cfceef32c9b3..f70f84f35674 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -46,6 +46,8 @@ struct led_classdev {
46#define LED_SYSFS_DISABLE (1 << 20) 46#define LED_SYSFS_DISABLE (1 << 20)
47#define SET_BRIGHTNESS_ASYNC (1 << 21) 47#define SET_BRIGHTNESS_ASYNC (1 << 21)
48#define SET_BRIGHTNESS_SYNC (1 << 22) 48#define SET_BRIGHTNESS_SYNC (1 << 22)
49#define LED_DEV_CAP_FLASH (1 << 23)
50#define LED_DEV_CAP_SYNC_STROBE (1 << 24)
49 51
50 /* Set LED brightness level */ 52 /* Set LED brightness level */
51 /* Must not sleep, use a workqueue if needed */ 53 /* Must not sleep, use a workqueue if needed */
@@ -81,6 +83,7 @@ struct led_classdev {
81 unsigned long blink_delay_on, blink_delay_off; 83 unsigned long blink_delay_on, blink_delay_off;
82 struct timer_list blink_timer; 84 struct timer_list blink_timer;
83 int blink_brightness; 85 int blink_brightness;
86 void (*flash_resume)(struct led_classdev *led_cdev);
84 87
85 struct work_struct set_brightness_work; 88 struct work_struct set_brightness_work;
86 int delayed_set_value; 89 int delayed_set_value;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2d182413b1db..fc03efa64ffe 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -231,6 +231,7 @@ enum {
231 ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity 231 ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
232 * led */ 232 * led */
233 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ 233 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
234 ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
234 235
235 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 236 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
236 237
@@ -422,6 +423,7 @@ enum {
422 ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ 423 ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
423 ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ 424 ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
424 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ 425 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
426 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
425 427
426 /* DMA mask for user DMA control: User visible values; DO NOT 428 /* DMA mask for user DMA control: User visible values; DO NOT
427 renumber */ 429 renumber */
@@ -821,10 +823,10 @@ struct ata_port {
821 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 823 unsigned int cbl; /* cable type; ATA_CBL_xxx */
822 824
823 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; 825 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
824 unsigned long qc_allocated; 826 unsigned long sas_tag_allocated; /* for sas tag allocation only */
825 unsigned int qc_active; 827 unsigned int qc_active;
826 int nr_active_links; /* #links with active qcs */ 828 int nr_active_links; /* #links with active qcs */
827 unsigned int last_tag; /* track next tag hw expects */ 829 unsigned int sas_last_tag; /* track next tag hw expects */
828 830
829 struct ata_link link; /* host default link */ 831 struct ata_link link; /* host default link */
830 struct ata_link *slave_link; /* see ata_slave_link_init() */ 832 struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -1338,12 +1340,19 @@ extern const struct ata_port_operations ata_base_port_ops;
1338extern const struct ata_port_operations sata_port_ops; 1340extern const struct ata_port_operations sata_port_ops;
1339extern struct device_attribute *ata_common_sdev_attrs[]; 1341extern struct device_attribute *ata_common_sdev_attrs[];
1340 1342
1343/*
1344 * All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
 1345 * by the edge drivers, because the 'module' field of sht must be the
 1346 * edge driver's module reference; otherwise the driver can be unloaded
 1347 * even while the scsi_device is still being accessed.
1348 */
1341#define ATA_BASE_SHT(drv_name) \ 1349#define ATA_BASE_SHT(drv_name) \
1342 .module = THIS_MODULE, \ 1350 .module = THIS_MODULE, \
1343 .name = drv_name, \ 1351 .name = drv_name, \
1344 .ioctl = ata_scsi_ioctl, \ 1352 .ioctl = ata_scsi_ioctl, \
1345 .queuecommand = ata_scsi_queuecmd, \ 1353 .queuecommand = ata_scsi_queuecmd, \
1346 .can_queue = ATA_DEF_QUEUE, \ 1354 .can_queue = ATA_DEF_QUEUE, \
1355 .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
1347 .this_id = ATA_SHT_THIS_ID, \ 1356 .this_id = ATA_SHT_THIS_ID, \
1348 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \ 1357 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
1349 .emulated = ATA_SHT_EMULATED, \ 1358 .emulated = ATA_SHT_EMULATED, \
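
As a hedged sketch of what the new comment asks for, an edge driver would instantiate the template in its own source file so that THIS_MODULE (expanded inside ATA_BASE_SHT) resolves to that driver's module; the driver name and the two extra fields below are illustrative, not mandated by the header:

#include <linux/libata.h>
#include <scsi/scsi_host.h>

/* hypothetical edge driver: instantiating the template here makes
 * .module point at this driver, so it stays pinned while any of its
 * scsi_devices is still being accessed */
static struct scsi_host_template foo_ata_sht = {
	ATA_BASE_SHT("foo_ata"),
	.sg_tablesize	= LIBATA_MAX_PRD,
	.dma_boundary	= ATA_DMA_BOUNDARY,
};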
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index f3434533fbf8..2a6b9947aaa3 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -9,6 +9,9 @@
9 9
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/nodemask.h> 11#include <linux/nodemask.h>
12#include <linux/shrinker.h>
13
14struct mem_cgroup;
12 15
13/* list_lru_walk_cb has to always return one of those */ 16/* list_lru_walk_cb has to always return one of those */
14enum lru_status { 17enum lru_status {
@@ -21,24 +24,45 @@ enum lru_status {
21 internally, but has to return locked. */ 24 internally, but has to return locked. */
22}; 25};
23 26
24struct list_lru_node { 27struct list_lru_one {
25 spinlock_t lock;
26 struct list_head list; 28 struct list_head list;
27 /* kept as signed so we can catch imbalance bugs */ 29 /* may become negative during memcg reparenting */
28 long nr_items; 30 long nr_items;
31};
32
33struct list_lru_memcg {
34 /* array of per cgroup lists, indexed by memcg_cache_id */
35 struct list_lru_one *lru[0];
36};
37
38struct list_lru_node {
39 /* protects all lists on the node, including per cgroup */
40 spinlock_t lock;
41 /* global list, used for the root cgroup in cgroup aware lrus */
42 struct list_lru_one lru;
43#ifdef CONFIG_MEMCG_KMEM
 44 /* for cgroup aware lrus this points to per cgroup lists, otherwise NULL */
45 struct list_lru_memcg *memcg_lrus;
46#endif
29} ____cacheline_aligned_in_smp; 47} ____cacheline_aligned_in_smp;
30 48
31struct list_lru { 49struct list_lru {
32 struct list_lru_node *node; 50 struct list_lru_node *node;
33 nodemask_t active_nodes; 51#ifdef CONFIG_MEMCG_KMEM
52 struct list_head list;
53#endif
34}; 54};
35 55
36void list_lru_destroy(struct list_lru *lru); 56void list_lru_destroy(struct list_lru *lru);
37int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key); 57int __list_lru_init(struct list_lru *lru, bool memcg_aware,
38static inline int list_lru_init(struct list_lru *lru) 58 struct lock_class_key *key);
39{ 59
40 return list_lru_init_key(lru, NULL); 60#define list_lru_init(lru) __list_lru_init((lru), false, NULL)
41} 61#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key))
62#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL)
63
64int memcg_update_all_list_lrus(int num_memcgs);
65void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
42 66
43/** 67/**
44 * list_lru_add: add an element to the lru list's tail 68 * list_lru_add: add an element to the lru list's tail
@@ -72,32 +96,48 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item);
72bool list_lru_del(struct list_lru *lru, struct list_head *item); 96bool list_lru_del(struct list_lru *lru, struct list_head *item);
73 97
74/** 98/**
75 * list_lru_count_node: return the number of objects currently held by @lru 99 * list_lru_count_one: return the number of objects currently held by @lru
76 * @lru: the lru pointer. 100 * @lru: the lru pointer.
77 * @nid: the node id to count from. 101 * @nid: the node id to count from.
102 * @memcg: the cgroup to count from.
78 * 103 *
79 * Always return a non-negative number, 0 for empty lists. There is no 104 * Always return a non-negative number, 0 for empty lists. There is no
80 * guarantee that the list is not updated while the count is being computed. 105 * guarantee that the list is not updated while the count is being computed.
81 * Callers that want such a guarantee need to provide an outer lock. 106 * Callers that want such a guarantee need to provide an outer lock.
82 */ 107 */
108unsigned long list_lru_count_one(struct list_lru *lru,
109 int nid, struct mem_cgroup *memcg);
83unsigned long list_lru_count_node(struct list_lru *lru, int nid); 110unsigned long list_lru_count_node(struct list_lru *lru, int nid);
111
112static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
113 struct shrink_control *sc)
114{
115 return list_lru_count_one(lru, sc->nid, sc->memcg);
116}
117
84static inline unsigned long list_lru_count(struct list_lru *lru) 118static inline unsigned long list_lru_count(struct list_lru *lru)
85{ 119{
86 long count = 0; 120 long count = 0;
87 int nid; 121 int nid;
88 122
89 for_each_node_mask(nid, lru->active_nodes) 123 for_each_node_state(nid, N_NORMAL_MEMORY)
90 count += list_lru_count_node(lru, nid); 124 count += list_lru_count_node(lru, nid);
91 125
92 return count; 126 return count;
93} 127}
94 128
95typedef enum lru_status 129void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
96(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg); 130void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
131 struct list_head *head);
132
133typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
134 struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
135
97/** 136/**
98 * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items. 137 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
99 * @lru: the lru pointer. 138 * @lru: the lru pointer.
100 * @nid: the node id to scan from. 139 * @nid: the node id to scan from.
140 * @memcg: the cgroup to scan from.
 101 * @isolate: callback function that is responsible for deciding what to do with 141 * @isolate: callback function that is responsible for deciding what to do with
102 * the item currently being scanned 142 * the item currently being scanned
103 * @cb_arg: opaque type that will be passed to @isolate 143 * @cb_arg: opaque type that will be passed to @isolate
@@ -115,18 +155,30 @@ typedef enum lru_status
115 * 155 *
116 * Return value: the number of objects effectively removed from the LRU. 156 * Return value: the number of objects effectively removed from the LRU.
117 */ 157 */
158unsigned long list_lru_walk_one(struct list_lru *lru,
159 int nid, struct mem_cgroup *memcg,
160 list_lru_walk_cb isolate, void *cb_arg,
161 unsigned long *nr_to_walk);
118unsigned long list_lru_walk_node(struct list_lru *lru, int nid, 162unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
119 list_lru_walk_cb isolate, void *cb_arg, 163 list_lru_walk_cb isolate, void *cb_arg,
120 unsigned long *nr_to_walk); 164 unsigned long *nr_to_walk);
121 165
122static inline unsigned long 166static inline unsigned long
167list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
168 list_lru_walk_cb isolate, void *cb_arg)
169{
170 return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
171 &sc->nr_to_scan);
172}
173
174static inline unsigned long
123list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate, 175list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
124 void *cb_arg, unsigned long nr_to_walk) 176 void *cb_arg, unsigned long nr_to_walk)
125{ 177{
126 long isolated = 0; 178 long isolated = 0;
127 int nid; 179 int nid;
128 180
129 for_each_node_mask(nid, lru->active_nodes) { 181 for_each_node_state(nid, N_NORMAL_MEMORY) {
130 isolated += list_lru_walk_node(lru, nid, isolate, 182 isolated += list_lru_walk_node(lru, nid, isolate,
131 cb_arg, &nr_to_walk); 183 cb_arg, &nr_to_walk);
132 if (nr_to_walk <= 0) 184 if (nr_to_walk <= 0)
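
To make the reworked list_lru API above concrete, a rough sketch of a memcg-aware shrinker built on top of it; the object type, the lru instance, and the SHRINKER_MEMCG_AWARE registration are assumptions about the surrounding driver, and the isolate callback follows the new list_lru_walk_cb signature (item, per-memcg list, node lock, cb_arg):

#include <linux/list_lru.h>
#include <linux/shrinker.h>

struct foo_object {				/* hypothetical cached object */
	struct list_head lru;
	/* ... payload ... */
};

static struct list_lru foo_lru;			/* set up with list_lru_init_memcg() */

static enum lru_status foo_isolate(struct list_head *item,
				   struct list_lru_one *list,
				   spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	/* detach under the node lock; actual freeing happens outside it */
	list_lru_isolate_move(list, item, dispose);
	return LRU_REMOVED;
}

static unsigned long foo_count(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	/* counts only the node + memcg the shrink_control points at */
	return list_lru_shrink_count(&foo_lru, sc);
}

static unsigned long foo_scan(struct shrinker *shrink,
			      struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_shrink_walk(&foo_lru, sc, foo_isolate, &dispose);
	/* ... free everything collected on 'dispose' ... */
	return freed;
}

static struct shrinker foo_shrinker = {
	.count_objects	= foo_count,
	.scan_objects	= foo_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_MEMCG_AWARE,
};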
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 5d10ae364b5e..f266661d2666 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -1,6 +1,9 @@
1#ifndef _LINUX_LIST_NULLS_H 1#ifndef _LINUX_LIST_NULLS_H
2#define _LINUX_LIST_NULLS_H 2#define _LINUX_LIST_NULLS_H
3 3
4#include <linux/poison.h>
5#include <linux/const.h>
6
4/* 7/*
5 * Special version of lists, where end of list is not a NULL pointer, 8 * Special version of lists, where end of list is not a NULL pointer,
6 * but a 'nulls' marker, which can have many different values. 9 * but a 'nulls' marker, which can have many different values.
@@ -21,8 +24,9 @@ struct hlist_nulls_head {
21struct hlist_nulls_node { 24struct hlist_nulls_node {
22 struct hlist_nulls_node *next, **pprev; 25 struct hlist_nulls_node *next, **pprev;
23}; 26};
27#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
24#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \ 28#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
25 ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1))) 29 ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
26 30
27#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) 31#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
28/** 32/**
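
A tiny sketch of the new NULLS_MARKER macro in context: each chain of a (hypothetical) two-bucket hash table is terminated with a distinct nulls value, which an RCU lookup can check to detect that an object was moved to another chain mid-traversal:

#include <linux/list_nulls.h>

static struct hlist_nulls_head foo_hash[2];	/* hypothetical hash table */

static void foo_hash_init(void)
{
	/* chain 0 ends in NULLS_MARKER(0), chain 1 in NULLS_MARKER(1) */
	INIT_HLIST_NULLS_HEAD(&foo_hash[0], 0);
	INIT_HLIST_NULLS_HEAD(&foo_hash[1], 1);
}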
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
new file mode 100644
index 000000000000..95023fd8b00d
--- /dev/null
+++ b/include/linux/livepatch.h
@@ -0,0 +1,133 @@
1/*
2 * livepatch.h - Kernel Live Patching Core
3 *
4 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
5 * Copyright (C) 2014 SUSE
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef _LINUX_LIVEPATCH_H_
22#define _LINUX_LIVEPATCH_H_
23
24#include <linux/module.h>
25#include <linux/ftrace.h>
26
27#if IS_ENABLED(CONFIG_LIVEPATCH)
28
29#include <asm/livepatch.h>
30
31enum klp_state {
32 KLP_DISABLED,
33 KLP_ENABLED
34};
35
36/**
37 * struct klp_func - function structure for live patching
38 * @old_name: name of the function to be patched
39 * @new_func: pointer to the patched function code
40 * @old_addr: a hint conveying at what address the old function
41 * can be found (optional, vmlinux patches only)
42 * @kobj: kobject for sysfs resources
43 * @state: tracks function-level patch application state
44 * @stack_node: list node for klp_ops func_stack list
45 */
46struct klp_func {
47 /* external */
48 const char *old_name;
49 void *new_func;
50 /*
51 * The old_addr field is optional and can be used to resolve
52 * duplicate symbol names in the vmlinux object. If this
53 * information is not present, the symbol is located by name
54 * with kallsyms. If the name is not unique and old_addr is
55 * not provided, the patch application fails as there is no
56 * way to resolve the ambiguity.
57 */
58 unsigned long old_addr;
59
60 /* internal */
61 struct kobject kobj;
62 enum klp_state state;
63 struct list_head stack_node;
64};
65
66/**
67 * struct klp_reloc - relocation structure for live patching
68 * @loc: address where the relocation will be written
69 * @val: address of the referenced symbol (optional,
70 * vmlinux patches only)
71 * @type: ELF relocation type
72 * @name: name of the referenced symbol (for lookup/verification)
73 * @addend: offset from the referenced symbol
74 * @external: symbol is either exported or within the live patch module itself
75 */
76struct klp_reloc {
77 unsigned long loc;
78 unsigned long val;
79 unsigned long type;
80 const char *name;
81 int addend;
82 int external;
83};
84
85/**
86 * struct klp_object - kernel object structure for live patching
87 * @name: module name (or NULL for vmlinux)
88 * @relocs: relocation entries to be applied at load time
89 * @funcs: function entries for functions to be patched in the object
90 * @kobj: kobject for sysfs resources
91 * @mod: kernel module associated with the patched object
92 * (NULL for vmlinux)
93 * @state: tracks object-level patch application state
94 */
95struct klp_object {
96 /* external */
97 const char *name;
98 struct klp_reloc *relocs;
99 struct klp_func *funcs;
100
101 /* internal */
102 struct kobject *kobj;
103 struct module *mod;
104 enum klp_state state;
105};
106
107/**
108 * struct klp_patch - patch structure for live patching
109 * @mod: reference to the live patch module
110 * @objs: object entries for kernel objects to be patched
111 * @list: list node for global list of registered patches
112 * @kobj: kobject for sysfs resources
113 * @state: tracks patch-level application state
114 */
115struct klp_patch {
116 /* external */
117 struct module *mod;
118 struct klp_object *objs;
119
120 /* internal */
121 struct list_head list;
122 struct kobject kobj;
123 enum klp_state state;
124};
125
126extern int klp_register_patch(struct klp_patch *);
127extern int klp_unregister_patch(struct klp_patch *);
128extern int klp_enable_patch(struct klp_patch *);
129extern int klp_disable_patch(struct klp_patch *);
130
131#endif /* CONFIG_LIVEPATCH */
132
133#endif /* _LINUX_LIVEPATCH_H_ */
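
For orientation, a stripped-down sketch of how the structures and calls declared in this new header fit together in a patch module; the patched symbol and its replacement body are placeholders, not anything defined by the header itself:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>

static int livepatch_foo(void)		/* replacement for a vmlinux function */
{
	pr_info("patched foo() called\n");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name	= "foo",	/* hypothetical vmlinux symbol */
		.new_func	= livepatch_foo,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* .name == NULL means the object is vmlinux itself */
		.funcs		= funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod	= THIS_MODULE,
	.objs	= objs,
};

static int livepatch_foo_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;
	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}
	return 0;
}

static void livepatch_foo_exit(void)
{
	WARN_ON(klp_disable_patch(&patch));
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_foo_init);
module_exit(livepatch_foo_exit);
MODULE_LICENSE("GPL");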
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 4bfde0e99ed5..b10b122dd099 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -28,12 +28,13 @@ struct lockref {
28#endif 28#endif
29 struct { 29 struct {
30 spinlock_t lock; 30 spinlock_t lock;
31 unsigned int count; 31 int count;
32 }; 32 };
33 }; 33 };
34}; 34};
35 35
36extern void lockref_get(struct lockref *); 36extern void lockref_get(struct lockref *);
37extern int lockref_put_return(struct lockref *);
37extern int lockref_get_not_zero(struct lockref *); 38extern int lockref_get_not_zero(struct lockref *);
38extern int lockref_get_or_lock(struct lockref *); 39extern int lockref_get_or_lock(struct lockref *);
39extern int lockref_put_or_lock(struct lockref *); 40extern int lockref_put_or_lock(struct lockref *);
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 164aad1f9f12..0819d36a3a74 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -25,8 +25,8 @@ int __mei_cl_driver_register(struct mei_cl_driver *driver,
25 25
26void mei_cl_driver_unregister(struct mei_cl_driver *driver); 26void mei_cl_driver_unregister(struct mei_cl_driver *driver);
27 27
28int mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length); 28ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
29int mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); 29ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
30 30
31typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, 31typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
32 u32 events, void *context); 32 u32 events, void *context);
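
With the switch to ssize_t, callers can treat a negative return as an errno and a non-negative one as the transferred byte count; a minimal hedged sketch (the device handle and buffer are assumed):

u8 buf[64];
ssize_t len;

len = mei_cl_recv(device, buf, sizeof(buf));
if (len < 0)
	return len;		/* negative errno from the bus layer */
/* otherwise 'len' bytes of payload were received into buf */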
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7c95af8d552c..72dff5fb0d0c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -52,7 +52,27 @@ struct mem_cgroup_reclaim_cookie {
52 unsigned int generation; 52 unsigned int generation;
53}; 53};
54 54
55enum mem_cgroup_events_index {
56 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
57 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
58 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
59 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
60 MEM_CGROUP_EVENTS_NSTATS,
61 /* default hierarchy events */
62 MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
63 MEMCG_HIGH,
64 MEMCG_MAX,
65 MEMCG_OOM,
66 MEMCG_NR_EVENTS,
67};
68
55#ifdef CONFIG_MEMCG 69#ifdef CONFIG_MEMCG
70void mem_cgroup_events(struct mem_cgroup *memcg,
71 enum mem_cgroup_events_index idx,
72 unsigned int nr);
73
74bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
75
56int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 76int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
57 gfp_t gfp_mask, struct mem_cgroup **memcgp); 77 gfp_t gfp_mask, struct mem_cgroup **memcgp);
58void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 78void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
@@ -102,6 +122,7 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
102 * For memory reclaim. 122 * For memory reclaim.
103 */ 123 */
104int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec); 124int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
125bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
105int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); 126int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
106unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list); 127unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
107void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int); 128void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
@@ -138,12 +159,10 @@ static inline bool mem_cgroup_disabled(void)
138 return false; 159 return false;
139} 160}
140 161
141struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, 162struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
142 unsigned long *flags);
143void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
144 unsigned long *flags);
145void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, 163void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
146 enum mem_cgroup_stat_index idx, int val); 164 enum mem_cgroup_stat_index idx, int val);
165void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
147 166
148static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, 167static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
149 enum mem_cgroup_stat_index idx) 168 enum mem_cgroup_stat_index idx)
@@ -176,6 +195,18 @@ void mem_cgroup_split_huge_fixup(struct page *head);
176#else /* CONFIG_MEMCG */ 195#else /* CONFIG_MEMCG */
177struct mem_cgroup; 196struct mem_cgroup;
178 197
198static inline void mem_cgroup_events(struct mem_cgroup *memcg,
199 enum mem_cgroup_events_index idx,
200 unsigned int nr)
201{
202}
203
204static inline bool mem_cgroup_low(struct mem_cgroup *root,
205 struct mem_cgroup *memcg)
206{
207 return false;
208}
209
179static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 210static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
180 gfp_t gfp_mask, 211 gfp_t gfp_mask,
181 struct mem_cgroup **memcgp) 212 struct mem_cgroup **memcgp)
@@ -268,6 +299,11 @@ mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
268 return 1; 299 return 1;
269} 300}
270 301
302static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
303{
304 return true;
305}
306
271static inline unsigned long 307static inline unsigned long
272mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) 308mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
273{ 309{
@@ -285,14 +321,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
285{ 321{
286} 322}
287 323
288static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, 324static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
289 bool *locked, unsigned long *flags)
290{ 325{
291 return NULL; 326 return NULL;
292} 327}
293 328
294static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, 329static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
295 bool *locked, unsigned long *flags)
296{ 330{
297} 331}
298 332
@@ -364,7 +398,9 @@ static inline void sock_release_memcg(struct sock *sk)
364#ifdef CONFIG_MEMCG_KMEM 398#ifdef CONFIG_MEMCG_KMEM
365extern struct static_key memcg_kmem_enabled_key; 399extern struct static_key memcg_kmem_enabled_key;
366 400
367extern int memcg_limited_groups_array_size; 401extern int memcg_nr_cache_ids;
402extern void memcg_get_cache_ids(void);
403extern void memcg_put_cache_ids(void);
368 404
369/* 405/*
370 * Helper macro to loop through all memcg-specific caches. Callers must still 406 * Helper macro to loop through all memcg-specific caches. Callers must still
@@ -372,13 +408,15 @@ extern int memcg_limited_groups_array_size;
372 * the slab_mutex must be held when looping through those caches 408 * the slab_mutex must be held when looping through those caches
373 */ 409 */
374#define for_each_memcg_cache_index(_idx) \ 410#define for_each_memcg_cache_index(_idx) \
375 for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++) 411 for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
376 412
377static inline bool memcg_kmem_enabled(void) 413static inline bool memcg_kmem_enabled(void)
378{ 414{
379 return static_key_false(&memcg_kmem_enabled_key); 415 return static_key_false(&memcg_kmem_enabled_key);
380} 416}
381 417
418bool memcg_kmem_is_active(struct mem_cgroup *memcg);
419
382/* 420/*
 383 * In general, we'll do everything in our power to not incur any overhead 421 * In general, we'll do everything in our power to not incur any overhead
384 * for non-memcg users for the kmem functions. Not even a function call, if we 422 * for non-memcg users for the kmem functions. Not even a function call, if we
@@ -398,15 +436,14 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
398 436
399int memcg_cache_id(struct mem_cgroup *memcg); 437int memcg_cache_id(struct mem_cgroup *memcg);
400 438
401void memcg_update_array_size(int num_groups);
402
403struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); 439struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
404void __memcg_kmem_put_cache(struct kmem_cache *cachep); 440void __memcg_kmem_put_cache(struct kmem_cache *cachep);
405 441
406int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); 442struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
407void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
408 443
409int __memcg_cleanup_cache_params(struct kmem_cache *s); 444int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
445 unsigned long nr_pages);
446void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
410 447
411/** 448/**
412 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. 449 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
@@ -500,6 +537,13 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
500 if (memcg_kmem_enabled()) 537 if (memcg_kmem_enabled())
501 __memcg_kmem_put_cache(cachep); 538 __memcg_kmem_put_cache(cachep);
502} 539}
540
541static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
542{
543 if (!memcg_kmem_enabled())
544 return NULL;
545 return __mem_cgroup_from_kmem(ptr);
546}
503#else 547#else
504#define for_each_memcg_cache_index(_idx) \ 548#define for_each_memcg_cache_index(_idx) \
505 for (; NULL; ) 549 for (; NULL; )
@@ -509,6 +553,11 @@ static inline bool memcg_kmem_enabled(void)
509 return false; 553 return false;
510} 554}
511 555
556static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
557{
558 return false;
559}
560
512static inline bool 561static inline bool
513memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) 562memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
514{ 563{
@@ -529,6 +578,14 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
529 return -1; 578 return -1;
530} 579}
531 580
581static inline void memcg_get_cache_ids(void)
582{
583}
584
585static inline void memcg_put_cache_ids(void)
586{
587}
588
532static inline struct kmem_cache * 589static inline struct kmem_cache *
533memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) 590memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
534{ 591{
@@ -538,6 +595,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
538static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) 595static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
539{ 596{
540} 597}
598
599static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
600{
601 return NULL;
602}
541#endif /* CONFIG_MEMCG_KMEM */ 603#endif /* CONFIG_MEMCG_KMEM */
542#endif /* _LINUX_MEMCONTROL_H */ 604#endif /* _LINUX_MEMCONTROL_H */
543 605
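
As a hedged illustration of the new event and low-boundary interfaces, reclaim-side code could account one of the default-hierarchy events roughly like this (the surrounding reclaim loop is an assumption, not part of this header):

/* while reclaiming from 'memcg' under pressure rooted at 'root' */
if (mem_cgroup_low(root, memcg)) {
	/*
	 * The group is below its low boundary; reclaiming from it
	 * anyway is worth accounting so userspace can see it.
	 */
	mem_cgroup_events(memcg, MEMCG_LOW, 1);
}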
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
index cc892a8d8d6e..12a5b396921e 100644
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -461,7 +461,6 @@ struct ab8500_fg;
461#ifdef CONFIG_AB8500_BM 461#ifdef CONFIG_AB8500_BM
462extern struct abx500_bm_data ab8500_bm_data; 462extern struct abx500_bm_data ab8500_bm_data;
463 463
464void ab8500_fg_reinit(void);
465void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA); 464void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
466struct ab8500_btemp *ab8500_btemp_get(void); 465struct ab8500_btemp *ab8500_btemp_get(void);
467int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp); 466int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 08dae01258b9..955dd990beaf 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -143,10 +143,118 @@ enum max77693_pmic_reg {
143#define FLASH_INT_FLED1_SHORT BIT(3) 143#define FLASH_INT_FLED1_SHORT BIT(3)
144#define FLASH_INT_OVER_CURRENT BIT(4) 144#define FLASH_INT_OVER_CURRENT BIT(4)
145 145
 146/* Fast charge timer in hours */
147#define DEFAULT_FAST_CHARGE_TIMER 4
148/* microamps */
149#define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000
150/* minutes */
151#define DEFAULT_TOP_OFF_TIMER 30
152/* microvolts */
153#define DEFAULT_CONSTANT_VOLT 4200000
154/* microvolts */
155#define DEFAULT_MIN_SYSTEM_VOLT 3600000
156/* celsius */
157#define DEFAULT_THERMAL_REGULATION_TEMP 100
158/* microamps */
159#define DEFAULT_BATTERY_OVERCURRENT 3500000
160/* microvolts */
161#define DEFAULT_CHARGER_INPUT_THRESHOLD_VOLT 4300000
162
163/* MAX77693_CHG_REG_CHG_INT_OK register */
164#define CHG_INT_OK_BYP_SHIFT 0
165#define CHG_INT_OK_BAT_SHIFT 3
166#define CHG_INT_OK_CHG_SHIFT 4
167#define CHG_INT_OK_CHGIN_SHIFT 6
168#define CHG_INT_OK_DETBAT_SHIFT 7
169#define CHG_INT_OK_BYP_MASK BIT(CHG_INT_OK_BYP_SHIFT)
170#define CHG_INT_OK_BAT_MASK BIT(CHG_INT_OK_BAT_SHIFT)
171#define CHG_INT_OK_CHG_MASK BIT(CHG_INT_OK_CHG_SHIFT)
172#define CHG_INT_OK_CHGIN_MASK BIT(CHG_INT_OK_CHGIN_SHIFT)
173#define CHG_INT_OK_DETBAT_MASK BIT(CHG_INT_OK_DETBAT_SHIFT)
174
175/* MAX77693_CHG_REG_CHG_DETAILS_00 register */
176#define CHG_DETAILS_00_CHGIN_SHIFT 5
177#define CHG_DETAILS_00_CHGIN_MASK (0x3 << CHG_DETAILS_00_CHGIN_SHIFT)
178
179/* MAX77693_CHG_REG_CHG_DETAILS_01 register */
180#define CHG_DETAILS_01_CHG_SHIFT 0
181#define CHG_DETAILS_01_BAT_SHIFT 4
182#define CHG_DETAILS_01_TREG_SHIFT 7
183#define CHG_DETAILS_01_CHG_MASK (0xf << CHG_DETAILS_01_CHG_SHIFT)
184#define CHG_DETAILS_01_BAT_MASK (0x7 << CHG_DETAILS_01_BAT_SHIFT)
185#define CHG_DETAILS_01_TREG_MASK BIT(7)
186
187/* MAX77693_CHG_REG_CHG_DETAILS_01/CHG field */
188enum max77693_charger_charging_state {
189 MAX77693_CHARGING_PREQUALIFICATION = 0x0,
190 MAX77693_CHARGING_FAST_CONST_CURRENT,
191 MAX77693_CHARGING_FAST_CONST_VOLTAGE,
192 MAX77693_CHARGING_TOP_OFF,
193 MAX77693_CHARGING_DONE,
194 MAX77693_CHARGING_HIGH_TEMP,
195 MAX77693_CHARGING_TIMER_EXPIRED,
196 MAX77693_CHARGING_THERMISTOR_SUSPEND,
197 MAX77693_CHARGING_OFF,
198 MAX77693_CHARGING_RESERVED,
199 MAX77693_CHARGING_OVER_TEMP,
200 MAX77693_CHARGING_WATCHDOG_EXPIRED,
201};
202
203/* MAX77693_CHG_REG_CHG_DETAILS_01/BAT field */
204enum max77693_charger_battery_state {
205 MAX77693_BATTERY_NOBAT = 0x0,
206 /* Dead-battery or low-battery prequalification */
207 MAX77693_BATTERY_PREQUALIFICATION,
208 MAX77693_BATTERY_TIMER_EXPIRED,
209 MAX77693_BATTERY_GOOD,
210 MAX77693_BATTERY_LOWVOLTAGE,
211 MAX77693_BATTERY_OVERVOLTAGE,
212 MAX77693_BATTERY_OVERCURRENT,
213 MAX77693_BATTERY_RESERVED,
214};
215
216/* MAX77693_CHG_REG_CHG_DETAILS_02 register */
217#define CHG_DETAILS_02_BYP_SHIFT 0
218#define CHG_DETAILS_02_BYP_MASK (0xf << CHG_DETAILS_02_BYP_SHIFT)
219
146/* MAX77693 CHG_CNFG_00 register */ 220/* MAX77693 CHG_CNFG_00 register */
147#define CHG_CNFG_00_CHG_MASK 0x1 221#define CHG_CNFG_00_CHG_MASK 0x1
148#define CHG_CNFG_00_BUCK_MASK 0x4 222#define CHG_CNFG_00_BUCK_MASK 0x4
149 223
224/* MAX77693_CHG_REG_CHG_CNFG_01 register */
225#define CHG_CNFG_01_FCHGTIME_SHIFT 0
226#define CHG_CNFG_01_CHGRSTRT_SHIFT 4
227#define CHG_CNFG_01_PQEN_SHIFT 7
228#define CHG_CNFG_01_FCHGTIME_MASK (0x7 << CHG_CNFG_01_FCHGTIME_SHIFT)
229#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT)
230#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT)
231
232/* MAX77693_CHG_REG_CHG_CNFG_03 register */
233#define CHG_CNFG_03_TOITH_SHIFT 0
234#define CHG_CNFG_03_TOTIME_SHIFT 3
235#define CHG_CNFG_03_TOITH_MASK (0x7 << CHG_CNFG_03_TOITH_SHIFT)
236#define CHG_CNFG_03_TOTIME_MASK (0x7 << CHG_CNFG_03_TOTIME_SHIFT)
237
238/* MAX77693_CHG_REG_CHG_CNFG_04 register */
239#define CHG_CNFG_04_CHGCVPRM_SHIFT 0
240#define CHG_CNFG_04_MINVSYS_SHIFT 5
241#define CHG_CNFG_04_CHGCVPRM_MASK (0x1f << CHG_CNFG_04_CHGCVPRM_SHIFT)
242#define CHG_CNFG_04_MINVSYS_MASK (0x7 << CHG_CNFG_04_MINVSYS_SHIFT)
243
244/* MAX77693_CHG_REG_CHG_CNFG_06 register */
245#define CHG_CNFG_06_CHGPROT_SHIFT 2
246#define CHG_CNFG_06_CHGPROT_MASK (0x3 << CHG_CNFG_06_CHGPROT_SHIFT)
247
248/* MAX77693_CHG_REG_CHG_CNFG_07 register */
249#define CHG_CNFG_07_REGTEMP_SHIFT 5
250#define CHG_CNFG_07_REGTEMP_MASK (0x3 << CHG_CNFG_07_REGTEMP_SHIFT)
251
252/* MAX77693_CHG_REG_CHG_CNFG_12 register */
253#define CHG_CNFG_12_B2SOVRC_SHIFT 0
254#define CHG_CNFG_12_VCHGINREG_SHIFT 3
255#define CHG_CNFG_12_B2SOVRC_MASK (0x7 << CHG_CNFG_12_B2SOVRC_SHIFT)
256#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT)
257
150/* MAX77693 CHG_CNFG_09 Register */ 258/* MAX77693 CHG_CNFG_09 Register */
151#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F 259#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F
152 260
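
The SHIFT/MASK pairs introduced above are meant to be combined when programming a field; a small hedged sketch via regmap (the regmap handle and the pre-encoded field value are assumptions):

/* program the fast-charge timer field; 'fchgtime' is assumed to be
 * already encoded into the register's own units, the hours-to-bits
 * mapping is device specific and not shown here */
ret = regmap_update_bits(max77693->regmap, MAX77693_CHG_REG_CHG_CNFG_01,
			 CHG_CNFG_01_FCHGTIME_MASK,
			 fchgtime << CHG_CNFG_01_FCHGTIME_SHIFT);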
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index ce5dda8958fe..b1fd675fa36f 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -59,6 +59,7 @@ enum s2mps13_reg {
59 S2MPS13_REG_B6CTRL, 59 S2MPS13_REG_B6CTRL,
60 S2MPS13_REG_B6OUT, 60 S2MPS13_REG_B6OUT,
61 S2MPS13_REG_B7CTRL, 61 S2MPS13_REG_B7CTRL,
62 S2MPS13_REG_B7SW,
62 S2MPS13_REG_B7OUT, 63 S2MPS13_REG_B7OUT,
63 S2MPS13_REG_B8CTRL, 64 S2MPS13_REG_B8CTRL,
64 S2MPS13_REG_B8OUT, 65 S2MPS13_REG_B8OUT,
@@ -102,6 +103,7 @@ enum s2mps13_reg {
102 S2MPS13_REG_L26CTRL, 103 S2MPS13_REG_L26CTRL,
103 S2MPS13_REG_L27CTRL, 104 S2MPS13_REG_L27CTRL,
104 S2MPS13_REG_L28CTRL, 105 S2MPS13_REG_L28CTRL,
106 S2MPS13_REG_L29CTRL,
105 S2MPS13_REG_L30CTRL, 107 S2MPS13_REG_L30CTRL,
106 S2MPS13_REG_L31CTRL, 108 S2MPS13_REG_L31CTRL,
107 S2MPS13_REG_L32CTRL, 109 S2MPS13_REG_L32CTRL,
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index f742b6717d52..c9d869027300 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -118,20 +118,6 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
118#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) 118#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
119 119
120/** 120/**
121 * struct stmpe_gpio_platform_data - STMPE GPIO platform data
122 * @norequest_mask: bitmask specifying which GPIOs should _not_ be
123 * requestable due to different usage (e.g. touch, keypad)
124 * STMPE_GPIO_NOREQ_* macros can be used here.
125 * @setup: board specific setup callback.
126 * @remove: board specific remove callback
127 */
128struct stmpe_gpio_platform_data {
129 unsigned norequest_mask;
130 void (*setup)(struct stmpe *stmpe, unsigned gpio_base);
131 void (*remove)(struct stmpe *stmpe, unsigned gpio_base);
132};
133
134/**
135 * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform 121 * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform
136 * data 122 * data
 137 * @sample_time: ADC conversion time in number of clocks. 123 * @sample_time: ADC conversion time in number of clocks.
@@ -182,7 +168,6 @@ struct stmpe_ts_platform_data {
182 * @irq_over_gpio: true if gpio is used to get irq 168 * @irq_over_gpio: true if gpio is used to get irq
183 * @irq_gpio: gpio number over which irq will be requested (significant only if 169 * @irq_gpio: gpio number over which irq will be requested (significant only if
184 * irq_over_gpio is true) 170 * irq_over_gpio is true)
185 * @gpio: GPIO-specific platform data
186 * @ts: touchscreen-specific platform data 171 * @ts: touchscreen-specific platform data
187 */ 172 */
188struct stmpe_platform_data { 173struct stmpe_platform_data {
@@ -194,7 +179,6 @@ struct stmpe_platform_data {
194 int irq_gpio; 179 int irq_gpio;
195 int autosleep_timeout; 180 int autosleep_timeout;
196 181
197 struct stmpe_gpio_platform_data *gpio;
198 struct stmpe_ts_platform_data *ts; 182 struct stmpe_ts_platform_data *ts;
199}; 183};
200 184
diff --git a/include/linux/mfd/syscon/atmel-matrix.h b/include/linux/mfd/syscon/atmel-matrix.h
new file mode 100644
index 000000000000..8293c3e2a82a
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-matrix.h
@@ -0,0 +1,117 @@
1/*
2 * Copyright (C) 2014 Atmel Corporation.
3 *
4 * Memory Controllers (MATRIX, EBI) - System peripherals registers.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef _LINUX_MFD_SYSCON_ATMEL_MATRIX_H
13#define _LINUX_MFD_SYSCON_ATMEL_MATRIX_H
14
15#define AT91SAM9260_MATRIX_MCFG 0x00
16#define AT91SAM9260_MATRIX_SCFG 0x40
17#define AT91SAM9260_MATRIX_PRS 0x80
18#define AT91SAM9260_MATRIX_MRCR 0x100
19#define AT91SAM9260_MATRIX_EBICSA 0x11c
20
21#define AT91SAM9261_MATRIX_MRCR 0x0
22#define AT91SAM9261_MATRIX_SCFG 0x4
23#define AT91SAM9261_MATRIX_TCR 0x24
24#define AT91SAM9261_MATRIX_EBICSA 0x30
25#define AT91SAM9261_MATRIX_USBPUCR 0x34
26
27#define AT91SAM9263_MATRIX_MCFG 0x00
28#define AT91SAM9263_MATRIX_SCFG 0x40
29#define AT91SAM9263_MATRIX_PRS 0x80
30#define AT91SAM9263_MATRIX_MRCR 0x100
31#define AT91SAM9263_MATRIX_TCR 0x114
32#define AT91SAM9263_MATRIX_EBI0CSA 0x120
33#define AT91SAM9263_MATRIX_EBI1CSA 0x124
34
35#define AT91SAM9RL_MATRIX_MCFG 0x00
36#define AT91SAM9RL_MATRIX_SCFG 0x40
37#define AT91SAM9RL_MATRIX_PRS 0x80
38#define AT91SAM9RL_MATRIX_MRCR 0x100
39#define AT91SAM9RL_MATRIX_TCR 0x114
40#define AT91SAM9RL_MATRIX_EBICSA 0x120
41
42#define AT91SAM9G45_MATRIX_MCFG 0x00
43#define AT91SAM9G45_MATRIX_SCFG 0x40
44#define AT91SAM9G45_MATRIX_PRS 0x80
45#define AT91SAM9G45_MATRIX_MRCR 0x100
46#define AT91SAM9G45_MATRIX_TCR 0x110
47#define AT91SAM9G45_MATRIX_DDRMPR 0x118
48#define AT91SAM9G45_MATRIX_EBICSA 0x128
49
50#define AT91SAM9N12_MATRIX_MCFG 0x00
51#define AT91SAM9N12_MATRIX_SCFG 0x40
52#define AT91SAM9N12_MATRIX_PRS 0x80
53#define AT91SAM9N12_MATRIX_MRCR 0x100
54#define AT91SAM9N12_MATRIX_EBICSA 0x118
55
56#define AT91SAM9X5_MATRIX_MCFG 0x00
57#define AT91SAM9X5_MATRIX_SCFG 0x40
58#define AT91SAM9X5_MATRIX_PRS 0x80
59#define AT91SAM9X5_MATRIX_MRCR 0x100
60#define AT91SAM9X5_MATRIX_EBICSA 0x120
61
62#define SAMA5D3_MATRIX_MCFG 0x00
63#define SAMA5D3_MATRIX_SCFG 0x40
64#define SAMA5D3_MATRIX_PRS 0x80
65#define SAMA5D3_MATRIX_MRCR 0x100
66
67#define AT91_MATRIX_MCFG(o, x) ((o) + ((x) * 0x4))
68#define AT91_MATRIX_ULBT GENMASK(2, 0)
69#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
70#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
71#define AT91_MATRIX_ULBT_FOUR (2 << 0)
72#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
73#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
74
75#define AT91_MATRIX_SCFG(o, x) ((o) + ((x) * 0x4))
76#define AT91_MATRIX_SLOT_CYCLE GENMASK(7, 0)
77#define AT91_MATRIX_DEFMSTR_TYPE GENMASK(17, 16)
78#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
79#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
80#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
81#define AT91_MATRIX_FIXED_DEFMSTR GENMASK(20, 18)
82#define AT91_MATRIX_ARBT GENMASK(25, 24)
83#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24)
84#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24)
85
86#define AT91_MATRIX_ITCM_SIZE GENMASK(3, 0)
87#define AT91_MATRIX_ITCM_0 (0 << 0)
88#define AT91_MATRIX_ITCM_16 (5 << 0)
89#define AT91_MATRIX_ITCM_32 (6 << 0)
90#define AT91_MATRIX_ITCM_64 (7 << 0)
91#define AT91_MATRIX_DTCM_SIZE GENMASK(7, 4)
92#define AT91_MATRIX_DTCM_0 (0 << 4)
93#define AT91_MATRIX_DTCM_16 (5 << 4)
94#define AT91_MATRIX_DTCM_32 (6 << 4)
95#define AT91_MATRIX_DTCM_64 (7 << 4)
96
97#define AT91_MATRIX_PRAS(o, x) ((o) + ((x) * 0x8))
98#define AT91_MATRIX_PRBS(o, x) ((o) + ((x) * 0x8) + 0x4)
99#define AT91_MATRIX_MPR(x) GENMASK(((x) * 0x4) + 1, ((x) * 0x4))
100
101#define AT91_MATRIX_RCB(x) BIT(x)
102
103#define AT91_MATRIX_CSA(cs, val) (val << (cs))
104#define AT91_MATRIX_DBPUC BIT(8)
105#define AT91_MATRIX_DBPDC BIT(9)
106#define AT91_MATRIX_VDDIOMSEL BIT(16)
107#define AT91_MATRIX_VDDIOMSEL_1_8V (0 << 16)
108#define AT91_MATRIX_VDDIOMSEL_3_3V (1 << 16)
109#define AT91_MATRIX_EBI_IOSR BIT(17)
110#define AT91_MATRIX_DDR_IOSR BIT(18)
111#define AT91_MATRIX_NFD0_SELECT BIT(24)
112#define AT91_MATRIX_DDR_MP_EN BIT(25)
113#define AT91_MATRIX_EBI_NUM_CS 8
114
115#define AT91_MATRIX_USBPUCR_PUON BIT(30)
116
117#endif /* _LINUX_MFD_SYSCON_ATMEL_MATRIX_H */
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
new file mode 100644
index 000000000000..be6ebe64eebe
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -0,0 +1,173 @@
1/*
2 * Atmel SMC (Static Memory Controller) register offsets and bit definitions.
3 *
4 * Copyright (C) 2014 Atmel
5 * Copyright (C) 2014 Free Electrons
6 *
7 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
15#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
16
17#include <linux/kernel.h>
18#include <linux/regmap.h>
19
20#define AT91SAM9_SMC_GENERIC 0x00
21#define AT91SAM9_SMC_GENERIC_BLK_SZ 0x10
22
23#define SAMA5_SMC_GENERIC 0x600
24#define SAMA5_SMC_GENERIC_BLK_SZ 0x14
25
26#define AT91SAM9_SMC_SETUP(o) ((o) + 0x00)
27#define AT91SAM9_SMC_NWESETUP(x) (x)
28#define AT91SAM9_SMC_NCS_WRSETUP(x) ((x) << 8)
29#define AT91SAM9_SMC_NRDSETUP(x) ((x) << 16)
30#define AT91SAM9_SMC_NCS_NRDSETUP(x) ((x) << 24)
31
32#define AT91SAM9_SMC_PULSE(o) ((o) + 0x04)
33#define AT91SAM9_SMC_NWEPULSE(x) (x)
34#define AT91SAM9_SMC_NCS_WRPULSE(x) ((x) << 8)
35#define AT91SAM9_SMC_NRDPULSE(x) ((x) << 16)
36#define AT91SAM9_SMC_NCS_NRDPULSE(x) ((x) << 24)
37
38#define AT91SAM9_SMC_CYCLE(o) ((o) + 0x08)
39#define AT91SAM9_SMC_NWECYCLE(x) (x)
40#define AT91SAM9_SMC_NRDCYCLE(x) ((x) << 16)
41
42#define AT91SAM9_SMC_MODE(o) ((o) + 0x0c)
43#define SAMA5_SMC_MODE(o) ((o) + 0x10)
44#define AT91_SMC_READMODE BIT(0)
45#define AT91_SMC_READMODE_NCS (0 << 0)
46#define AT91_SMC_READMODE_NRD (1 << 0)
47#define AT91_SMC_WRITEMODE BIT(1)
48#define AT91_SMC_WRITEMODE_NCS (0 << 1)
49#define AT91_SMC_WRITEMODE_NWE (1 << 1)
50#define AT91_SMC_EXNWMODE GENMASK(5, 4)
51#define AT91_SMC_EXNWMODE_DISABLE (0 << 4)
52#define AT91_SMC_EXNWMODE_FROZEN (2 << 4)
53#define AT91_SMC_EXNWMODE_READY (3 << 4)
54#define AT91_SMC_BAT BIT(8)
55#define AT91_SMC_BAT_SELECT (0 << 8)
56#define AT91_SMC_BAT_WRITE (1 << 8)
57#define AT91_SMC_DBW GENMASK(13, 12)
58#define AT91_SMC_DBW_8 (0 << 12)
59#define AT91_SMC_DBW_16 (1 << 12)
60#define AT91_SMC_DBW_32 (2 << 12)
61#define AT91_SMC_TDF GENMASK(19, 16)
62#define AT91_SMC_TDF_(x) ((((x) - 1) << 16) & AT91_SMC_TDF)
63#define AT91_SMC_TDF_MAX 16
64#define AT91_SMC_TDFMODE_OPTIMIZED BIT(20)
65#define AT91_SMC_PMEN BIT(24)
66#define AT91_SMC_PS GENMASK(29, 28)
67#define AT91_SMC_PS_4 (0 << 28)
68#define AT91_SMC_PS_8 (1 << 28)
69#define AT91_SMC_PS_16 (2 << 28)
70#define AT91_SMC_PS_32 (3 << 28)
71
72
73/*
74 * This function converts a setup timing expressed in nanoseconds into an
75 * encoded value that can be written in the SMC_SETUP register.
76 *
77 * The following formula is described in atmel datasheets (section
78 * "SMC Setup Register"):
79 *
80 * setup length = (128* SETUP[5] + SETUP[4:0])
81 *
82 * where setup length is the timing expressed in cycles.
83 */
84static inline u32 at91sam9_smc_setup_ns_to_cycles(unsigned int clk_rate,
85 u32 timing_ns)
86{
87 u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
88 u32 coded_cycles = 0;
89 u32 cycles;
90
91 cycles = DIV_ROUND_UP(timing_ns, clk_period);
92 if (cycles / 32) {
93 coded_cycles |= 1 << 5;
94 if (cycles < 128)
95 cycles = 0;
96 }
97
98 coded_cycles |= cycles % 32;
99
100 return coded_cycles;
101}
102
103/*
104 * This function converts a pulse timing expressed in nanoseconds into an
105 * encoded value that can be written in the SMC_PULSE register.
106 *
107 * The following formula is described in atmel datasheets (section
108 * "SMC Pulse Register"):
109 *
110 * pulse length = (256* PULSE[6] + PULSE[5:0])
111 *
112 * where pulse length is the timing expressed in cycles.
113 */
114static inline u32 at91sam9_smc_pulse_ns_to_cycles(unsigned int clk_rate,
115 u32 timing_ns)
116{
117 u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
118 u32 coded_cycles = 0;
119 u32 cycles;
120
121 cycles = DIV_ROUND_UP(timing_ns, clk_period);
122 if (cycles / 64) {
123 coded_cycles |= 1 << 6;
124 if (cycles < 256)
125 cycles = 0;
126 }
127
128 coded_cycles |= cycles % 64;
129
130 return coded_cycles;
131}
132
133/*
134 * This function converts a cycle timing expressed in nanoseconds into an
135 * encoded value that can be written in the SMC_CYCLE register.
136 *
137 * The following formula is described in atmel datasheets (section
138 * "SMC Cycle Register"):
139 *
140 * cycle length = (CYCLE[8:7]*256 + CYCLE[6:0])
141 *
142 * where cycle length is the timing expressed in cycles.
143 */
144static inline u32 at91sam9_smc_cycle_ns_to_cycles(unsigned int clk_rate,
145 u32 timing_ns)
146{
147 u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
148 u32 coded_cycles = 0;
149 u32 cycles;
150
151 cycles = DIV_ROUND_UP(timing_ns, clk_period);
152 if (cycles / 128) {
153 coded_cycles = cycles / 256;
154 cycles %= 256;
155 if (cycles >= 128) {
156 coded_cycles++;
157 cycles = 0;
158 }
159
160 if (coded_cycles > 0x3) {
161 coded_cycles = 0x3;
162 cycles = 0x7f;
163 }
164
165 coded_cycles <<= 7;
166 }
167
168 coded_cycles |= cycles % 128;
169
170 return coded_cycles;
171}
172
173#endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */
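
A brief hedged sketch of the conversion helpers in use: encode write setup timings for one chip select and write them through a syscon regmap; the regmap handle, clock rate, chip-select number and nanosecond values are all assumptions:

/* chip-select 3, generic at91sam9 register layout, 133 MHz MCK */
unsigned int cs = 3, mck_hz = 133000000;
u32 off = AT91SAM9_SMC_GENERIC + cs * AT91SAM9_SMC_GENERIC_BLK_SZ;
u32 setup;

setup = AT91SAM9_SMC_NWESETUP(at91sam9_smc_setup_ns_to_cycles(mck_hz, 30)) |
	AT91SAM9_SMC_NCS_WRSETUP(at91sam9_smc_setup_ns_to_cycles(mck_hz, 10));

regmap_write(smc_regmap, AT91SAM9_SMC_SETUP(off), setup);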
diff --git a/include/linux/mfd/syscon/exynos4-pmu.h b/include/linux/mfd/syscon/exynos4-pmu.h
new file mode 100644
index 000000000000..278b1b1549e9
--- /dev/null
+++ b/include/linux/mfd/syscon/exynos4-pmu.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) 2015 Samsung Electronics Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
10#define _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
11
12/* Exynos4 PMU register definitions */
13
14/* MIPI_PHYn_CONTROL register offset: n = 0..1 */
15#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x710 + (n) * 4)
16#define EXYNOS4_MIPI_PHY_ENABLE (1 << 0)
17#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
18#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
19#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
20
21#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ */
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index e1c12d84c26a..c203c9c56776 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -163,24 +163,12 @@ struct tc3589x_keypad_platform_data {
163}; 163};
164 164
165/** 165/**
166 * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data
167 * @setup: callback for board-specific initialization
168 * @remove: callback for board-specific teardown
169 */
170struct tc3589x_gpio_platform_data {
171 void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base);
172 void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base);
173};
174
175/**
176 * struct tc3589x_platform_data - TC3589x platform data 166 * struct tc3589x_platform_data - TC3589x platform data
177 * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) 167 * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*)
178 * @gpio: GPIO-specific platform data
179 * @keypad: keypad-specific platform data 168 * @keypad: keypad-specific platform data
180 */ 169 */
181struct tc3589x_platform_data { 170struct tc3589x_platform_data {
182 unsigned int block; 171 unsigned int block;
183 struct tc3589x_gpio_platform_data *gpio;
184 const struct tc3589x_keypad_platform_data *keypad; 172 const struct tc3589x_keypad_platform_data *keypad;
185}; 173};
186 174
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index e2e70053470e..3f4e994ace2b 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -52,6 +52,7 @@
52 52
53/* IRQ enable */ 53/* IRQ enable */
54#define IRQENB_HW_PEN BIT(0) 54#define IRQENB_HW_PEN BIT(0)
55#define IRQENB_EOS BIT(1)
55#define IRQENB_FIFO0THRES BIT(2) 56#define IRQENB_FIFO0THRES BIT(2)
56#define IRQENB_FIFO0OVRRUN BIT(3) 57#define IRQENB_FIFO0OVRRUN BIT(3)
57#define IRQENB_FIFO0UNDRFLW BIT(4) 58#define IRQENB_FIFO0UNDRFLW BIT(4)
@@ -107,7 +108,7 @@
107/* Charge delay */ 108/* Charge delay */
108#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0) 109#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
109#define CHARGEDLY_OPEN(val) ((val) << 0) 110#define CHARGEDLY_OPEN(val) ((val) << 0)
110#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(1) 111#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
111 112
112/* Control register */ 113/* Control register */
113#define CNTRLREG_TSCSSENB BIT(0) 114#define CNTRLREG_TSCSSENB BIT(0)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 57388171610d..605812820e48 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -96,11 +96,6 @@
96#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8) 96#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
97 97
98/* 98/*
99 * Some controllers have DMA enable/disable register
100 */
101#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9)
102
103/*
 104 * Some controllers allow setting the SDx actual clock 99 * Some controllers allow setting the SDx actual clock
105 */ 100 */
106#define TMIO_MMC_CLK_ACTUAL (1 << 10) 101#define TMIO_MMC_CLK_ACTUAL (1 << 10)
@@ -112,18 +107,6 @@ void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
112 107
113struct dma_chan; 108struct dma_chan;
114 109
115struct tmio_mmc_dma {
116 void *chan_priv_tx;
117 void *chan_priv_rx;
118 int slave_id_tx;
119 int slave_id_rx;
120 int alignment_shift;
121 dma_addr_t dma_rx_offset;
122 bool (*filter)(struct dma_chan *chan, void *arg);
123};
124
125struct tmio_mmc_host;
126
127/* 110/*
128 * data for the MMC controller 111 * data for the MMC controller
129 */ 112 */
@@ -132,19 +115,12 @@ struct tmio_mmc_data {
132 unsigned long capabilities; 115 unsigned long capabilities;
133 unsigned long capabilities2; 116 unsigned long capabilities2;
134 unsigned long flags; 117 unsigned long flags;
135 unsigned long bus_shift;
136 u32 ocr_mask; /* available voltages */ 118 u32 ocr_mask; /* available voltages */
137 struct tmio_mmc_dma *dma;
138 struct device *dev;
139 unsigned int cd_gpio; 119 unsigned int cd_gpio;
120 int alignment_shift;
121 dma_addr_t dma_rx_offset;
140 void (*set_pwr)(struct platform_device *host, int state); 122 void (*set_pwr)(struct platform_device *host, int state);
141 void (*set_clk_div)(struct platform_device *host, int state); 123 void (*set_clk_div)(struct platform_device *host, int state);
142 int (*write16_hook)(struct tmio_mmc_host *host, int addr);
143 /* clock management callbacks */
144 int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
145 void (*clk_disable)(struct platform_device *pdev);
146 int (*multi_io_quirk)(struct mmc_card *card,
147 unsigned int direction, int blk_size);
148}; 124};
149 125
150/* 126/*
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index fab9b32ace8e..78baed5f2952 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -67,7 +67,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
67 67
68#ifdef CONFIG_NUMA_BALANCING 68#ifdef CONFIG_NUMA_BALANCING
69extern bool pmd_trans_migrating(pmd_t pmd); 69extern bool pmd_trans_migrating(pmd_t pmd);
70extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
71extern int migrate_misplaced_page(struct page *page, 70extern int migrate_misplaced_page(struct page *page,
72 struct vm_area_struct *vma, int node); 71 struct vm_area_struct *vma, int node);
73extern bool migrate_ratelimited(int node); 72extern bool migrate_ratelimited(int node);
@@ -76,9 +75,6 @@ static inline bool pmd_trans_migrating(pmd_t pmd)
76{ 75{
77 return false; 76 return false;
78} 77}
79static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
80{
81}
82static inline int migrate_misplaced_page(struct page *page, 78static inline int migrate_misplaced_page(struct page *page,
83 struct vm_area_struct *vma, int node) 79 struct vm_area_struct *vma, int node)
84{ 80{
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 64d25941b329..7b6d4e9ff603 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -71,6 +71,7 @@ enum {
71 71
72 /*master notify fw on finish for slave's flr*/ 72 /*master notify fw on finish for slave's flr*/
73 MLX4_CMD_INFORM_FLR_DONE = 0x5b, 73 MLX4_CMD_INFORM_FLR_DONE = 0x5b,
74 MLX4_CMD_VIRT_PORT_MAP = 0x5c,
74 MLX4_CMD_GET_OP_REQ = 0x59, 75 MLX4_CMD_GET_OP_REQ = 0x59,
75 76
76 /* TPT commands */ 77 /* TPT commands */
@@ -165,9 +166,15 @@ enum {
165}; 166};
166 167
167enum { 168enum {
168 MLX4_CMD_TIME_CLASS_A = 10000, 169 MLX4_CMD_TIME_CLASS_A = 60000,
169 MLX4_CMD_TIME_CLASS_B = 10000, 170 MLX4_CMD_TIME_CLASS_B = 60000,
170 MLX4_CMD_TIME_CLASS_C = 10000, 171 MLX4_CMD_TIME_CLASS_C = 60000,
172};
173
174enum {
175 /* virtual to physical port mapping opcode modifiers */
176 MLX4_GET_PORT_VIRT2PHY = 0x0,
177 MLX4_SET_PORT_VIRT2PHY = 0x1,
171}; 178};
172 179
173enum { 180enum {
@@ -279,6 +286,8 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
279int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); 286int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
280int mlx4_config_dev_retrieval(struct mlx4_dev *dev, 287int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
281 struct mlx4_config_dev_params *params); 288 struct mlx4_config_dev_params *params);
289void mlx4_cmd_wake_completions(struct mlx4_dev *dev);
290void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev);
282/* 291/*
283 * mlx4_get_slave_default_vlan - 292 * mlx4_get_slave_default_vlan -
 284 * return true if VST (default vlan) 293 * return true if VST (default vlan)
@@ -288,5 +297,6 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
288 u16 *vlan, u8 *qos); 297 u16 *vlan, u8 *qos);
289 298
290#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) 299#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
300#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17)
291 301
292#endif /* MLX4_CMD_H */ 302#endif /* MLX4_CMD_H */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 25c791e295fd..e4ebff7e9d02 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -42,7 +42,7 @@
42 42
43#include <linux/atomic.h> 43#include <linux/atomic.h>
44 44
45#include <linux/clocksource.h> 45#include <linux/timecounter.h>
46 46
47#define MAX_MSIX_P_PORT 17 47#define MAX_MSIX_P_PORT 17
48#define MAX_MSIX 64 48#define MAX_MSIX 64
@@ -70,6 +70,7 @@ enum {
70 MLX4_FLAG_SLAVE = 1 << 3, 70 MLX4_FLAG_SLAVE = 1 << 3,
71 MLX4_FLAG_SRIOV = 1 << 4, 71 MLX4_FLAG_SRIOV = 1 << 4,
72 MLX4_FLAG_OLD_REG_MAC = 1 << 6, 72 MLX4_FLAG_OLD_REG_MAC = 1 << 6,
73 MLX4_FLAG_BONDED = 1 << 7
73}; 74};
74 75
75enum { 76enum {
@@ -97,7 +98,7 @@ enum {
97 MLX4_MAX_NUM_PF = 16, 98 MLX4_MAX_NUM_PF = 16,
98 MLX4_MAX_NUM_VF = 126, 99 MLX4_MAX_NUM_VF = 126,
99 MLX4_MAX_NUM_VF_P_PORT = 64, 100 MLX4_MAX_NUM_VF_P_PORT = 64,
100 MLX4_MFUNC_MAX = 80, 101 MLX4_MFUNC_MAX = 128,
101 MLX4_MAX_EQ_NUM = 1024, 102 MLX4_MAX_EQ_NUM = 1024,
102 MLX4_MFUNC_EQ_NUM = 4, 103 MLX4_MFUNC_EQ_NUM = 4,
103 MLX4_MFUNC_MAX_EQES = 8, 104 MLX4_MFUNC_MAX_EQES = 8,
@@ -200,7 +201,9 @@ enum {
200 MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, 201 MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
201 MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, 202 MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
202 MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, 203 MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
203 MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 204 MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
205 MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
206 MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21
204}; 207};
205 208
206enum { 209enum {
@@ -208,6 +211,10 @@ enum {
208 MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 211 MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
209}; 212};
210 213
214enum {
215 MLX4_VF_CAP_FLAG_RESET = 1 << 0
216};
217
211/* bit enums for an 8-bit flags field indicating special use 218/* bit enums for an 8-bit flags field indicating special use
212 * QPs which require special handling in qp_reserve_range. 219 * QPs which require special handling in qp_reserve_range.
213 * Currently, this only includes QPs used by the ETH interface, 220 * Currently, this only includes QPs used by the ETH interface,
@@ -248,9 +255,14 @@ enum {
248 MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, 255 MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
249 MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, 256 MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
250 MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, 257 MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
258 MLX4_BMME_FLAG_PORT_REMAP = 1 << 24,
251 MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28, 259 MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
252}; 260};
253 261
262enum {
263 MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP
264};
265
254enum mlx4_event { 266enum mlx4_event {
255 MLX4_EVENT_TYPE_COMP = 0x00, 267 MLX4_EVENT_TYPE_COMP = 0x00,
256 MLX4_EVENT_TYPE_PATH_MIG = 0x01, 268 MLX4_EVENT_TYPE_PATH_MIG = 0x01,
@@ -276,6 +288,7 @@ enum mlx4_event {
276 MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, 288 MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
277 MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, 289 MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
278 MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, 290 MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
291 MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
279 MLX4_EVENT_TYPE_NONE = 0xff, 292 MLX4_EVENT_TYPE_NONE = 0xff,
280}; 293};
281 294
@@ -285,6 +298,11 @@ enum {
285}; 298};
286 299
287enum { 300enum {
301 MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1,
302 MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2,
303};
304
305enum {
288 MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, 306 MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
289}; 307};
290 308
@@ -411,6 +429,16 @@ enum {
411 MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4, 429 MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
412}; 430};
413 431
432enum {
433 MLX4_DEVICE_STATE_UP = 1 << 0,
434 MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
435};
436
437enum {
438 MLX4_INTERFACE_STATE_UP = 1 << 0,
439 MLX4_INTERFACE_STATE_DELETION = 1 << 1,
440};
441
414#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ 442#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
415 MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) 443 MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
416 444
@@ -535,6 +563,7 @@ struct mlx4_caps {
535 u8 alloc_res_qp_mask; 563 u8 alloc_res_qp_mask;
536 u32 dmfs_high_rate_qpn_base; 564 u32 dmfs_high_rate_qpn_base;
537 u32 dmfs_high_rate_qpn_range; 565 u32 dmfs_high_rate_qpn_range;
566 u32 vf_caps;
538}; 567};
539 568
540struct mlx4_buf_list { 569struct mlx4_buf_list {
@@ -660,6 +689,8 @@ struct mlx4_cq {
660 void (*comp)(struct mlx4_cq *); 689 void (*comp)(struct mlx4_cq *);
661 void *priv; 690 void *priv;
662 } tasklet_ctx; 691 } tasklet_ctx;
692 int reset_notify_added;
693 struct list_head reset_notify;
663}; 694};
664 695
665struct mlx4_qp { 696struct mlx4_qp {
@@ -744,8 +775,23 @@ struct mlx4_vf_dev {
744 u8 n_ports; 775 u8 n_ports;
745}; 776};
746 777
747struct mlx4_dev { 778struct mlx4_dev_persistent {
748 struct pci_dev *pdev; 779 struct pci_dev *pdev;
780 struct mlx4_dev *dev;
781 int nvfs[MLX4_MAX_PORTS + 1];
782 int num_vfs;
783 enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1];
784 enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1];
785 struct work_struct catas_work;
786 struct workqueue_struct *catas_wq;
787 struct mutex device_state_mutex; /* protect HW state */
788 u8 state;
789 struct mutex interface_state_mutex; /* protect SW state */
790 u8 interface_state;
791};
792
793struct mlx4_dev {
794 struct mlx4_dev_persistent *persist;
749 unsigned long flags; 795 unsigned long flags;
750 unsigned long num_slaves; 796 unsigned long num_slaves;
751 struct mlx4_caps caps; 797 struct mlx4_caps caps;
@@ -754,13 +800,11 @@ struct mlx4_dev {
754 struct radix_tree_root qp_table_tree; 800 struct radix_tree_root qp_table_tree;
755 u8 rev_id; 801 u8 rev_id;
756 char board_id[MLX4_BOARD_ID_LEN]; 802 char board_id[MLX4_BOARD_ID_LEN];
757 int num_vfs;
758 int numa_node; 803 int numa_node;
759 int oper_log_mgm_entry_size; 804 int oper_log_mgm_entry_size;
760 u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; 805 u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
761 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; 806 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
762 struct mlx4_vf_dev *dev_vfs; 807 struct mlx4_vf_dev *dev_vfs;
763 int nvfs[MLX4_MAX_PORTS + 1];
764}; 808};
765 809
766struct mlx4_eqe { 810struct mlx4_eqe {
@@ -832,6 +876,11 @@ struct mlx4_eqe {
832 } __packed tbl_change_info; 876 } __packed tbl_change_info;
833 } params; 877 } params;
834 } __packed port_mgmt_change; 878 } __packed port_mgmt_change;
879 struct {
880 u8 reserved[3];
881 u8 port;
882 u32 reserved1[5];
883 } __packed bad_cable;
835 } event; 884 } event;
836 u8 slave_id; 885 u8 slave_id;
837 u8 reserved3[2]; 886 u8 reserved3[2];
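The new recoverable-error event carries its port in the bad_cable member added above. An illustrative sketch (not part of the patch) of how an event dispatcher might report it; the type/subtype fields of struct mlx4_eqe and the helper name are assumptions:

#include <linux/printk.h>
#include <linux/mlx4/device.h>

static void my_report_cable_event(struct mlx4_eqe *eqe)
{
	/* Hypothetical handler: only the constants and the bad_cable
	 * layout come from the hunks above; everything else is assumed. */
	if (eqe->type != MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT)
		return;

	switch (eqe->subtype) {
	case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
		pr_warn("mlx4: bad cable detected on port %u\n",
			eqe->event.bad_cable.port);
		break;
	case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
		pr_warn("mlx4: unsupported cable on port %u\n",
			eqe->event.bad_cable.port);
		break;
	}
}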
@@ -1338,6 +1387,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
1338int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port); 1387int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
1339 1388
1340int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port); 1389int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
1390int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
1391int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
1341int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); 1392int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
1342int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); 1393int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
1343int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, 1394int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
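With struct mlx4_dev_persistent introduced above, the PCI device, the VF counts and the device/interface state move behind dev->persist. A minimal sketch of following the new indirection; the helper name is hypothetical, the fields come from the hunk:

#include <linux/pci.h>
#include <linux/mlx4/device.h>

static void my_dump_persist_state(struct mlx4_dev *dev)
{
	struct mlx4_dev_persistent *persist = dev->persist;

	/* dev->pdev, dev->num_vfs and dev->nvfs[] used to live directly
	 * in struct mlx4_dev; they are now reached through ->persist. */
	dev_info(&persist->pdev->dev,
		 "num_vfs=%d state=%#x interface_state=%#x\n",
		 persist->num_vfs, persist->state, persist->interface_state);
}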
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 022055c8fb26..9553a73d2049 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -49,6 +49,10 @@ enum mlx4_dev_event {
49 MLX4_DEV_EVENT_SLAVE_SHUTDOWN, 49 MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
50}; 50};
51 51
52enum {
53 MLX4_INTFF_BONDING = 1 << 0
54};
55
52struct mlx4_interface { 56struct mlx4_interface {
53 void * (*add) (struct mlx4_dev *dev); 57 void * (*add) (struct mlx4_dev *dev);
54 void (*remove)(struct mlx4_dev *dev, void *context); 58 void (*remove)(struct mlx4_dev *dev, void *context);
@@ -57,11 +61,26 @@ struct mlx4_interface {
57 void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); 61 void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
58 struct list_head list; 62 struct list_head list;
59 enum mlx4_protocol protocol; 63 enum mlx4_protocol protocol;
64 int flags;
60}; 65};
61 66
62int mlx4_register_interface(struct mlx4_interface *intf); 67int mlx4_register_interface(struct mlx4_interface *intf);
63void mlx4_unregister_interface(struct mlx4_interface *intf); 68void mlx4_unregister_interface(struct mlx4_interface *intf);
64 69
70int mlx4_bond(struct mlx4_dev *dev);
71int mlx4_unbond(struct mlx4_dev *dev);
72static inline int mlx4_is_bonded(struct mlx4_dev *dev)
73{
74 return !!(dev->flags & MLX4_FLAG_BONDED);
75}
76
77struct mlx4_port_map {
78 u8 port1;
79 u8 port2;
80};
81
82int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
83
65void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); 84void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
66 85
67static inline u64 mlx4_mac_to_u64(u8 *addr) 86static inline u64 mlx4_mac_to_u64(u8 *addr)
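For the new bonding hooks, an interface that sets MLX4_INTFF_BONDING can remap virtual to physical ports once the device is bonded. A hedged sketch under that assumption; the helper name and the swap policy are made up, the API is the one declared above:

#include <linux/errno.h>
#include <linux/mlx4/driver.h>

static int my_remap_ports(struct mlx4_dev *dev)
{
	/* map virtual port 1 to physical port 2 and vice versa */
	struct mlx4_port_map v2p = { .port1 = 2, .port2 = 1 };

	if (!mlx4_is_bonded(dev))
		return -EOPNOTSUPP;

	return mlx4_port_map_set(dev, &v2p);
}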
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 467ccdf94c98..2bbc62aa818a 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -96,6 +96,7 @@ enum {
96 MLX4_QP_BIT_RRE = 1 << 15, 96 MLX4_QP_BIT_RRE = 1 << 15,
97 MLX4_QP_BIT_RWE = 1 << 14, 97 MLX4_QP_BIT_RWE = 1 << 14,
98 MLX4_QP_BIT_RAE = 1 << 13, 98 MLX4_QP_BIT_RAE = 1 << 13,
99 MLX4_QP_BIT_FPP = 1 << 3,
99 MLX4_QP_BIT_RIC = 1 << 4, 100 MLX4_QP_BIT_RIC = 1 << 4,
100}; 101};
101 102
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f80d0194c9bc..47a93928b90f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -138,7 +138,6 @@ extern unsigned int kobjsize(const void *objp);
138#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ 138#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
139#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ 139#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
140#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ 140#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
141#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
142#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ 141#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
143#define VM_ARCH_2 0x02000000 142#define VM_ARCH_2 0x02000000
144#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ 143#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
@@ -206,27 +205,26 @@ extern unsigned int kobjsize(const void *objp);
206extern pgprot_t protection_map[16]; 205extern pgprot_t protection_map[16];
207 206
208#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ 207#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
209#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ 208#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
210#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ 209#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
211#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ 210#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
212#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */ 211#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
213#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */ 212#define FAULT_FLAG_TRIED 0x20 /* Second try */
214#define FAULT_FLAG_TRIED 0x40 /* second try */ 213#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
215#define FAULT_FLAG_USER 0x80 /* The fault originated in userspace */
216 214
217/* 215/*
218 * vm_fault is filled by the pagefault handler and passed to the vma's 216 * vm_fault is filled by the pagefault handler and passed to the vma's
219 * ->fault function. The vma's ->fault is responsible for returning a bitmask 217 * ->fault function. The vma's ->fault is responsible for returning a bitmask
220 * of VM_FAULT_xxx flags that give details about how the fault was handled. 218 * of VM_FAULT_xxx flags that give details about how the fault was handled.
221 * 219 *
222 * pgoff should be used in favour of virtual_address, if possible. If pgoff 220 * pgoff should be used in favour of virtual_address, if possible.
223 * is used, one may implement ->remap_pages to get nonlinear mapping support.
224 */ 221 */
225struct vm_fault { 222struct vm_fault {
226 unsigned int flags; /* FAULT_FLAG_xxx flags */ 223 unsigned int flags; /* FAULT_FLAG_xxx flags */
227 pgoff_t pgoff; /* Logical page offset based on vma */ 224 pgoff_t pgoff; /* Logical page offset based on vma */
228 void __user *virtual_address; /* Faulting virtual address */ 225 void __user *virtual_address; /* Faulting virtual address */
229 226
227 struct page *cow_page; /* Handler may choose to COW */
230 struct page *page; /* ->fault handlers should return a 228 struct page *page; /* ->fault handlers should return a
231 * page here, unless VM_FAULT_NOPAGE 229 * page here, unless VM_FAULT_NOPAGE
232 * is set (which is also implied by 230 * is set (which is also implied by
@@ -287,9 +285,13 @@ struct vm_operations_struct {
287 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 285 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
288 unsigned long addr); 286 unsigned long addr);
289#endif 287#endif
290 /* called by sys_remap_file_pages() to populate non-linear mapping */ 288 /*
291 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr, 289 * Called by vm_normal_page() for special PTEs to find the
292 unsigned long size, pgoff_t pgoff); 290 * page for @addr. This is useful if the default behavior
291 * (using pte_page()) would not find the correct page.
292 */
293 struct page *(*find_special_page)(struct vm_area_struct *vma,
294 unsigned long addr);
293}; 295};
294 296
295struct mmu_gather; 297struct mmu_gather;
@@ -446,6 +448,12 @@ static inline struct page *compound_head_by_tail(struct page *tail)
446 return tail; 448 return tail;
447} 449}
448 450
451/*
452 * Since either compound page could be dismantled asynchronously in THP
453 * or we access asynchronously arbitrary positioned struct page, there
454 * would be tail flag race. To handle this race, we should call
455 * smp_rmb() before checking tail flag. compound_head_by_tail() did it.
456 */
449static inline struct page *compound_head(struct page *page) 457static inline struct page *compound_head(struct page *page)
450{ 458{
451 if (unlikely(PageTail(page))) 459 if (unlikely(PageTail(page)))
@@ -454,6 +462,18 @@ static inline struct page *compound_head(struct page *page)
454} 462}
455 463
456/* 464/*
465 * If we access compound page synchronously such as access to
466 * allocated page, there is no need to handle tail flag race, so we can
467 * check tail flag directly without any synchronization primitive.
468 */
469static inline struct page *compound_head_fast(struct page *page)
470{
471 if (unlikely(PageTail(page)))
472 return page->first_page;
473 return page;
474}
475
476/*
457 * The atomic page->_mapcount, starts from -1: so that transitions 477 * The atomic page->_mapcount, starts from -1: so that transitions
458 * both from it and to it can be tracked, using atomic_inc_and_test 478 * both from it and to it can be tracked, using atomic_inc_and_test
459 * and atomic_add_negative(-1). 479 * and atomic_add_negative(-1).
@@ -465,7 +485,8 @@ static inline void page_mapcount_reset(struct page *page)
465 485
466static inline int page_mapcount(struct page *page) 486static inline int page_mapcount(struct page *page)
467{ 487{
468 return atomic_read(&(page)->_mapcount) + 1; 488 VM_BUG_ON_PAGE(PageSlab(page), page);
489 return atomic_read(&page->_mapcount) + 1;
469} 490}
470 491
471static inline int page_count(struct page *page) 492static inline int page_count(struct page *page)
@@ -531,7 +552,14 @@ static inline void get_page(struct page *page)
531static inline struct page *virt_to_head_page(const void *x) 552static inline struct page *virt_to_head_page(const void *x)
532{ 553{
533 struct page *page = virt_to_page(x); 554 struct page *page = virt_to_page(x);
534 return compound_head(page); 555
556 /*
557 * We don't need to worry about synchronization of tail flag
558 * when we call virt_to_head_page() since it is only called for
559 * already allocated page and this page won't be freed until
560 * this virt_to_head_page() is finished. So use _fast variant.
561 */
562 return compound_head_fast(page);
535} 563}
536 564
537/* 565/*
@@ -601,29 +629,28 @@ int split_free_page(struct page *page);
601 * prototype for that function and accessor functions. 629 * prototype for that function and accessor functions.
602 * These are _only_ valid on the head of a PG_compound page. 630 * These are _only_ valid on the head of a PG_compound page.
603 */ 631 */
604typedef void compound_page_dtor(struct page *);
605 632
606static inline void set_compound_page_dtor(struct page *page, 633static inline void set_compound_page_dtor(struct page *page,
607 compound_page_dtor *dtor) 634 compound_page_dtor *dtor)
608{ 635{
609 page[1].lru.next = (void *)dtor; 636 page[1].compound_dtor = dtor;
610} 637}
611 638
612static inline compound_page_dtor *get_compound_page_dtor(struct page *page) 639static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
613{ 640{
614 return (compound_page_dtor *)page[1].lru.next; 641 return page[1].compound_dtor;
615} 642}
616 643
617static inline int compound_order(struct page *page) 644static inline int compound_order(struct page *page)
618{ 645{
619 if (!PageHead(page)) 646 if (!PageHead(page))
620 return 0; 647 return 0;
621 return (unsigned long)page[1].lru.prev; 648 return page[1].compound_order;
622} 649}
623 650
624static inline void set_compound_order(struct page *page, unsigned long order) 651static inline void set_compound_order(struct page *page, unsigned long order)
625{ 652{
626 page[1].lru.prev = (void *)order; 653 page[1].compound_order = order;
627} 654}
628 655
629#ifdef CONFIG_MMU 656#ifdef CONFIG_MMU
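set_compound_page_dtor() and set_compound_order() now use the dedicated first-tail-page fields instead of reusing page[1].lru. A brief sketch of how a compound-page owner registers a destructor; the dtor and the caller are hypothetical (ordinarily the page allocator's compound-prep path does this for __GFP_COMP allocations):

#include <linux/mm.h>

static void my_compound_dtor(struct page *page)
{
	/* tear down and release the compound page */
	__free_pages(page, compound_order(page));
}

static void my_init_compound(struct page *head, unsigned int order)
{
	set_compound_order(head, order);		/* page[1].compound_order */
	set_compound_page_dtor(head, my_compound_dtor);	/* page[1].compound_dtor */
}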
@@ -1070,6 +1097,7 @@ static inline int page_mapped(struct page *page)
1070#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ 1097#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
1071#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ 1098#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
1072#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ 1099#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
1100#define VM_FAULT_SIGSEGV 0x0040
1073 1101
1074#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ 1102#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
1075#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ 1103#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
@@ -1078,8 +1106,9 @@ static inline int page_mapped(struct page *page)
1078 1106
1079#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ 1107#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1080 1108
1081#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ 1109#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1082 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE) 1110 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1111 VM_FAULT_FALLBACK)
1083 1112
1084/* Encode hstate index for a hwpoisoned large page */ 1113/* Encode hstate index for a hwpoisoned large page */
1085#define VM_FAULT_SET_HINDEX(x) ((x) << 12) 1114#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
@@ -1119,7 +1148,6 @@ extern void user_shm_unlock(size_t, struct user_struct *);
1119 * Parameter block passed down to zap_pte_range in exceptional cases. 1148 * Parameter block passed down to zap_pte_range in exceptional cases.
1120 */ 1149 */
1121struct zap_details { 1150struct zap_details {
1122 struct vm_area_struct *nonlinear_vma; /* Check page->index if set */
1123 struct address_space *check_mapping; /* Check page->mapping if set */ 1151 struct address_space *check_mapping; /* Check page->mapping if set */
1124 pgoff_t first_index; /* Lowest page->index to unmap */ 1152 pgoff_t first_index; /* Lowest page->index to unmap */
1125 pgoff_t last_index; /* Highest page->index to unmap */ 1153 pgoff_t last_index; /* Highest page->index to unmap */
@@ -1137,8 +1165,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1137 1165
1138/** 1166/**
1139 * mm_walk - callbacks for walk_page_range 1167 * mm_walk - callbacks for walk_page_range
1140 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
1141 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
1142 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry 1168 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
1143 * this handler is required to be able to handle 1169 * this handler is required to be able to handle
1144 * pmd_trans_huge() pmds. They may simply choose to 1170 * pmd_trans_huge() pmds. They may simply choose to
@@ -1146,16 +1172,18 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1146 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry 1172 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1147 * @pte_hole: if set, called for each hole at all levels 1173 * @pte_hole: if set, called for each hole at all levels
1148 * @hugetlb_entry: if set, called for each hugetlb entry 1174 * @hugetlb_entry: if set, called for each hugetlb entry
1149 * *Caution*: The caller must hold mmap_sem() if @hugetlb_entry 1175 * @test_walk: caller specific callback function to determine whether
1150 * is used. 1176 * we walk over the current vma or not. A positive returned
1177 * value means "do page table walk over the current vma,"
1178 * and a negative one means "abort current page table walk
1179 * right now." 0 means "skip the current vma."
1180 * @mm: mm_struct representing the target process of page table walk
1181 * @vma: vma currently walked (NULL if walking outside vmas)
1182 * @private: private data for callbacks' usage
1151 * 1183 *
1152 * (see walk_page_range for more details) 1184 * (see the comment on walk_page_range() for more details)
1153 */ 1185 */
1154struct mm_walk { 1186struct mm_walk {
1155 int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
1156 unsigned long next, struct mm_walk *walk);
1157 int (*pud_entry)(pud_t *pud, unsigned long addr,
1158 unsigned long next, struct mm_walk *walk);
1159 int (*pmd_entry)(pmd_t *pmd, unsigned long addr, 1187 int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1160 unsigned long next, struct mm_walk *walk); 1188 unsigned long next, struct mm_walk *walk);
1161 int (*pte_entry)(pte_t *pte, unsigned long addr, 1189 int (*pte_entry)(pte_t *pte, unsigned long addr,
@@ -1165,12 +1193,16 @@ struct mm_walk {
1165 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask, 1193 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1166 unsigned long addr, unsigned long next, 1194 unsigned long addr, unsigned long next,
1167 struct mm_walk *walk); 1195 struct mm_walk *walk);
1196 int (*test_walk)(unsigned long addr, unsigned long next,
1197 struct mm_walk *walk);
1168 struct mm_struct *mm; 1198 struct mm_struct *mm;
1199 struct vm_area_struct *vma;
1169 void *private; 1200 void *private;
1170}; 1201};
1171 1202
1172int walk_page_range(unsigned long addr, unsigned long end, 1203int walk_page_range(unsigned long addr, unsigned long end,
1173 struct mm_walk *walk); 1204 struct mm_walk *walk);
1205int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
1174void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, 1206void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1175 unsigned long end, unsigned long floor, unsigned long ceiling); 1207 unsigned long end, unsigned long floor, unsigned long ceiling);
1176int copy_page_range(struct mm_struct *dst, struct mm_struct *src, 1208int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
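The reworked struct mm_walk drops the pgd/pud callbacks and gains @test_walk plus a @vma cursor. A minimal sketch of a walker that skips hugetlb VMAs; the callback names and the skip policy are illustrative, the return-value convention is the one documented above:

#include <linux/mm.h>

static int my_test_walk(unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	/* 0 = skip this VMA, >0 = walk it, <0 = abort the whole walk */
	return (walk->vma->vm_flags & VM_HUGETLB) ? 0 : 1;
}

static int my_pte_entry(pte_t *pte, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	/* examine one PTE; walk->private carries caller context */
	return 0;
}

static int my_walk_range(struct mm_struct *mm, unsigned long start,
			 unsigned long end)
{
	struct mm_walk walk = {
		.pte_entry = my_pte_entry,
		.test_walk = my_test_walk,
		.mm        = mm,
	};

	/* caller is expected to hold mm->mmap_sem */
	return walk_page_range(start, end, &walk);
}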
@@ -1234,6 +1266,17 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1234 unsigned long start, unsigned long nr_pages, 1266 unsigned long start, unsigned long nr_pages,
1235 int write, int force, struct page **pages, 1267 int write, int force, struct page **pages,
1236 struct vm_area_struct **vmas); 1268 struct vm_area_struct **vmas);
1269long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
1270 unsigned long start, unsigned long nr_pages,
1271 int write, int force, struct page **pages,
1272 int *locked);
1273long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1274 unsigned long start, unsigned long nr_pages,
1275 int write, int force, struct page **pages,
1276 unsigned int gup_flags);
1277long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1278 unsigned long start, unsigned long nr_pages,
1279 int write, int force, struct page **pages);
1237int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1280int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1238 struct page **pages); 1281 struct page **pages);
1239struct kvec; 1282struct kvec;
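The *_locked/*_unlocked variants added here let callers avoid holding mmap_sem across the whole operation. A hedged sketch of pinning a single page of the current task with the unlocked variant; the helper name is made up, and the caller must put_page() the result when done:

#include <linux/mm.h>
#include <linux/sched.h>

static struct page *my_pin_user_page(unsigned long addr, int write)
{
	struct page *page;
	long pinned;

	/* no mmap_sem held here; the helper takes and drops it internally */
	pinned = get_user_pages_unlocked(current, current->mm, addr, 1,
					 write, 0, &page);
	return pinned == 1 ? page : NULL;
}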
@@ -1366,6 +1409,11 @@ static inline void update_hiwater_vm(struct mm_struct *mm)
1366 mm->hiwater_vm = mm->total_vm; 1409 mm->hiwater_vm = mm->total_vm;
1367} 1410}
1368 1411
1412static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1413{
1414 mm->hiwater_rss = get_mm_rss(mm);
1415}
1416
1369static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, 1417static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1370 struct mm_struct *mm) 1418 struct mm_struct *mm)
1371{ 1419{
@@ -1405,14 +1453,45 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1405int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); 1453int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1406#endif 1454#endif
1407 1455
1408#ifdef __PAGETABLE_PMD_FOLDED 1456#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
1409static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, 1457static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1410 unsigned long address) 1458 unsigned long address)
1411{ 1459{
1412 return 0; 1460 return 0;
1413} 1461}
1462
1463static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
1464
1465static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1466{
1467 return 0;
1468}
1469
1470static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
1471static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
1472
1414#else 1473#else
1415int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); 1474int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1475
1476static inline void mm_nr_pmds_init(struct mm_struct *mm)
1477{
1478 atomic_long_set(&mm->nr_pmds, 0);
1479}
1480
1481static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1482{
1483 return atomic_long_read(&mm->nr_pmds);
1484}
1485
1486static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1487{
1488 atomic_long_inc(&mm->nr_pmds);
1489}
1490
1491static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1492{
1493 atomic_long_dec(&mm->nr_pmds);
1494}
1416#endif 1495#endif
1417 1496
1418int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 1497int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
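The nr_pmds accessors compile away when PMDs are folded (or there is no MMU), so generic code can account PMD pages unconditionally. A tiny illustrative helper, hypothetical, that reads both page-table counters:

#include <linux/printk.h>
#include <linux/mm.h>
#include <linux/mm_types.h>

static void my_report_page_table_pages(struct mm_struct *mm)
{
	pr_info("page tables: %ld PTE pages, %lu PMD pages\n",
		atomic_long_read(&mm->nr_ptes), mm_nr_pmds(mm));
}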
@@ -1775,12 +1854,6 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1775 for (vma = vma_interval_tree_iter_first(root, start, last); \ 1854 for (vma = vma_interval_tree_iter_first(root, start, last); \
1776 vma; vma = vma_interval_tree_iter_next(vma, start, last)) 1855 vma; vma = vma_interval_tree_iter_next(vma, start, last))
1777 1856
1778static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
1779 struct list_head *list)
1780{
1781 list_add_tail(&vma->shared.nonlinear, list);
1782}
1783
1784void anon_vma_interval_tree_insert(struct anon_vma_chain *node, 1857void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
1785 struct rb_root *root); 1858 struct rb_root *root);
1786void anon_vma_interval_tree_remove(struct anon_vma_chain *node, 1859void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
@@ -1952,7 +2025,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
1952#if VM_GROWSUP 2025#if VM_GROWSUP
1953extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 2026extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1954#else 2027#else
1955 #define expand_upwards(vma, address) do { } while (0) 2028 #define expand_upwards(vma, address) (0)
1956#endif 2029#endif
1957 2030
1958/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 2031/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
@@ -2108,9 +2181,8 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
2108 void __user *, size_t *, loff_t *); 2181 void __user *, size_t *, loff_t *);
2109#endif 2182#endif
2110 2183
2111unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid, 2184void drop_slab(void);
2112 unsigned long nr_scanned, 2185void drop_slab_node(int nid);
2113 unsigned long nr_eligible);
2114 2186
2115#ifndef CONFIG_MMU 2187#ifndef CONFIG_MMU
2116#define randomize_va_space 0 2188#define randomize_va_space 0
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6d34aa266a8c..199a03aab8dc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -28,6 +28,8 @@ struct mem_cgroup;
28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) 28 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
29#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) 29#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
30 30
31typedef void compound_page_dtor(struct page *);
32
31/* 33/*
32 * Each physical page in the system has a struct page associated with 34 * Each physical page in the system has a struct page associated with
33 * it to keep track of whatever it is we are using the page for at the 35 * it to keep track of whatever it is we are using the page for at the
@@ -142,6 +144,12 @@ struct page {
142 struct rcu_head rcu_head; /* Used by SLAB 144 struct rcu_head rcu_head; /* Used by SLAB
143 * when destroying via RCU 145 * when destroying via RCU
144 */ 146 */
147 /* First tail page of compound page */
148 struct {
149 compound_page_dtor *compound_dtor;
150 unsigned long compound_order;
151 };
152
145#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS 153#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
146 pgtable_t pmd_huge_pte; /* protected by page->ptl */ 154 pgtable_t pmd_huge_pte; /* protected by page->ptl */
147#endif 155#endif
@@ -273,15 +281,11 @@ struct vm_area_struct {
273 281
274 /* 282 /*
275 * For areas with an address space and backing store, 283 * For areas with an address space and backing store,
276 * linkage into the address_space->i_mmap interval tree, or 284 * linkage into the address_space->i_mmap interval tree.
277 * linkage of vma in the address_space->i_mmap_nonlinear list.
278 */ 285 */
279 union { 286 struct {
280 struct { 287 struct rb_node rb;
281 struct rb_node rb; 288 unsigned long rb_subtree_last;
282 unsigned long rb_subtree_last;
283 } linear;
284 struct list_head nonlinear;
285 } shared; 289 } shared;
286 290
287 /* 291 /*
@@ -359,7 +363,8 @@ struct mm_struct {
359 pgd_t * pgd; 363 pgd_t * pgd;
360 atomic_t mm_users; /* How many users with user space? */ 364 atomic_t mm_users; /* How many users with user space? */
361 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ 365 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
362 atomic_long_t nr_ptes; /* Page table pages */ 366 atomic_long_t nr_ptes; /* PTE page table pages */
367 atomic_long_t nr_pmds; /* PMD page table pages */
363 int map_count; /* number of VMAs */ 368 int map_count; /* number of VMAs */
364 369
365 spinlock_t page_table_lock; /* Protects page tables and some counters */ 370 spinlock_t page_table_lock; /* Protects page tables and some counters */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 4d69c00497bd..a6cf4c063e4e 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -83,7 +83,7 @@ struct mmc_ext_csd {
83 bool hpi; /* HPI support bit */ 83 bool hpi; /* HPI support bit */
84 unsigned int hpi_cmd; /* cmd used as HPI */ 84 unsigned int hpi_cmd; /* cmd used as HPI */
85 bool bkops; /* background support bit */ 85 bool bkops; /* background support bit */
86 bool bkops_en; /* background enable bit */ 86 bool man_bkops_en; /* manual bkops enable bit */
87 unsigned int data_sector_size; /* 512 bytes or 4KB */ 87 unsigned int data_sector_size; /* 512 bytes or 4KB */
88 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ 88 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
89 unsigned int boot_ro_lock; /* ro lock support */ 89 unsigned int boot_ro_lock; /* ro lock support */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index cb2b0400d284..160448f920ac 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -182,7 +182,6 @@ extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
182extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount, 182extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
183 bool is_rel_write); 183 bool is_rel_write);
184extern int mmc_hw_reset(struct mmc_host *host); 184extern int mmc_hw_reset(struct mmc_host *host);
185extern int mmc_hw_reset_check(struct mmc_host *host);
186extern int mmc_can_reset(struct mmc_card *card); 185extern int mmc_can_reset(struct mmc_card *card);
187 186
188extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *); 187extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 42b724e8d503..471fb3116dbe 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -106,6 +106,11 @@ struct mmc_data;
106 * @cur_slot, @mrq and @state. These must always be updated 106 * @cur_slot, @mrq and @state. These must always be updated
107 * at the same time while holding @lock. 107 * at the same time while holding @lock.
108 * 108 *
109 * @irq_lock is an irq-safe spinlock protecting the INTMASK register
110 * to allow the interrupt handler to modify it directly. Held only long
111 * enough to read-modify-write INTMASK and no other locks are grabbed when
112 * holding this one.
113 *
109 * The @mrq field of struct dw_mci_slot is also protected by @lock, 114 * The @mrq field of struct dw_mci_slot is also protected by @lock,
110 * and must always be written at the same time as the slot is added to 115 * and must always be written at the same time as the slot is added to
111 * @queue. 116 * @queue.
@@ -125,6 +130,7 @@ struct mmc_data;
125 */ 130 */
126struct dw_mci { 131struct dw_mci {
127 spinlock_t lock; 132 spinlock_t lock;
133 spinlock_t irq_lock;
128 void __iomem *regs; 134 void __iomem *regs;
129 135
130 struct scatterlist *sg; 136 struct scatterlist *sg;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9f322706f7cb..0c8cbe5d1550 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -166,7 +166,6 @@ struct mmc_async_req {
166 * struct mmc_slot - MMC slot functions 166 * struct mmc_slot - MMC slot functions
167 * 167 *
168 * @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL 168 * @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL
169 * @lock: protect the @handler_priv pointer
170 * @handler_priv: MMC/SD-card slot context 169 * @handler_priv: MMC/SD-card slot context
171 * 170 *
172 * Some MMC/SD host controllers implement slot-functions like card and 171 * Some MMC/SD host controllers implement slot-functions like card and
@@ -176,7 +175,6 @@ struct mmc_async_req {
176 */ 175 */
177struct mmc_slot { 176struct mmc_slot {
178 int cd_irq; 177 int cd_irq;
179 struct mutex lock;
180 void *handler_priv; 178 void *handler_priv;
181}; 179};
182 180
@@ -197,6 +195,7 @@ struct mmc_context_info {
197}; 195};
198 196
199struct regulator; 197struct regulator;
198struct mmc_pwrseq;
200 199
201struct mmc_supply { 200struct mmc_supply {
202 struct regulator *vmmc; /* Card power supply */ 201 struct regulator *vmmc; /* Card power supply */
@@ -208,6 +207,7 @@ struct mmc_host {
208 struct device class_dev; 207 struct device class_dev;
209 int index; 208 int index;
210 const struct mmc_host_ops *ops; 209 const struct mmc_host_ops *ops;
210 struct mmc_pwrseq *pwrseq;
211 unsigned int f_min; 211 unsigned int f_min;
212 unsigned int f_max; 212 unsigned int f_max;
213 unsigned int f_init; 213 unsigned int f_init;
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 49ad7a943638..124f562118b8 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -53,11 +53,6 @@
53#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */ 53#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
54#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */ 54#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
55 55
56#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64
57#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128
58extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE];
59extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE];
60
61 /* class 3 */ 56 /* class 3 */
62#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ 57#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
63 58
@@ -433,6 +428,11 @@ struct _mmc_csd {
433#define EXT_CSD_BKOPS_LEVEL_2 0x2 428#define EXT_CSD_BKOPS_LEVEL_2 0x2
434 429
435/* 430/*
431 * BKOPS modes
432 */
433#define EXT_CSD_MANUAL_BKOPS_MASK 0x01
434
435/*
436 * MMC_SWITCH access modes 436 * MMC_SWITCH access modes
437 */ 437 */
438 438
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 375af80bde7d..c3e3db196738 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -17,6 +17,11 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/mmc/host.h> 18#include <linux/mmc/host.h>
19 19
20struct sdhci_host_next {
21 unsigned int sg_count;
22 s32 cookie;
23};
24
20struct sdhci_host { 25struct sdhci_host {
21 /* Data set by hardware interface driver */ 26 /* Data set by hardware interface driver */
22 const char *hw_name; /* Hardware bus name */ 27 const char *hw_name; /* Hardware bus name */
@@ -106,6 +111,10 @@ struct sdhci_host {
106#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10) 111#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10)
107/* Capability register bit-63 indicates HS400 support */ 112/* Capability register bit-63 indicates HS400 support */
108#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11) 113#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11)
114/* forced tuned clock */
115#define SDHCI_QUIRK2_TUNING_WORK_AROUND (1<<12)
116/* disable the block count for single block transactions */
117#define SDHCI_QUIRK2_SUPPORT_SINGLE (1<<13)
109 118
110 int irq; /* Device IRQ */ 119 int irq; /* Device IRQ */
111 void __iomem *ioaddr; /* Mapped address */ 120 void __iomem *ioaddr; /* Mapped address */
@@ -137,6 +146,7 @@ struct sdhci_host {
137#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ 146#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
138#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ 147#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
139#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ 148#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
149#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
140 150
141 unsigned int version; /* SDHCI spec. version */ 151 unsigned int version; /* SDHCI spec. version */
142 152
@@ -202,6 +212,7 @@ struct sdhci_host {
202#define SDHCI_TUNING_MODE_1 0 212#define SDHCI_TUNING_MODE_1 0
203 struct timer_list tuning_timer; /* Timer for tuning */ 213 struct timer_list tuning_timer; /* Timer for tuning */
204 214
215 struct sdhci_host_next next_data;
205 unsigned long private[0] ____cacheline_aligned; 216 unsigned long private[0] ____cacheline_aligned;
206}; 217};
207#endif /* LINUX_MMC_SDHCI_H */ 218#endif /* LINUX_MMC_SDHCI_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0f01fe065424..996807963716 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -24,13 +24,15 @@
24 * Vendors and devices. Sort key: vendor first, device next. 24 * Vendors and devices. Sort key: vendor first, device next.
25 */ 25 */
26#define SDIO_VENDOR_ID_BROADCOM 0x02d0 26#define SDIO_VENDOR_ID_BROADCOM 0x02d0
27#define SDIO_DEVICE_ID_BROADCOM_43143 43143 27#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887
28#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 28#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
29#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 29#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
30#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 30#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
31#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 31#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
32#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
33#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
32#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 34#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
33#define SDIO_DEVICE_ID_BROADCOM_43362 43362 35#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
34#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 36#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
35 37
36#define SDIO_VENDOR_ID_INTEL 0x0089 38#define SDIO_VENDOR_ID_INTEL 0x0089
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index 68927ae50845..da77e5e2041d 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -3,20 +3,10 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6struct platform_device;
7
8#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect" 6#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect"
9#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard" 7#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard"
10#define SH_MOBILE_SDHI_IRQ_SDIO "sdio" 8#define SH_MOBILE_SDHI_IRQ_SDIO "sdio"
11 9
12/**
13 * struct sh_mobile_sdhi_ops - SDHI driver callbacks
14 * @cd_wakeup: trigger a card-detection run
15 */
16struct sh_mobile_sdhi_ops {
17 void (*cd_wakeup)(const struct platform_device *pdev);
18};
19
20struct sh_mobile_sdhi_info { 10struct sh_mobile_sdhi_info {
21 int dma_slave_tx; 11 int dma_slave_tx;
22 int dma_slave_rx; 12 int dma_slave_rx;
@@ -25,11 +15,6 @@ struct sh_mobile_sdhi_info {
25 unsigned long tmio_caps2; 15 unsigned long tmio_caps2;
26 u32 tmio_ocr_mask; /* available MMC voltages */ 16 u32 tmio_ocr_mask; /* available MMC voltages */
27 unsigned int cd_gpio; 17 unsigned int cd_gpio;
28
29 /* callbacks for board specific setup code */
30 int (*init)(struct platform_device *pdev,
31 const struct sh_mobile_sdhi_ops *ops);
32 void (*cleanup)(struct platform_device *pdev);
33}; 18};
34 19
35#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */ 20#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index e56fa24c9322..3945a8c9d3cb 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -15,12 +15,10 @@ struct mmc_host;
15 15
16int mmc_gpio_get_ro(struct mmc_host *host); 16int mmc_gpio_get_ro(struct mmc_host *host);
17int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio); 17int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio);
18void mmc_gpio_free_ro(struct mmc_host *host);
19 18
20int mmc_gpio_get_cd(struct mmc_host *host); 19int mmc_gpio_get_cd(struct mmc_host *host);
21int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio, 20int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
22 unsigned int debounce); 21 unsigned int debounce);
23void mmc_gpio_free_cd(struct mmc_host *host);
24 22
25int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, 23int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
26 unsigned int idx, bool override_active_level, 24 unsigned int idx, bool override_active_level,
@@ -28,7 +26,8 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
28int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, 26int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
29 unsigned int idx, bool override_active_level, 27 unsigned int idx, bool override_active_level,
30 unsigned int debounce, bool *gpio_invert); 28 unsigned int debounce, bool *gpio_invert);
31void mmc_gpiod_free_cd(struct mmc_host *host); 29void mmc_gpio_set_cd_isr(struct mmc_host *host,
30 irqreturn_t (*isr)(int irq, void *dev_id));
32void mmc_gpiod_request_cd_irq(struct mmc_host *host); 31void mmc_gpiod_request_cd_irq(struct mmc_host *host);
33 32
34#endif 33#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2f0856d14b21..f279d9c158cd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -426,7 +426,7 @@ struct zone {
426 const char *name; 426 const char *name;
427 427
428 /* 428 /*
429 * Number of MIGRATE_RESEVE page block. To maintain for just 429 * Number of MIGRATE_RESERVE page block. To maintain for just
430 * optimization. Protected by zone->lock. 430 * optimization. Protected by zone->lock.
431 */ 431 */
432 int nr_migrate_reserve_block; 432 int nr_migrate_reserve_block;
@@ -970,7 +970,6 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
970 * @z - The cursor used as a starting point for the search 970 * @z - The cursor used as a starting point for the search
971 * @highest_zoneidx - The zone index of the highest zone to return 971 * @highest_zoneidx - The zone index of the highest zone to return
972 * @nodes - An optional nodemask to filter the zonelist with 972 * @nodes - An optional nodemask to filter the zonelist with
973 * @zone - The first suitable zone found is returned via this parameter
974 * 973 *
975 * This function returns the next zone at or below a given zone index that is 974 * This function returns the next zone at or below a given zone index that is
976 * within the allowed nodemask using a cursor as the starting point for the 975 * within the allowed nodemask using a cursor as the starting point for the
@@ -980,8 +979,7 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
980 */ 979 */
981struct zoneref *next_zones_zonelist(struct zoneref *z, 980struct zoneref *next_zones_zonelist(struct zoneref *z,
982 enum zone_type highest_zoneidx, 981 enum zone_type highest_zoneidx,
983 nodemask_t *nodes, 982 nodemask_t *nodes);
984 struct zone **zone);
985 983
986/** 984/**
987 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist 985 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
@@ -1000,8 +998,10 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
1000 nodemask_t *nodes, 998 nodemask_t *nodes,
1001 struct zone **zone) 999 struct zone **zone)
1002{ 1000{
1003 return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, 1001 struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
1004 zone); 1002 highest_zoneidx, nodes);
1003 *zone = zonelist_zone(z);
1004 return z;
1005} 1005}
1006 1006
1007/** 1007/**
@@ -1018,7 +1018,8 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
1018#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ 1018#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
1019 for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ 1019 for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
1020 zone; \ 1020 zone; \
1021 z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \ 1021 z = next_zones_zonelist(++z, highidx, nodemask), \
1022 zone = zonelist_zone(z)) \
1022 1023
1023/** 1024/**
1024 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index 1025 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
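Since next_zones_zonelist() no longer returns the zone through an output pointer, callers take it from zonelist_zone() on the returned zoneref, while first_zones_zonelist() keeps the old convenience parameter. A small sketch under that assumption; the helper is hypothetical:

#include <linux/mmzone.h>

static struct zone *my_first_usable_zone(struct zonelist *zonelist,
					 enum zone_type highidx,
					 nodemask_t *nodes)
{
	struct zone *zone;

	first_zones_zonelist(zonelist, highidx, nodes, &zone);
	/* NULL when the zonelist has no zone at or below highidx */
	return zone;
}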
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 745def862580..2e75ab00dbf2 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -53,9 +53,9 @@ struct ieee1394_device_id {
53 53
54/** 54/**
55 * struct usb_device_id - identifies USB devices for probing and hotplugging 55 * struct usb_device_id - identifies USB devices for probing and hotplugging
56 * @match_flags: Bit mask controlling of the other fields are used to match 56 * @match_flags: Bit mask controlling which of the other fields are used to
57 * against new devices. Any field except for driver_info may be used, 57 * match against new devices. Any field except for driver_info may be
58 * although some only make sense in conjunction with other fields. 58 * used, although some only make sense in conjunction with other fields.
59 * This is usually set by a USB_DEVICE_*() macro, which sets all 59 * This is usually set by a USB_DEVICE_*() macro, which sets all
60 * other fields in this structure except for driver_info. 60 * other fields in this structure except for driver_info.
61 * @idVendor: USB vendor ID for a device; numbers are assigned 61 * @idVendor: USB vendor ID for a device; numbers are assigned
@@ -220,8 +220,7 @@ struct serio_device_id {
220/* 220/*
221 * Struct used for matching a device 221 * Struct used for matching a device
222 */ 222 */
223struct of_device_id 223struct of_device_id {
224{
225 char name[32]; 224 char name[32];
226 char type[32]; 225 char type[32];
227 char compatible[128]; 226 char compatible[128];
diff --git a/include/linux/module.h b/include/linux/module.h
index ebfb0e153c6a..42999fe2dbd0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -135,7 +135,7 @@ void trim_init_extable(struct module *m);
135#ifdef MODULE 135#ifdef MODULE
136/* Creates an alias so file2alias.c can find device table. */ 136/* Creates an alias so file2alias.c can find device table. */
137#define MODULE_DEVICE_TABLE(type, name) \ 137#define MODULE_DEVICE_TABLE(type, name) \
138 extern const struct type##_device_id __mod_##type##__##name##_device_table \ 138extern const typeof(name) __mod_##type##__##name##_device_table \
139 __attribute__ ((unused, alias(__stringify(name)))) 139 __attribute__ ((unused, alias(__stringify(name))))
140#else /* !MODULE */ 140#else /* !MODULE */
141#define MODULE_DEVICE_TABLE(type, name) 141#define MODULE_DEVICE_TABLE(type, name)
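With the macro now using typeof(name), the alias carries the table's real array type instead of a guessed struct tag, so a mismatched table type is caught at build time. Usage is unchanged; a routine example (the table name and compatible string are illustrative):

#include <linux/module.h>
#include <linux/mod_devicetable.h>

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,my-device" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_of_match);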
@@ -444,7 +444,7 @@ extern void __module_put_and_exit(struct module *mod, long code)
444#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code) 444#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
445 445
446#ifdef CONFIG_MODULE_UNLOAD 446#ifdef CONFIG_MODULE_UNLOAD
447unsigned long module_refcount(struct module *mod); 447int module_refcount(struct module *mod);
448void __symbol_put(const char *symbol); 448void __symbol_put(const char *symbol);
449#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x)) 449#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x))
450void symbol_put_addr(void *addr); 450void symbol_put_addr(void *addr);
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 7eeb9bbfb816..f7556261fe3c 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -26,7 +26,7 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
26void *module_alloc(unsigned long size); 26void *module_alloc(unsigned long size);
27 27
28/* Free memory returned from module_alloc. */ 28/* Free memory returned from module_alloc. */
29void module_free(struct module *mod, void *module_region); 29void module_memfree(void *module_region);
30 30
31/* 31/*
32 * Apply the given relocation to the (simplified) ELF. Return -error 32 * Apply the given relocation to the (simplified) ELF. Return -error
@@ -82,4 +82,6 @@ int module_finalize(const Elf_Ehdr *hdr,
82/* Any cleanup needed when module leaves. */ 82/* Any cleanup needed when module leaves. */
83void module_arch_cleanup(struct module *mod); 83void module_arch_cleanup(struct module *mod);
84 84
85/* Any cleanup before freeing mod->module_init */
86void module_arch_freeing_init(struct module *mod);
85#endif 87#endif
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 031ff3a9a0bd..3301c4c289d6 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -408,4 +408,6 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
408 return mtd_is_bitflip(err) || mtd_is_eccerr(err); 408 return mtd_is_bitflip(err) || mtd_is_eccerr(err);
409} 409}
410 410
411unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
412
411#endif /* __MTD_MTD_H__ */ 413#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index c3918a0684fe..1e271cb559cd 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -23,22 +23,32 @@
23 23
24#include <linux/ioctl.h> 24#include <linux/ioctl.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/scatterlist.h>
26#include <mtd/ubi-user.h> 27#include <mtd/ubi-user.h>
27 28
28/* All volumes/LEBs */ 29/* All volumes/LEBs */
29#define UBI_ALL -1 30#define UBI_ALL -1
30 31
31/* 32/*
33 * Maximum number of scatter gather list entries,
34 * we use only 64 to have a lower memory footprint.
35 */
36#define UBI_MAX_SG_COUNT 64
37
38/*
32 * enum ubi_open_mode - UBI volume open mode constants. 39 * enum ubi_open_mode - UBI volume open mode constants.
33 * 40 *
34 * UBI_READONLY: read-only mode 41 * UBI_READONLY: read-only mode
35 * UBI_READWRITE: read-write mode 42 * UBI_READWRITE: read-write mode
36 * UBI_EXCLUSIVE: exclusive mode 43 * UBI_EXCLUSIVE: exclusive mode
44 * UBI_METAONLY: modify only the volume meta-data,
45 * i.e. the data stored in the volume table, but not in any of volume LEBs.
37 */ 46 */
38enum { 47enum {
39 UBI_READONLY = 1, 48 UBI_READONLY = 1,
40 UBI_READWRITE, 49 UBI_READWRITE,
41 UBI_EXCLUSIVE 50 UBI_EXCLUSIVE,
51 UBI_METAONLY
42}; 52};
43 53
44/** 54/**
@@ -116,6 +126,35 @@ struct ubi_volume_info {
116}; 126};
117 127
118/** 128/**
129 * struct ubi_sgl - UBI scatter gather list data structure.
130 * @list_pos: current position in @sg[]
131 * @page_pos: current position in @sg[@list_pos]
132 * @sg: the scatter gather list itself
133 *
134 * ubi_sgl is a wrapper around a scatter list which keeps track of the
135 * current position in the list and the current list item such that
136 * it can be used across multiple ubi_leb_read_sg() calls.
137 */
138struct ubi_sgl {
139 int list_pos;
140 int page_pos;
141 struct scatterlist sg[UBI_MAX_SG_COUNT];
142};
143
144/**
145 * ubi_sgl_init - initialize an UBI scatter gather list data structure.
146 * @usgl: the UBI scatter gather struct itself
147 *
148 * Please note that you still have to use sg_init_table() or any adequate
149 * function to initialize the underlying struct scatterlist.
150 */
151static inline void ubi_sgl_init(struct ubi_sgl *usgl)
152{
153 usgl->list_pos = 0;
154 usgl->page_pos = 0;
155}
156
157/**
119 * struct ubi_device_info - UBI device description data structure. 158 * struct ubi_device_info - UBI device description data structure.
120 * @ubi_num: ubi device number 159 * @ubi_num: ubi device number
121 * @leb_size: logical eraseblock size on this UBI device 160 * @leb_size: logical eraseblock size on this UBI device
@@ -210,6 +249,8 @@ int ubi_unregister_volume_notifier(struct notifier_block *nb);
210void ubi_close_volume(struct ubi_volume_desc *desc); 249void ubi_close_volume(struct ubi_volume_desc *desc);
211int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, 250int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
212 int len, int check); 251 int len, int check);
252int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
253 int offset, int len, int check);
213int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, 254int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
214 int offset, int len); 255 int offset, int len);
215int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, 256int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
@@ -230,4 +271,14 @@ static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
230{ 271{
231 return ubi_leb_read(desc, lnum, buf, offset, len, 0); 272 return ubi_leb_read(desc, lnum, buf, offset, len, 0);
232} 273}
274
275/*
276 * This function is the same as the 'ubi_leb_read_sg()' function, but it does
277 * not provide the checking capability.
278 */
279static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum,
280 struct ubi_sgl *sgl, int offset, int len)
281{
282 return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0);
283}
233#endif /* !__LINUX_UBI_H__ */ 284#endif /* !__LINUX_UBI_H__ */
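A sketch of reading a LEB through the new scatter-gather path; the helper is hypothetical and the buffer setup is trimmed. As the ubi_sgl_init() comment notes, the embedded scatterlist still needs its own initialisation:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>

static int my_read_leb_sg(struct ubi_volume_desc *desc, int lnum, int len)
{
	struct ubi_sgl *sgl;
	int ret;

	sgl = kmalloc(sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;

	ubi_sgl_init(sgl);
	sg_init_table(sgl->sg, UBI_MAX_SG_COUNT);
	/* ... point sgl->sg[] entries at the destination pages here ... */

	ret = ubi_read_sg(desc, lnum, sgl, 0, len);
	kfree(sgl);
	return ret;
}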
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index cc31498fc526..2cb7531e7d7a 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -59,7 +59,6 @@ struct mutex {
59 struct optimistic_spin_queue osq; /* Spinner MCS lock */ 59 struct optimistic_spin_queue osq; /* Spinner MCS lock */
60#endif 60#endif
61#ifdef CONFIG_DEBUG_MUTEXES 61#ifdef CONFIG_DEBUG_MUTEXES
62 const char *name;
63 void *magic; 62 void *magic;
64#endif 63#endif
65#ifdef CONFIG_DEBUG_LOCK_ALLOC 64#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 8e30685affeb..7d59dc6ab789 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -66,6 +66,7 @@ enum {
66 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ 66 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
67 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ 67 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
68 NETIF_F_BUSY_POLL_BIT, /* Busy poll */ 68 NETIF_F_BUSY_POLL_BIT, /* Busy poll */
69 NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */
69 70
70 /* 71 /*
71 * Add your fresh new feature above and remember to update 72 * Add your fresh new feature above and remember to update
@@ -124,6 +125,7 @@ enum {
124#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 125#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
125#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) 126#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
126#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) 127#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
128#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD)
127 129
128/* Features valid for ethtool to change */ 130/* Features valid for ethtool to change */
129/* = all defined minus driver/device-class-related */ 131/* = all defined minus driver/device-class-related */
@@ -159,7 +161,9 @@ enum {
159 */ 161 */
160#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ 162#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
161 NETIF_F_SG | NETIF_F_HIGHDMA | \ 163 NETIF_F_SG | NETIF_F_HIGHDMA | \
162 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) 164 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
165 NETIF_F_HW_SWITCH_OFFLOAD)
166
163/* 167/*
164 * If one device doesn't support one of these features, then disable it 168 * If one device doesn't support one of these features, then disable it
165 * for all in netdev_increment_features. 169 * for all in netdev_increment_features.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 679e6e90aa4c..d115256ed5a2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -51,6 +51,7 @@
51#include <linux/netdev_features.h> 51#include <linux/netdev_features.h>
52#include <linux/neighbour.h> 52#include <linux/neighbour.h>
53#include <uapi/linux/netdevice.h> 53#include <uapi/linux/netdevice.h>
54#include <uapi/linux/if_bonding.h>
54 55
55struct netpoll_info; 56struct netpoll_info;
56struct device; 57struct device;
@@ -643,39 +644,40 @@ struct rps_dev_flow_table {
643/* 644/*
644 * The rps_sock_flow_table contains mappings of flows to the last CPU 645 * The rps_sock_flow_table contains mappings of flows to the last CPU
645 * on which they were processed by the application (set in recvmsg). 646 * on which they were processed by the application (set in recvmsg).
647 * Each entry is a 32bit value. Upper part is the high order bits
648 * of flow hash, lower part is cpu number.
649 * rps_cpu_mask is used to partition the space, depending on number of
650 * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
651 * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f,
652 * meaning we use 32-6=26 bits for the hash.
646 */ 653 */
647struct rps_sock_flow_table { 654struct rps_sock_flow_table {
648 unsigned int mask; 655 u32 mask;
649 u16 ents[0]; 656
657 u32 ents[0] ____cacheline_aligned_in_smp;
650}; 658};
651#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ 659#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
652 ((_num) * sizeof(u16)))
653 660
654#define RPS_NO_CPU 0xffff 661#define RPS_NO_CPU 0xffff
655 662
663extern u32 rps_cpu_mask;
664extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
665
656static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, 666static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
657 u32 hash) 667 u32 hash)
658{ 668{
659 if (table && hash) { 669 if (table && hash) {
660 unsigned int cpu, index = hash & table->mask; 670 unsigned int index = hash & table->mask;
671 u32 val = hash & ~rps_cpu_mask;
661 672
662 /* We only give a hint, preemption can change cpu under us */ 673 /* We only give a hint, preemption can change cpu under us */
663 cpu = raw_smp_processor_id(); 674 val |= raw_smp_processor_id();
664 675
665 if (table->ents[index] != cpu) 676 if (table->ents[index] != val)
666 table->ents[index] = cpu; 677 table->ents[index] = val;
667 } 678 }
668} 679}
669 680
670static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
671 u32 hash)
672{
673 if (table && hash)
674 table->ents[hash & table->mask] = RPS_NO_CPU;
675}
676
677extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
678
679#ifdef CONFIG_RFS_ACCEL 681#ifdef CONFIG_RFS_ACCEL
680bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, 682bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
681 u16 filter_id); 683 u16 filter_id);
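
Annotation (not part of the patch): each rps_sock_flow_table entry now packs the upper bits of the flow hash and the CPU number into one 32-bit value, partitioned by rps_cpu_mask as described in the comment above. A small illustrative sketch of that packing, using the example from the comment (64 possible CPUs, so rps_cpu_mask = 0x3f and 26 hash bits):

/* Illustration only; these helpers are not part of the patch. */
static inline u32 example_pack_entry(u32 hash, u32 cpu, u32 rps_cpu_mask)
{
	/* high bits carry the hash, low bits carry the CPU id */
	return (hash & ~rps_cpu_mask) | (cpu & rps_cpu_mask);
}

static inline u32 example_entry_cpu(u32 entry, u32 rps_cpu_mask)
{
	return entry & rps_cpu_mask;
}
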
@@ -852,11 +854,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
852 * 3. Update dev->stats asynchronously and atomically, and define 854 * 3. Update dev->stats asynchronously and atomically, and define
853 * neither operation. 855 * neither operation.
854 * 856 *
855 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); 857 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
856 * If device support VLAN filtering this function is called when a 858 * If device support VLAN filtering this function is called when a
857 * VLAN id is registered. 859 * VLAN id is registered.
858 * 860 *
859 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); 861 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
860 * If device support VLAN filtering this function is called when a 862 * If device support VLAN filtering this function is called when a
861 * VLAN id is unregistered. 863 * VLAN id is unregistered.
862 * 864 *
@@ -1154,13 +1156,15 @@ struct net_device_ops {
1154 int idx); 1156 int idx);
1155 1157
1156 int (*ndo_bridge_setlink)(struct net_device *dev, 1158 int (*ndo_bridge_setlink)(struct net_device *dev,
1157 struct nlmsghdr *nlh); 1159 struct nlmsghdr *nlh,
1160 u16 flags);
1158 int (*ndo_bridge_getlink)(struct sk_buff *skb, 1161 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1159 u32 pid, u32 seq, 1162 u32 pid, u32 seq,
1160 struct net_device *dev, 1163 struct net_device *dev,
1161 u32 filter_mask); 1164 u32 filter_mask);
1162 int (*ndo_bridge_dellink)(struct net_device *dev, 1165 int (*ndo_bridge_dellink)(struct net_device *dev,
1163 struct nlmsghdr *nlh); 1166 struct nlmsghdr *nlh,
1167 u16 flags);
1164 int (*ndo_change_carrier)(struct net_device *dev, 1168 int (*ndo_change_carrier)(struct net_device *dev,
1165 bool new_carrier); 1169 bool new_carrier);
1166 int (*ndo_get_phys_port_id)(struct net_device *dev, 1170 int (*ndo_get_phys_port_id)(struct net_device *dev,
@@ -1514,6 +1518,8 @@ struct net_device {
1514 struct list_head napi_list; 1518 struct list_head napi_list;
1515 struct list_head unreg_list; 1519 struct list_head unreg_list;
1516 struct list_head close_list; 1520 struct list_head close_list;
1521 struct list_head ptype_all;
1522 struct list_head ptype_specific;
1517 1523
1518 struct { 1524 struct {
1519 struct list_head upper; 1525 struct list_head upper;
@@ -1969,7 +1975,7 @@ struct offload_callbacks {
1969 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 1975 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1970 netdev_features_t features); 1976 netdev_features_t features);
1971 struct sk_buff **(*gro_receive)(struct sk_buff **head, 1977 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1972 struct sk_buff *skb); 1978 struct sk_buff *skb);
1973 int (*gro_complete)(struct sk_buff *skb, int nhoff); 1979 int (*gro_complete)(struct sk_buff *skb, int nhoff);
1974}; 1980};
1975 1981
@@ -1979,10 +1985,21 @@ struct packet_offload {
1979 struct list_head list; 1985 struct list_head list;
1980}; 1986};
1981 1987
1988struct udp_offload;
1989
1990struct udp_offload_callbacks {
1991 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1992 struct sk_buff *skb,
1993 struct udp_offload *uoff);
1994 int (*gro_complete)(struct sk_buff *skb,
1995 int nhoff,
1996 struct udp_offload *uoff);
1997};
1998
1982struct udp_offload { 1999struct udp_offload {
1983 __be16 port; 2000 __be16 port;
1984 u8 ipproto; 2001 u8 ipproto;
1985 struct offload_callbacks callbacks; 2002 struct udp_offload_callbacks callbacks;
1986}; 2003};
1987 2004
1988/* often modified stats are per cpu, other are shared (netdev->stats) */ 2005/* often modified stats are per cpu, other are shared (netdev->stats) */
@@ -2041,6 +2058,7 @@ struct pcpu_sw_netstats {
2041#define NETDEV_RESEND_IGMP 0x0016 2058#define NETDEV_RESEND_IGMP 0x0016
2042#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ 2059#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
2043#define NETDEV_CHANGEINFODATA 0x0018 2060#define NETDEV_CHANGEINFODATA 0x0018
2061#define NETDEV_BONDING_INFO 0x0019
2044 2062
2045int register_netdevice_notifier(struct notifier_block *nb); 2063int register_netdevice_notifier(struct notifier_block *nb);
2046int unregister_netdevice_notifier(struct notifier_block *nb); 2064int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2085,7 +2103,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */
2085 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 2103 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2086#define for_each_netdev_in_bond_rcu(bond, slave) \ 2104#define for_each_netdev_in_bond_rcu(bond, slave) \
2087 for_each_netdev_rcu(&init_net, slave) \ 2105 for_each_netdev_rcu(&init_net, slave) \
2088 if (netdev_master_upper_dev_get_rcu(slave) == bond) 2106 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2089#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 2107#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2090 2108
2091static inline struct net_device *next_net_device(struct net_device *dev) 2109static inline struct net_device *next_net_device(struct net_device *dev)
@@ -2303,6 +2321,21 @@ do { \
2303 compute_pseudo(skb, proto)); \ 2321 compute_pseudo(skb, proto)); \
2304} while (0) 2322} while (0)
2305 2323
2324static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2325 int start, int offset)
2326{
2327 __wsum delta;
2328
2329 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2330
2331 delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
2332
2333 /* Adjust skb->csum since we changed the packet */
2334 skb->csum = csum_add(skb->csum, delta);
2335 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2336}
2337
2338
2306static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 2339static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2307 unsigned short type, 2340 unsigned short type,
2308 const void *daddr, const void *saddr, 2341 const void *daddr, const void *saddr,
@@ -3464,6 +3497,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3464struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 3497struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3465 netdev_features_t features); 3498 netdev_features_t features);
3466 3499
3500struct netdev_bonding_info {
3501 ifslave slave;
3502 ifbond master;
3503};
3504
3505struct netdev_notifier_bonding_info {
3506 struct netdev_notifier_info info; /* must be first */
3507 struct netdev_bonding_info bonding_info;
3508};
3509
3510void netdev_bonding_info_change(struct net_device *dev,
3511 struct netdev_bonding_info *bonding_info);
3512
3467static inline 3513static inline
3468struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) 3514struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3469{ 3515{
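
Annotation (not part of the patch): netdev_bonding_info_change() and the new NETDEV_BONDING_INFO notifier event let interested drivers observe bonding slave/master state. A hedged sketch of a consumer, assuming a hypothetical driver-side notifier callback; since 'info' must be the first member, the payload is reached via container_of():

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_bonding_info *nbi;

	if (event != NETDEV_BONDING_INFO)
		return NOTIFY_DONE;

	nbi = container_of(ptr, struct netdev_notifier_bonding_info, info);
	pr_debug("%s: bonding info updated\n", dev->name);
	(void)nbi->bonding_info;	/* ifslave/ifbond details available here */

	return NOTIFY_OK;
}
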
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 022b761dbf0a..ed43cb74b11d 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -411,6 +411,7 @@ enum lock_type4 {
411#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22) 411#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
412#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23) 412#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
413#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) 413#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
414#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
414#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) 415#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
415#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) 416#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
416#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) 417#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
@@ -516,6 +517,8 @@ enum pnfs_layouttype {
516 LAYOUT_NFSV4_1_FILES = 1, 517 LAYOUT_NFSV4_1_FILES = 1,
517 LAYOUT_OSD2_OBJECTS = 2, 518 LAYOUT_OSD2_OBJECTS = 2,
518 LAYOUT_BLOCK_VOLUME = 3, 519 LAYOUT_BLOCK_VOLUME = 3,
520 LAYOUT_FLEX_FILES = 4,
521 LAYOUT_TYPE_MAX
519}; 522};
520 523
521/* used for both layout return and recall */ 524/* used for both layout return and recall */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1e37fbb78f7a..5e1273d4de14 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -74,10 +74,9 @@ struct nfs_client {
74 /* idmapper */ 74 /* idmapper */
75 struct idmap * cl_idmap; 75 struct idmap * cl_idmap;
76 76
77 /* Our own IP address, as a null-terminated string. 77 /* Client owner identifier */
78 * This is used to generate the mv0 callback address. 78 const char * cl_owner_id;
79 */ 79
80 char cl_ipaddr[48];
81 u32 cl_cb_ident; /* v4.0 callback identifier */ 80 u32 cl_cb_ident; /* v4.0 callback identifier */
82 const struct nfs4_minor_version_ops *cl_mvops; 81 const struct nfs4_minor_version_ops *cl_mvops;
83 unsigned long cl_mig_gen; 82 unsigned long cl_mig_gen;
@@ -105,6 +104,11 @@ struct nfs_client {
105#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */ 104#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
106#endif /* CONFIG_NFS_V4 */ 105#endif /* CONFIG_NFS_V4 */
107 106
107 /* Our own IP address, as a null-terminated string.
108 * This is used to generate the mv0 callback address.
109 */
110 char cl_ipaddr[48];
111
108#ifdef CONFIG_NFS_FSCACHE 112#ifdef CONFIG_NFS_FSCACHE
109 struct fscache_cookie *fscache; /* client index cache cookie */ 113 struct fscache_cookie *fscache; /* client index cache cookie */
110#endif 114#endif
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h
index 0f4b79da6584..333844e38f66 100644
--- a/include/linux/nfs_idmap.h
+++ b/include/linux/nfs_idmap.h
@@ -73,5 +73,7 @@ int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, kgid_t
73int nfs_map_uid_to_name(const struct nfs_server *, kuid_t, char *, size_t); 73int nfs_map_uid_to_name(const struct nfs_server *, kuid_t, char *, size_t);
74int nfs_map_gid_to_group(const struct nfs_server *, kgid_t, char *, size_t); 74int nfs_map_gid_to_group(const struct nfs_server *, kgid_t, char *, size_t);
75 75
76int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res);
77
76extern unsigned int nfs_idmap_cache_timeout; 78extern unsigned int nfs_idmap_cache_timeout;
77#endif /* NFS_IDMAP_H */ 79#endif /* NFS_IDMAP_H */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 6c3e06ee2fb7..3eb072dbce83 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -58,6 +58,9 @@ struct nfs_pageio_ops {
58 size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, 58 size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *,
59 struct nfs_page *); 59 struct nfs_page *);
60 int (*pg_doio)(struct nfs_pageio_descriptor *); 60 int (*pg_doio)(struct nfs_pageio_descriptor *);
61 unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
62 struct nfs_page *);
63 void (*pg_cleanup)(struct nfs_pageio_descriptor *);
61}; 64};
62 65
63struct nfs_rw_ops { 66struct nfs_rw_ops {
@@ -69,18 +72,21 @@ struct nfs_rw_ops {
69 struct inode *); 72 struct inode *);
70 void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *); 73 void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *);
71 void (*rw_initiate)(struct nfs_pgio_header *, struct rpc_message *, 74 void (*rw_initiate)(struct nfs_pgio_header *, struct rpc_message *,
75 const struct nfs_rpc_ops *,
72 struct rpc_task_setup *, int); 76 struct rpc_task_setup *, int);
73}; 77};
74 78
75struct nfs_pageio_descriptor { 79struct nfs_pgio_mirror {
76 struct list_head pg_list; 80 struct list_head pg_list;
77 unsigned long pg_bytes_written; 81 unsigned long pg_bytes_written;
78 size_t pg_count; 82 size_t pg_count;
79 size_t pg_bsize; 83 size_t pg_bsize;
80 unsigned int pg_base; 84 unsigned int pg_base;
81 unsigned char pg_moreio : 1, 85 unsigned char pg_recoalesce : 1;
82 pg_recoalesce : 1; 86};
83 87
88struct nfs_pageio_descriptor {
89 unsigned char pg_moreio : 1;
84 struct inode *pg_inode; 90 struct inode *pg_inode;
85 const struct nfs_pageio_ops *pg_ops; 91 const struct nfs_pageio_ops *pg_ops;
86 const struct nfs_rw_ops *pg_rw_ops; 92 const struct nfs_rw_ops *pg_rw_ops;
@@ -91,8 +97,18 @@ struct nfs_pageio_descriptor {
91 struct pnfs_layout_segment *pg_lseg; 97 struct pnfs_layout_segment *pg_lseg;
92 struct nfs_direct_req *pg_dreq; 98 struct nfs_direct_req *pg_dreq;
93 void *pg_layout_private; 99 void *pg_layout_private;
100 unsigned int pg_bsize; /* default bsize for mirrors */
101
102 u32 pg_mirror_count;
103 struct nfs_pgio_mirror *pg_mirrors;
104 struct nfs_pgio_mirror pg_mirrors_static[1];
105 struct nfs_pgio_mirror *pg_mirrors_dynamic;
106 u32 pg_mirror_idx; /* current mirror */
94}; 107};
95 108
109/* arbitrarily selected limit to number of mirrors */
110#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16
111
96#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) 112#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
97 113
98extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, 114extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 467c84efb596..38d96ba935c2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -285,6 +285,7 @@ struct nfs4_layoutcommit_data {
285 struct nfs_fattr fattr; 285 struct nfs_fattr fattr;
286 struct list_head lseg_list; 286 struct list_head lseg_list;
287 struct rpc_cred *cred; 287 struct rpc_cred *cred;
288 struct inode *inode;
288 struct nfs4_layoutcommit_args args; 289 struct nfs4_layoutcommit_args args;
289 struct nfs4_layoutcommit_res res; 290 struct nfs4_layoutcommit_res res;
290}; 291};
@@ -293,6 +294,7 @@ struct nfs4_layoutreturn_args {
293 struct nfs4_sequence_args seq_args; 294 struct nfs4_sequence_args seq_args;
294 struct pnfs_layout_hdr *layout; 295 struct pnfs_layout_hdr *layout;
295 struct inode *inode; 296 struct inode *inode;
297 struct pnfs_layout_range range;
296 nfs4_stateid stateid; 298 nfs4_stateid stateid;
297 __u32 layout_type; 299 __u32 layout_type;
298}; 300};
@@ -308,6 +310,7 @@ struct nfs4_layoutreturn {
308 struct nfs4_layoutreturn_res res; 310 struct nfs4_layoutreturn_res res;
309 struct rpc_cred *cred; 311 struct rpc_cred *cred;
310 struct nfs_client *clp; 312 struct nfs_client *clp;
313 struct inode *inode;
311 int rpc_status; 314 int rpc_status;
312}; 315};
313 316
@@ -325,6 +328,7 @@ struct nfs_openargs {
325 struct nfs_seqid * seqid; 328 struct nfs_seqid * seqid;
326 int open_flags; 329 int open_flags;
327 fmode_t fmode; 330 fmode_t fmode;
331 u32 share_access;
328 u32 access; 332 u32 access;
329 __u64 clientid; 333 __u64 clientid;
330 struct stateowner_id id; 334 struct stateowner_id id;
@@ -389,9 +393,10 @@ struct nfs_open_confirmres {
389struct nfs_closeargs { 393struct nfs_closeargs {
390 struct nfs4_sequence_args seq_args; 394 struct nfs4_sequence_args seq_args;
391 struct nfs_fh * fh; 395 struct nfs_fh * fh;
392 nfs4_stateid * stateid; 396 nfs4_stateid stateid;
393 struct nfs_seqid * seqid; 397 struct nfs_seqid * seqid;
394 fmode_t fmode; 398 fmode_t fmode;
399 u32 share_access;
395 const u32 * bitmask; 400 const u32 * bitmask;
396}; 401};
397 402
@@ -416,12 +421,13 @@ struct nfs_lock_args {
416 struct nfs_fh * fh; 421 struct nfs_fh * fh;
417 struct file_lock * fl; 422 struct file_lock * fl;
418 struct nfs_seqid * lock_seqid; 423 struct nfs_seqid * lock_seqid;
419 nfs4_stateid * lock_stateid; 424 nfs4_stateid lock_stateid;
420 struct nfs_seqid * open_seqid; 425 struct nfs_seqid * open_seqid;
421 nfs4_stateid * open_stateid; 426 nfs4_stateid open_stateid;
422 struct nfs_lowner lock_owner; 427 struct nfs_lowner lock_owner;
423 unsigned char block : 1; 428 unsigned char block : 1;
424 unsigned char reclaim : 1; 429 unsigned char reclaim : 1;
430 unsigned char new_lock : 1;
425 unsigned char new_lock_owner : 1; 431 unsigned char new_lock_owner : 1;
426}; 432};
427 433
@@ -437,7 +443,7 @@ struct nfs_locku_args {
437 struct nfs_fh * fh; 443 struct nfs_fh * fh;
438 struct file_lock * fl; 444 struct file_lock * fl;
439 struct nfs_seqid * seqid; 445 struct nfs_seqid * seqid;
440 nfs4_stateid * stateid; 446 nfs4_stateid stateid;
441}; 447};
442 448
443struct nfs_locku_res { 449struct nfs_locku_res {
@@ -513,6 +519,7 @@ struct nfs_pgio_res {
513 struct nfs4_sequence_res seq_res; 519 struct nfs4_sequence_res seq_res;
514 struct nfs_fattr * fattr; 520 struct nfs_fattr * fattr;
515 __u32 count; 521 __u32 count;
522 __u32 op_status;
516 int eof; /* used by read */ 523 int eof; /* used by read */
517 struct nfs_writeverf * verf; /* used by write */ 524 struct nfs_writeverf * verf; /* used by write */
518 const struct nfs_server *server; /* used by write */ 525 const struct nfs_server *server; /* used by write */
@@ -532,6 +539,7 @@ struct nfs_commitargs {
532 539
533struct nfs_commitres { 540struct nfs_commitres {
534 struct nfs4_sequence_res seq_res; 541 struct nfs4_sequence_res seq_res;
542 __u32 op_status;
535 struct nfs_fattr *fattr; 543 struct nfs_fattr *fattr;
536 struct nfs_writeverf *verf; 544 struct nfs_writeverf *verf;
537 const struct nfs_server *server; 545 const struct nfs_server *server;
@@ -1325,7 +1333,8 @@ struct nfs_pgio_header {
1325 __u64 mds_offset; /* Filelayout dense stripe */ 1333 __u64 mds_offset; /* Filelayout dense stripe */
1326 struct nfs_page_array page_array; 1334 struct nfs_page_array page_array;
1327 struct nfs_client *ds_clp; /* pNFS data server */ 1335 struct nfs_client *ds_clp; /* pNFS data server */
1328 int ds_idx; /* ds index if ds_clp is set */ 1336 int ds_commit_idx; /* ds index if ds_clp is set */
1337 int pgio_mirror_idx;/* mirror index in pgio layer */
1329}; 1338};
1330 1339
1331struct nfs_mds_commit_info { 1340struct nfs_mds_commit_info {
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 83a6aeda899d..6e85889cf9ab 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -8,14 +8,13 @@
8 * See detailed comments in the file linux/bitmap.h describing the 8 * See detailed comments in the file linux/bitmap.h describing the
9 * data type on which these nodemasks are based. 9 * data type on which these nodemasks are based.
10 * 10 *
11 * For details of nodemask_scnprintf() and nodemask_parse_user(), 11 * For details of nodemask_parse_user(), see bitmap_parse_user() in
12 * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c. 12 * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(),
13 * For details of nodelist_scnprintf() and nodelist_parse(), see 13 * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in
14 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. 14 * lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in
15 * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c. 15 * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in
16 * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c. 16 * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in
17 * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c. 17 * lib/bitmap.c.
18 * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c.
19 * 18 *
20 * The available nodemask operations are: 19 * The available nodemask operations are:
21 * 20 *
@@ -52,9 +51,7 @@
52 * NODE_MASK_NONE Initializer - no bits set 51 * NODE_MASK_NONE Initializer - no bits set
53 * unsigned long *nodes_addr(mask) Array of unsigned long's in mask 52 * unsigned long *nodes_addr(mask) Array of unsigned long's in mask
54 * 53 *
55 * int nodemask_scnprintf(buf, len, mask) Format nodemask for printing
56 * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask 54 * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask
57 * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing
58 * int nodelist_parse(buf, map) Parse ascii string as nodelist 55 * int nodelist_parse(buf, map) Parse ascii string as nodelist
59 * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit) 56 * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
60 * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src) 57 * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
@@ -98,6 +95,14 @@
98typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; 95typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
99extern nodemask_t _unused_nodemask_arg_; 96extern nodemask_t _unused_nodemask_arg_;
100 97
98/**
99 * nodemask_pr_args - printf args to output a nodemask
100 * @maskp: nodemask to be printed
101 *
102 * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
103 */
104#define nodemask_pr_args(maskp) MAX_NUMNODES, (maskp)->bits
105
101/* 106/*
102 * The inline keyword gives the compiler room to decide to inline, or 107 * The inline keyword gives the compiler room to decide to inline, or
103 * not inline a function as it sees best. However, as these functions 108 * not inline a function as it sees best. However, as these functions
@@ -120,13 +125,13 @@ static inline void __node_clear(int node, volatile nodemask_t *dstp)
120} 125}
121 126
122#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES) 127#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
123static inline void __nodes_setall(nodemask_t *dstp, int nbits) 128static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
124{ 129{
125 bitmap_fill(dstp->bits, nbits); 130 bitmap_fill(dstp->bits, nbits);
126} 131}
127 132
128#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES) 133#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
129static inline void __nodes_clear(nodemask_t *dstp, int nbits) 134static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
130{ 135{
131 bitmap_zero(dstp->bits, nbits); 136 bitmap_zero(dstp->bits, nbits);
132} 137}
@@ -144,7 +149,7 @@ static inline int __node_test_and_set(int node, nodemask_t *addr)
144#define nodes_and(dst, src1, src2) \ 149#define nodes_and(dst, src1, src2) \
145 __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES) 150 __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
146static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, 151static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
147 const nodemask_t *src2p, int nbits) 152 const nodemask_t *src2p, unsigned int nbits)
148{ 153{
149 bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); 154 bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
150} 155}
@@ -152,7 +157,7 @@ static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
152#define nodes_or(dst, src1, src2) \ 157#define nodes_or(dst, src1, src2) \
153 __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES) 158 __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
154static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p, 159static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
155 const nodemask_t *src2p, int nbits) 160 const nodemask_t *src2p, unsigned int nbits)
156{ 161{
157 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); 162 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
158} 163}
@@ -160,7 +165,7 @@ static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
160#define nodes_xor(dst, src1, src2) \ 165#define nodes_xor(dst, src1, src2) \
161 __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES) 166 __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
162static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, 167static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
163 const nodemask_t *src2p, int nbits) 168 const nodemask_t *src2p, unsigned int nbits)
164{ 169{
165 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); 170 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
166} 171}
@@ -168,7 +173,7 @@ static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
168#define nodes_andnot(dst, src1, src2) \ 173#define nodes_andnot(dst, src1, src2) \
169 __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES) 174 __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
170static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, 175static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
171 const nodemask_t *src2p, int nbits) 176 const nodemask_t *src2p, unsigned int nbits)
172{ 177{
173 bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); 178 bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
174} 179}
@@ -176,7 +181,7 @@ static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
176#define nodes_complement(dst, src) \ 181#define nodes_complement(dst, src) \
177 __nodes_complement(&(dst), &(src), MAX_NUMNODES) 182 __nodes_complement(&(dst), &(src), MAX_NUMNODES)
178static inline void __nodes_complement(nodemask_t *dstp, 183static inline void __nodes_complement(nodemask_t *dstp,
179 const nodemask_t *srcp, int nbits) 184 const nodemask_t *srcp, unsigned int nbits)
180{ 185{
181 bitmap_complement(dstp->bits, srcp->bits, nbits); 186 bitmap_complement(dstp->bits, srcp->bits, nbits);
182} 187}
@@ -184,7 +189,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
184#define nodes_equal(src1, src2) \ 189#define nodes_equal(src1, src2) \
185 __nodes_equal(&(src1), &(src2), MAX_NUMNODES) 190 __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
186static inline int __nodes_equal(const nodemask_t *src1p, 191static inline int __nodes_equal(const nodemask_t *src1p,
187 const nodemask_t *src2p, int nbits) 192 const nodemask_t *src2p, unsigned int nbits)
188{ 193{
189 return bitmap_equal(src1p->bits, src2p->bits, nbits); 194 return bitmap_equal(src1p->bits, src2p->bits, nbits);
190} 195}
@@ -192,7 +197,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
192#define nodes_intersects(src1, src2) \ 197#define nodes_intersects(src1, src2) \
193 __nodes_intersects(&(src1), &(src2), MAX_NUMNODES) 198 __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
194static inline int __nodes_intersects(const nodemask_t *src1p, 199static inline int __nodes_intersects(const nodemask_t *src1p,
195 const nodemask_t *src2p, int nbits) 200 const nodemask_t *src2p, unsigned int nbits)
196{ 201{
197 return bitmap_intersects(src1p->bits, src2p->bits, nbits); 202 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
198} 203}
@@ -200,25 +205,25 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
200#define nodes_subset(src1, src2) \ 205#define nodes_subset(src1, src2) \
201 __nodes_subset(&(src1), &(src2), MAX_NUMNODES) 206 __nodes_subset(&(src1), &(src2), MAX_NUMNODES)
202static inline int __nodes_subset(const nodemask_t *src1p, 207static inline int __nodes_subset(const nodemask_t *src1p,
203 const nodemask_t *src2p, int nbits) 208 const nodemask_t *src2p, unsigned int nbits)
204{ 209{
205 return bitmap_subset(src1p->bits, src2p->bits, nbits); 210 return bitmap_subset(src1p->bits, src2p->bits, nbits);
206} 211}
207 212
208#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES) 213#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
209static inline int __nodes_empty(const nodemask_t *srcp, int nbits) 214static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
210{ 215{
211 return bitmap_empty(srcp->bits, nbits); 216 return bitmap_empty(srcp->bits, nbits);
212} 217}
213 218
214#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES) 219#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
215static inline int __nodes_full(const nodemask_t *srcp, int nbits) 220static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
216{ 221{
217 return bitmap_full(srcp->bits, nbits); 222 return bitmap_full(srcp->bits, nbits);
218} 223}
219 224
220#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES) 225#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
221static inline int __nodes_weight(const nodemask_t *srcp, int nbits) 226static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
222{ 227{
223 return bitmap_weight(srcp->bits, nbits); 228 return bitmap_weight(srcp->bits, nbits);
224} 229}
@@ -304,14 +309,6 @@ static inline int __first_unset_node(const nodemask_t *maskp)
304 309
305#define nodes_addr(src) ((src).bits) 310#define nodes_addr(src) ((src).bits)
306 311
307#define nodemask_scnprintf(buf, len, src) \
308 __nodemask_scnprintf((buf), (len), &(src), MAX_NUMNODES)
309static inline int __nodemask_scnprintf(char *buf, int len,
310 const nodemask_t *srcp, int nbits)
311{
312 return bitmap_scnprintf(buf, len, srcp->bits, nbits);
313}
314
315#define nodemask_parse_user(ubuf, ulen, dst) \ 312#define nodemask_parse_user(ubuf, ulen, dst) \
316 __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES) 313 __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
317static inline int __nodemask_parse_user(const char __user *buf, int len, 314static inline int __nodemask_parse_user(const char __user *buf, int len,
@@ -320,14 +317,6 @@ static inline int __nodemask_parse_user(const char __user *buf, int len,
320 return bitmap_parse_user(buf, len, dstp->bits, nbits); 317 return bitmap_parse_user(buf, len, dstp->bits, nbits);
321} 318}
322 319
323#define nodelist_scnprintf(buf, len, src) \
324 __nodelist_scnprintf((buf), (len), &(src), MAX_NUMNODES)
325static inline int __nodelist_scnprintf(char *buf, int len,
326 const nodemask_t *srcp, int nbits)
327{
328 return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
329}
330
331#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES) 320#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
332static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) 321static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
333{ 322{
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 258945fcabf1..19a5d4b23209 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -132,13 +132,12 @@ struct nvme_ns {
132 * allocated to store the PRP list. 132 * allocated to store the PRP list.
133 */ 133 */
134struct nvme_iod { 134struct nvme_iod {
135 void *private; /* For the use of the submitter of the I/O */ 135 unsigned long private; /* For the use of the submitter of the I/O */
136 int npages; /* In the PRP list. 0 means small pool in use */ 136 int npages; /* In the PRP list. 0 means small pool in use */
137 int offset; /* Of PRP list */ 137 int offset; /* Of PRP list */
138 int nents; /* Used in scatterlist */ 138 int nents; /* Used in scatterlist */
139 int length; /* Of data, in bytes */ 139 int length; /* Of data, in bytes */
140 dma_addr_t first_dma; 140 dma_addr_t first_dma;
141 struct list_head node;
142 struct scatterlist sg[0]; 141 struct scatterlist sg[0];
143}; 142};
144 143
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 38fc05036015..69dbe312b11b 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -52,6 +52,7 @@ extern int of_get_named_gpio_flags(struct device_node *np,
52 52
53extern int of_mm_gpiochip_add(struct device_node *np, 53extern int of_mm_gpiochip_add(struct device_node *np,
54 struct of_mm_gpio_chip *mm_gc); 54 struct of_mm_gpio_chip *mm_gc);
55extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
55 56
56extern void of_gpiochip_add(struct gpio_chip *gc); 57extern void of_gpiochip_add(struct gpio_chip *gc);
57extern void of_gpiochip_remove(struct gpio_chip *gc); 58extern void of_gpiochip_remove(struct gpio_chip *gc);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 853698c721f7..d5771bed59c9 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -47,6 +47,10 @@ static inline bool oom_task_origin(const struct task_struct *p)
47 return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN); 47 return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
48} 48}
49 49
50extern void mark_tsk_oom_victim(struct task_struct *tsk);
51
52extern void unmark_oom_victim(void);
53
50extern unsigned long oom_badness(struct task_struct *p, 54extern unsigned long oom_badness(struct task_struct *p,
51 struct mem_cgroup *memcg, const nodemask_t *nodemask, 55 struct mem_cgroup *memcg, const nodemask_t *nodemask,
52 unsigned long totalpages); 56 unsigned long totalpages);
@@ -68,27 +72,14 @@ extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
68 unsigned long totalpages, const nodemask_t *nodemask, 72 unsigned long totalpages, const nodemask_t *nodemask,
69 bool force_kill); 73 bool force_kill);
70 74
71extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, 75extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
72 int order, nodemask_t *mask, bool force_kill); 76 int order, nodemask_t *mask, bool force_kill);
73extern int register_oom_notifier(struct notifier_block *nb); 77extern int register_oom_notifier(struct notifier_block *nb);
74extern int unregister_oom_notifier(struct notifier_block *nb); 78extern int unregister_oom_notifier(struct notifier_block *nb);
75 79
76extern bool oom_killer_disabled; 80extern bool oom_killer_disabled;
77 81extern bool oom_killer_disable(void);
78static inline void oom_killer_disable(void) 82extern void oom_killer_enable(void);
79{
80 oom_killer_disabled = true;
81}
82
83static inline void oom_killer_enable(void)
84{
85 oom_killer_disabled = false;
86}
87
88static inline bool oom_gfp_allowed(gfp_t gfp_mask)
89{
90 return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
91}
92 83
93extern struct task_struct *find_lock_task_mm(struct task_struct *p); 84extern struct task_struct *find_lock_task_mm(struct task_struct *p);
94 85
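
Annotation (not part of the patch): oom_killer_disable() is no longer a trivial inline and now reports success as a bool, and out_of_memory() likewise returns a bool. A hedged sketch of how a caller that must quiesce the OOM killer might use the new return value; the semantics of a false return (presumably that the killer could not be quiesced) are an assumption here:

static int example_quiesce_oom(void)
{
	if (!oom_killer_disable())
		return -EBUSY;		/* assumption: could not quiesce the killer */

	/* ... work that must not race with OOM kills ... */

	oom_killer_enable();
	return 0;
}
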
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 90230d5811c5..3a6490e81b28 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -5,8 +5,11 @@
5 * An MCS like lock especially tailored for optimistic spinning for sleeping 5 * An MCS like lock especially tailored for optimistic spinning for sleeping
6 * lock implementations (mutex, rwsem, etc). 6 * lock implementations (mutex, rwsem, etc).
7 */ 7 */
8 8struct optimistic_spin_node {
9#define OSQ_UNLOCKED_VAL (0) 9 struct optimistic_spin_node *next, *prev;
10 int locked; /* 1 if lock acquired */
11 int cpu; /* encoded CPU # + 1 value */
12};
10 13
11struct optimistic_spin_queue { 14struct optimistic_spin_queue {
12 /* 15 /*
@@ -16,6 +19,8 @@ struct optimistic_spin_queue {
16 atomic_t tail; 19 atomic_t tail;
17}; 20};
18 21
22#define OSQ_UNLOCKED_VAL (0)
23
19/* Init macro and function. */ 24/* Init macro and function. */
20#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) } 25#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
21 26
@@ -24,4 +29,7 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
24 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL); 29 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
25} 30}
26 31
32extern bool osq_lock(struct optimistic_spin_queue *lock);
33extern void osq_unlock(struct optimistic_spin_queue *lock);
34
27#endif 35#endif
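
Annotation (not part of the patch): osq_lock()/osq_unlock() are now declared here for use by sleeping-lock implementations. A hedged sketch of the usual optimistic-spinning pattern around them, with the surrounding logic left illustrative:

static bool example_try_optimistic_spin(struct optimistic_spin_queue *osq)
{
	if (!osq_lock(osq))
		return false;		/* contention or need_resched: give up spinning */

	/* ... spin on the lock owner while it stays on a CPU ... */

	osq_unlock(osq);
	return true;
}
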
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e1f5fcd79792..5ed7bdaf22d5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -121,8 +121,12 @@ enum pageflags {
121 PG_fscache = PG_private_2, /* page backed by cache */ 121 PG_fscache = PG_private_2, /* page backed by cache */
122 122
123 /* XEN */ 123 /* XEN */
124 /* Pinned in Xen as a read-only pagetable page. */
124 PG_pinned = PG_owner_priv_1, 125 PG_pinned = PG_owner_priv_1,
126 /* Pinned as part of domain save (see xen_mm_pin_all()). */
125 PG_savepinned = PG_dirty, 127 PG_savepinned = PG_dirty,
128 /* Has a grant mapping of another (foreign) domain's page. */
129 PG_foreign = PG_owner_priv_1,
126 130
127 /* SLOB */ 131 /* SLOB */
128 PG_slob_free = PG_private, 132 PG_slob_free = PG_private,
@@ -215,6 +219,7 @@ __PAGEFLAG(Slab, slab)
215PAGEFLAG(Checked, checked) /* Used by some filesystems */ 219PAGEFLAG(Checked, checked) /* Used by some filesystems */
216PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ 220PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
217PAGEFLAG(SavePinned, savepinned); /* Xen */ 221PAGEFLAG(SavePinned, savepinned); /* Xen */
222PAGEFLAG(Foreign, foreign); /* Xen */
218PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 223PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
219PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) 224PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
220 __SETPAGEFLAG(SwapBacked, swapbacked) 225 __SETPAGEFLAG(SwapBacked, swapbacked)
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 955421575d16..17fa4f8de3a6 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -41,7 +41,8 @@ int page_counter_try_charge(struct page_counter *counter,
41 struct page_counter **fail); 41 struct page_counter **fail);
42void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); 42void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
43int page_counter_limit(struct page_counter *counter, unsigned long limit); 43int page_counter_limit(struct page_counter *counter, unsigned long limit);
44int page_counter_memparse(const char *buf, unsigned long *nr_pages); 44int page_counter_memparse(const char *buf, const char *max,
45 unsigned long *nr_pages);
45 46
46static inline void page_counter_reset_watermark(struct page_counter *counter) 47static inline void page_counter_reset_watermark(struct page_counter *counter)
47{ 48{
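
Annotation (not part of the patch): page_counter_memparse() now takes a string (typically a literal such as "max") that names the unlimited value. A minimal sketch of a caller parsing a cgroup-style limit string under that assumption:

static int example_parse_limit(const char *buf, unsigned long *nr_pages)
{
	/* "max" maps to the no-limit value; everything else goes through memparse */
	return page_counter_memparse(buf, "max", nr_pages);
}
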
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index d2a2c84c72d0..c42981cd99aa 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -40,7 +40,7 @@ struct page_ext {
40#ifdef CONFIG_PAGE_OWNER 40#ifdef CONFIG_PAGE_OWNER
41 unsigned int order; 41 unsigned int order;
42 gfp_t gfp_mask; 42 gfp_t gfp_mask;
43 struct stack_trace trace; 43 unsigned int nr_entries;
44 unsigned long trace_entries[8]; 44 unsigned long trace_entries[8];
45#endif 45#endif
46}; 46};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 360a966a97a5..211e9da8a7d7 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -29,6 +29,7 @@
29#include <linux/atomic.h> 29#include <linux/atomic.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/resource_ext.h>
32#include <uapi/linux/pci.h> 33#include <uapi/linux/pci.h>
33 34
34#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
@@ -175,6 +176,10 @@ enum pci_dev_flags {
175 PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), 176 PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
176 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ 177 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
177 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), 178 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
179 /* Do not use bus resets for device */
180 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
181 /* Do not use PM reset even if device advertises NoSoftRst- */
182 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
178}; 183};
179 184
180enum pci_irq_reroute_variant { 185enum pci_irq_reroute_variant {
@@ -395,16 +400,10 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
395 return (pdev->error_state != pci_channel_io_normal); 400 return (pdev->error_state != pci_channel_io_normal);
396} 401}
397 402
398struct pci_host_bridge_window {
399 struct list_head list;
400 struct resource *res; /* host bridge aperture (CPU address) */
401 resource_size_t offset; /* bus address + offset = CPU address */
402};
403
404struct pci_host_bridge { 403struct pci_host_bridge {
405 struct device dev; 404 struct device dev;
406 struct pci_bus *bus; /* root bus */ 405 struct pci_bus *bus; /* root bus */
407 struct list_head windows; /* pci_host_bridge_windows */ 406 struct list_head windows; /* resource_entry */
408 void (*release_fn)(struct pci_host_bridge *); 407 void (*release_fn)(struct pci_host_bridge *);
409 void *release_data; 408 void *release_data;
410}; 409};
@@ -560,6 +559,7 @@ static inline int pcibios_err_to_errno(int err)
560/* Low-level architecture-dependent routines */ 559/* Low-level architecture-dependent routines */
561 560
562struct pci_ops { 561struct pci_ops {
562 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
563 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); 563 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
564 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); 564 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
565}; 565};
@@ -857,6 +857,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
857 int where, u16 val); 857 int where, u16 val);
858int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, 858int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
859 int where, u32 val); 859 int where, u32 val);
860
861int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
862 int where, int size, u32 *val);
863int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
864 int where, int size, u32 val);
865int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
866 int where, int size, u32 *val);
867int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
868 int where, int size, u32 val);
869
860struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); 870struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
861 871
862static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) 872static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
@@ -1065,6 +1075,7 @@ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1065void pci_bus_assign_resources(const struct pci_bus *bus); 1075void pci_bus_assign_resources(const struct pci_bus *bus);
1066void pci_bus_size_bridges(struct pci_bus *bus); 1076void pci_bus_size_bridges(struct pci_bus *bus);
1067int pci_claim_resource(struct pci_dev *, int); 1077int pci_claim_resource(struct pci_dev *, int);
1078int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1068void pci_assign_unassigned_resources(void); 1079void pci_assign_unassigned_resources(void);
1069void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge); 1080void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1070void pci_assign_unassigned_bus_resources(struct pci_bus *bus); 1081void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
@@ -1847,6 +1858,8 @@ static inline void pci_set_of_node(struct pci_dev *dev) { }
1847static inline void pci_release_of_node(struct pci_dev *dev) { } 1858static inline void pci_release_of_node(struct pci_dev *dev) { }
1848static inline void pci_set_bus_of_node(struct pci_bus *bus) { } 1859static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
1849static inline void pci_release_bus_of_node(struct pci_bus *bus) { } 1860static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
1861static inline struct device_node *
1862pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
1850#endif /* CONFIG_OF */ 1863#endif /* CONFIG_OF */
1851 1864
1852#ifdef CONFIG_EEH 1865#ifdef CONFIG_EEH
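
Annotation (not part of the patch): the new pci_ops->map_bus hook together with the pci_generic_config_read/write accessors lets a memory-mapped host controller avoid hand-rolled config accessors. A hedged sketch, where the driver structure and the devfn-to-offset math are illustrative assumptions:

struct example_pcie {			/* hypothetical per-controller state */
	void __iomem *cfg_base;
};

static void __iomem *example_map_bus(struct pci_bus *bus, unsigned int devfn,
				     int where)
{
	struct example_pcie *pcie = bus->sysdata;

	return pcie->cfg_base + (devfn << 12) + where;
}

static struct pci_ops example_pci_ops = {
	.map_bus = example_map_bus,
	.read    = pci_generic_config_read,
	.write   = pci_generic_config_write,
};
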
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index b4337646388b..12c9b485beb7 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,8 +128,22 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
128static inline bool __ref_is_percpu(struct percpu_ref *ref, 128static inline bool __ref_is_percpu(struct percpu_ref *ref,
129 unsigned long __percpu **percpu_countp) 129 unsigned long __percpu **percpu_countp)
130{ 130{
131 /* paired with smp_store_release() in percpu_ref_reinit() */ 131 unsigned long percpu_ptr;
132 unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr); 132
133 /*
134 * The value of @ref->percpu_count_ptr is tested for
135 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
136 * used as a pointer. If the compiler generates a separate fetch
137 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
138 * between contaminating the pointer value, meaning that
139 * ACCESS_ONCE() is required when fetching it.
140 *
141 * Also, we need a data dependency barrier to be paired with
142 * smp_store_release() in __percpu_ref_switch_to_percpu().
143 *
144 * Use lockless deref which contains both.
145 */
146 percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
133 147
134 /* 148 /*
135 * Theoretically, the following could test just ATOMIC; however, 149 * Theoretically, the following could test just ATOMIC; however,
@@ -233,7 +247,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
233 if (__ref_is_percpu(ref, &percpu_count)) { 247 if (__ref_is_percpu(ref, &percpu_count)) {
234 this_cpu_inc(*percpu_count); 248 this_cpu_inc(*percpu_count);
235 ret = true; 249 ret = true;
236 } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) { 250 } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
237 ret = atomic_long_inc_not_zero(&ref->count); 251 ret = atomic_long_inc_not_zero(&ref->count);
238 } 252 }
239 253
@@ -281,6 +295,20 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
281} 295}
282 296
283/** 297/**
298 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
299 * @ref: percpu_ref to test
300 *
301 * Returns %true if @ref is dying or dead.
302 *
303 * This function is safe to call as long as @ref is between init and exit
304 * and the caller is responsible for synchronizing against state changes.
305 */
306static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
307{
308 return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
309}
310
311/**
284 * percpu_ref_is_zero - test whether a percpu refcount reached zero 312 * percpu_ref_is_zero - test whether a percpu refcount reached zero
285 * @ref: percpu_ref to test 313 * @ref: percpu_ref to test
286 * 314 *
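
Annotation (not part of the patch): percpu_ref_is_dying() exposes the __PERCPU_REF_DEAD state so callers can refuse new work once a ref has been killed. A minimal sketch combining it with the existing tryget/put helpers; the submit wrapper is illustrative:

static bool example_submit(struct percpu_ref *ref)
{
	if (percpu_ref_is_dying(ref))
		return false;		/* shutdown started; reject new users */

	if (!percpu_ref_tryget_live(ref))
		return false;

	/* ... perform the work, then drop the reference ... */
	percpu_ref_put(ref);
	return true;
}
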
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 486e84ccb1f9..2b621982938d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -79,11 +79,6 @@ struct perf_branch_stack {
79 struct perf_branch_entry entries[0]; 79 struct perf_branch_entry entries[0];
80}; 80};
81 81
82struct perf_regs {
83 __u64 abi;
84 struct pt_regs *regs;
85};
86
87struct task_struct; 82struct task_struct;
88 83
89/* 84/*
@@ -207,6 +202,13 @@ struct pmu {
207 */ 202 */
208 int (*event_init) (struct perf_event *event); 203 int (*event_init) (struct perf_event *event);
209 204
205 /*
206 * Notification that the event was mapped or unmapped. Called
207 * in the context of the mapping task.
208 */
209 void (*event_mapped) (struct perf_event *event); /*optional*/
210 void (*event_unmapped) (struct perf_event *event); /*optional*/
211
210#define PERF_EF_START 0x01 /* start the counter when adding */ 212#define PERF_EF_START 0x01 /* start the counter when adding */
211#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ 213#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
212#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ 214#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
@@ -455,11 +457,6 @@ struct perf_event {
455#endif /* CONFIG_PERF_EVENTS */ 457#endif /* CONFIG_PERF_EVENTS */
456}; 458};
457 459
458enum perf_event_context_type {
459 task_context,
460 cpu_context,
461};
462
463/** 460/**
464 * struct perf_event_context - event context structure 461 * struct perf_event_context - event context structure
465 * 462 *
@@ -467,7 +464,6 @@ enum perf_event_context_type {
467 */ 464 */
468struct perf_event_context { 465struct perf_event_context {
469 struct pmu *pmu; 466 struct pmu *pmu;
470 enum perf_event_context_type type;
471 /* 467 /*
472 * Protect the states of the events in the list, 468 * Protect the states of the events in the list,
473 * nr_active, and the list: 469 * nr_active, and the list:
@@ -480,6 +476,7 @@ struct perf_event_context {
480 */ 476 */
481 struct mutex mutex; 477 struct mutex mutex;
482 478
479 struct list_head active_ctx_list;
483 struct list_head pinned_groups; 480 struct list_head pinned_groups;
484 struct list_head flexible_groups; 481 struct list_head flexible_groups;
485 struct list_head event_list; 482 struct list_head event_list;
@@ -530,7 +527,6 @@ struct perf_cpu_context {
530 int exclusive; 527 int exclusive;
531 struct hrtimer hrtimer; 528 struct hrtimer hrtimer;
532 ktime_t hrtimer_interval; 529 ktime_t hrtimer_interval;
533 struct list_head rotation_list;
534 struct pmu *unique_pmu; 530 struct pmu *unique_pmu;
535 struct perf_cgroup *cgrp; 531 struct perf_cgroup *cgrp;
536}; 532};
@@ -610,7 +606,14 @@ struct perf_sample_data {
610 u32 reserved; 606 u32 reserved;
611 } cpu_entry; 607 } cpu_entry;
612 struct perf_callchain_entry *callchain; 608 struct perf_callchain_entry *callchain;
609
610 /*
611 * regs_user may point to task_pt_regs or to regs_user_copy, depending
612 * on arch details.
613 */
613 struct perf_regs regs_user; 614 struct perf_regs regs_user;
615 struct pt_regs regs_user_copy;
616
614 struct perf_regs regs_intr; 617 struct perf_regs regs_intr;
615 u64 stack_user_size; 618 u64 stack_user_size;
616} ____cacheline_aligned; 619} ____cacheline_aligned;
@@ -663,6 +666,7 @@ static inline int is_software_event(struct perf_event *event)
663 666
664extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 667extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
665 668
669extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
666extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); 670extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
667 671
668#ifndef perf_arch_fetch_caller_regs 672#ifndef perf_arch_fetch_caller_regs
@@ -687,14 +691,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
687static __always_inline void 691static __always_inline void
688perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 692perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
689{ 693{
690 struct pt_regs hot_regs; 694 if (static_key_false(&perf_swevent_enabled[event_id]))
695 __perf_sw_event(event_id, nr, regs, addr);
696}
697
698DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
691 699
700/*
701 * 'Special' version for the scheduler, it hard assumes no recursion,
702 * which is guaranteed by us not actually scheduling inside other swevents
703 * because those disable preemption.
704 */
705static __always_inline void
706perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
707{
692 if (static_key_false(&perf_swevent_enabled[event_id])) { 708 if (static_key_false(&perf_swevent_enabled[event_id])) {
693 if (!regs) { 709 struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
694 perf_fetch_caller_regs(&hot_regs); 710
695 regs = &hot_regs; 711 perf_fetch_caller_regs(regs);
696 } 712 ___perf_sw_event(event_id, nr, regs, addr);
697 __perf_sw_event(event_id, nr, regs, addr);
698 } 713 }
699} 714}
700 715
@@ -710,7 +725,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
710static inline void perf_event_task_sched_out(struct task_struct *prev, 725static inline void perf_event_task_sched_out(struct task_struct *prev,
711 struct task_struct *next) 726 struct task_struct *next)
712{ 727{
713 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); 728 perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
714 729
715 if (static_key_false(&perf_sched_events.key)) 730 if (static_key_false(&perf_sched_events.key))
716 __perf_event_task_sched_out(prev, next); 731 __perf_event_task_sched_out(prev, next);
@@ -821,6 +836,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
821static inline void 836static inline void
822perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } 837perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
823static inline void 838static inline void
839perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
840static inline void
824perf_bp_event(struct perf_event *event, void *data) { } 841perf_bp_event(struct perf_event *event, void *data) { }
825 842
826static inline int perf_register_guest_info_callbacks 843static inline int perf_register_guest_info_callbacks
@@ -897,12 +914,22 @@ struct perf_pmu_events_attr {
897 const char *event_str; 914 const char *event_str;
898}; 915};
899 916
917ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
918 char *page);
919
900#define PMU_EVENT_ATTR(_name, _var, _id, _show) \ 920#define PMU_EVENT_ATTR(_name, _var, _id, _show) \
901static struct perf_pmu_events_attr _var = { \ 921static struct perf_pmu_events_attr _var = { \
902 .attr = __ATTR(_name, 0444, _show, NULL), \ 922 .attr = __ATTR(_name, 0444, _show, NULL), \
903 .id = _id, \ 923 .id = _id, \
904}; 924};
905 925
926#define PMU_EVENT_ATTR_STRING(_name, _var, _str) \
927static struct perf_pmu_events_attr _var = { \
928 .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
929 .id = 0, \
930 .event_str = _str, \
931};
932
906#define PMU_FORMAT_ATTR(_name, _format) \ 933#define PMU_FORMAT_ATTR(_name, _format) \
907static ssize_t \ 934static ssize_t \
908_name##_show(struct device *dev, \ 935_name##_show(struct device *dev, \
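
Annotation (not part of the patch): PMU_EVENT_ATTR_STRING() declares a sysfs event attribute whose value is a fixed string rendered by perf_event_sysfs_show(). A hedged example, where the event name and encoding are illustrative:

PMU_EVENT_ATTR_STRING(cpu-cycles, example_attr_cpu_cycles, "event=0x3c");

static struct attribute *example_pmu_events[] = {
	&example_attr_cpu_cycles.attr.attr,
	NULL,
};
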
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 3c73d5fe18be..a5f98d53d732 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -1,11 +1,19 @@
1#ifndef _LINUX_PERF_REGS_H 1#ifndef _LINUX_PERF_REGS_H
2#define _LINUX_PERF_REGS_H 2#define _LINUX_PERF_REGS_H
3 3
4struct perf_regs {
5 __u64 abi;
6 struct pt_regs *regs;
7};
8
4#ifdef CONFIG_HAVE_PERF_REGS 9#ifdef CONFIG_HAVE_PERF_REGS
5#include <asm/perf_regs.h> 10#include <asm/perf_regs.h>
6u64 perf_reg_value(struct pt_regs *regs, int idx); 11u64 perf_reg_value(struct pt_regs *regs, int idx);
7int perf_reg_validate(u64 mask); 12int perf_reg_validate(u64 mask);
8u64 perf_reg_abi(struct task_struct *task); 13u64 perf_reg_abi(struct task_struct *task);
14void perf_get_regs_user(struct perf_regs *regs_user,
15 struct pt_regs *regs,
16 struct pt_regs *regs_user_copy);
9#else 17#else
10static inline u64 perf_reg_value(struct pt_regs *regs, int idx) 18static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
11{ 19{
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
21{ 29{
22 return PERF_SAMPLE_REGS_ABI_NONE; 30 return PERF_SAMPLE_REGS_ABI_NONE;
23} 31}
32
33static inline void perf_get_regs_user(struct perf_regs *regs_user,
34 struct pt_regs *regs,
35 struct pt_regs *regs_user_copy)
36{
37 regs_user->regs = task_pt_regs(current);
38 regs_user->abi = perf_reg_abi(current);
39}
24#endif /* CONFIG_HAVE_PERF_REGS */ 40#endif /* CONFIG_HAVE_PERF_REGS */
25#endif /* _LINUX_PERF_REGS_H */ 41#endif /* _LINUX_PERF_REGS_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 22af8f8f5802..685809835b5c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,6 +327,8 @@ struct phy_c45_device_ids {
327 * c45_ids: 802.3-c45 Device Identifiers if is_c45. 327 * c45_ids: 802.3-c45 Device Identifiers if is_c45.
328 * is_c45: Set to true if this phy uses clause 45 addressing. 328 * is_c45: Set to true if this phy uses clause 45 addressing.
329 * is_internal: Set to true if this phy is internal to a MAC. 329 * is_internal: Set to true if this phy is internal to a MAC.
330 * has_fixups: Set to true if this phy has fixups/quirks.
331 * suspended: Set to true if this phy has been suspended successfully.
330 * state: state of the PHY for management purposes 332 * state: state of the PHY for management purposes
331 * dev_flags: Device-specific flags used by the PHY driver. 333 * dev_flags: Device-specific flags used by the PHY driver.
332 * addr: Bus address of PHY 334 * addr: Bus address of PHY
@@ -364,6 +366,7 @@ struct phy_device {
364 bool is_c45; 366 bool is_c45;
365 bool is_internal; 367 bool is_internal;
366 bool has_fixups; 368 bool has_fixups;
369 bool suspended;
367 370
368 enum phy_state state; 371 enum phy_state state;
369 372
@@ -565,6 +568,15 @@ struct phy_driver {
565 void (*write_mmd_indirect)(struct phy_device *dev, int ptrad, 568 void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
566 int devnum, int regnum, u32 val); 569 int devnum, int regnum, u32 val);
567 570
571 /* Get the size and type of the eeprom contained within a plug-in
572 * module */
573 int (*module_info)(struct phy_device *dev,
574 struct ethtool_modinfo *modinfo);
575
576 /* Get the eeprom information from the plug-in module */
577 int (*module_eeprom)(struct phy_device *dev,
578 struct ethtool_eeprom *ee, u8 *data);
579
568 struct device_driver driver; 580 struct device_driver driver;
569}; 581};
570#define to_phy_driver(d) container_of(d, struct phy_driver, driver) 582#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
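
The new module_info/module_eeprom hooks let a PHY driver answer ethtool's plug-in module (e.g. SFP) EEPROM queries. A hypothetical implementation could look like the sketch below; the my_phy_* names, the SFF-8472 choice and the EEPROM read helper are placeholders, and <linux/ethtool.h> provides the ethtool structures:

static int my_phy_module_info(struct phy_device *dev,
			      struct ethtool_modinfo *modinfo)
{
	/* advertise a module exposing both the A0h and A2h pages */
	modinfo->type = ETH_MODULE_SFF_8472;
	modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	return 0;
}

static int my_phy_module_eeprom(struct phy_device *dev,
				struct ethtool_eeprom *ee, u8 *data)
{
	if (ee->len == 0)
		return -EINVAL;
	/* my_phy_read_eeprom() stands in for the actual device access */
	return my_phy_read_eeprom(dev, ee->offset, ee->len, data);
}

These would then be hooked up via the .module_info and .module_eeprom members of the driver's struct phy_driver.
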
diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h
index e9e6cfbfbb58..eb7d4a135a9e 100644
--- a/include/linux/phy/omap_control_phy.h
+++ b/include/linux/phy/omap_control_phy.h
@@ -66,7 +66,7 @@ enum omap_control_usb_mode {
66#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0 66#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0
67 67
68#define OMAP_CTRL_PCIE_PCS_MASK 0xff 68#define OMAP_CTRL_PCIE_PCS_MASK 0xff
69#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 0x8 69#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 16
70 70
71#define OMAP_CTRL_USB2_PHY_PD BIT(28) 71#define OMAP_CTRL_USB2_PHY_PD BIT(28)
72 72
@@ -79,7 +79,7 @@ enum omap_control_usb_mode {
79void omap_control_phy_power(struct device *dev, int on); 79void omap_control_phy_power(struct device *dev, int on);
80void omap_control_usb_set_mode(struct device *dev, 80void omap_control_usb_set_mode(struct device *dev,
81 enum omap_control_usb_mode mode); 81 enum omap_control_usb_mode mode);
82void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay); 82void omap_control_pcie_pcs(struct device *dev, u8 delay);
83#else 83#else
84 84
85static inline void omap_control_phy_power(struct device *dev, int on) 85static inline void omap_control_phy_power(struct device *dev, int on)
@@ -91,7 +91,7 @@ static inline void omap_control_usb_set_mode(struct device *dev,
91{ 91{
92} 92}
93 93
94static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay) 94static inline void omap_control_pcie_pcs(struct device *dev, u8 delay)
95{ 95{
96} 96}
97#endif 97#endif
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
new file mode 100644
index 000000000000..9d18e9f948e9
--- /dev/null
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef PHY_QCOM_UFS_H_
16#define PHY_QCOM_UFS_H_
17
18#include "phy.h"
19
20/**
21 * ufs_qcom_phy_enable_ref_clk() - Enable the phy
22 * ref clock.
23 * @phy: reference to a generic phy
24 *
25 * returns 0 for success, and non-zero for error.
26 */
27int ufs_qcom_phy_enable_ref_clk(struct phy *phy);
28
29/**
30 * ufs_qcom_phy_disable_ref_clk() - Disable the phy
31 * ref clock.
32 * @phy: reference to a generic phy.
33 */
34void ufs_qcom_phy_disable_ref_clk(struct phy *phy);
35
36/**
37 * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
38 * ref clock.
39 * @phy: reference to a generic phy.
40 */
41void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
42
43/**
44 * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
45 * ref clock.
46 * @phy: reference to a generic phy.
47 */
48void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
49
50int ufs_qcom_phy_enable_iface_clk(struct phy *phy);
51void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
52int ufs_qcom_phy_start_serdes(struct phy *phy);
53int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
54int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
55int ufs_qcom_phy_is_pcs_ready(struct phy *phy);
56void ufs_qcom_phy_save_controller_version(struct phy *phy,
57 u8 major, u16 minor, u16 step);
58
59#endif /* PHY_QCOM_UFS_H_ */
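
For orientation, a UFS host controller driver would call these helpers while bringing up the link. The order below is only one plausible sketch (the real sequence is driver-specific), and phy/is_rate_B are assumed to come from the caller:

	err = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
	if (!err)
		err = ufs_qcom_phy_start_serdes(phy);
	if (!err)
		err = ufs_qcom_phy_is_pcs_ready(phy);	/* assumed to return 0 once the PCS is up */
	if (!err)
		ufs_qcom_phy_enable_dev_ref_clk(phy);
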
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index b9cf6c51b181..918b117a7cd3 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -19,7 +19,7 @@ struct pidmap {
19#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) 19#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
20#define PIDMAP_ENTRIES ((PID_MAX_LIMIT+BITS_PER_PAGE-1)/BITS_PER_PAGE) 20#define PIDMAP_ENTRIES ((PID_MAX_LIMIT+BITS_PER_PAGE-1)/BITS_PER_PAGE)
21 21
22struct bsd_acct_struct; 22struct fs_pin;
23 23
24struct pid_namespace { 24struct pid_namespace {
25 struct kref kref; 25 struct kref kref;
@@ -37,7 +37,7 @@ struct pid_namespace {
37 struct dentry *proc_thread_self; 37 struct dentry *proc_thread_self;
38#endif 38#endif
39#ifdef CONFIG_BSD_PROCESS_ACCT 39#ifdef CONFIG_BSD_PROCESS_ACCT
40 struct bsd_acct_struct *bacct; 40 struct fs_pin *bacct;
41#endif 41#endif
42 struct user_namespace *user_ns; 42 struct user_namespace *user_ns;
43 struct work_struct proc_work; 43 struct work_struct proc_work;
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 18eccefea06e..72c0415d6c21 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio)
82 82
83static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) 83static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
84{ 84{
85 return NULL; 85 return ERR_PTR(-ENOSYS);
86} 86}
87 87
88static inline void pinctrl_put(struct pinctrl *p) 88static inline void pinctrl_put(struct pinctrl *p)
@@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state(
93 struct pinctrl *p, 93 struct pinctrl *p,
94 const char *name) 94 const char *name)
95{ 95{
96 return NULL; 96 return ERR_PTR(-ENOSYS);
97} 97}
98 98
99static inline int pinctrl_select_state(struct pinctrl *p, 99static inline int pinctrl_select_state(struct pinctrl *p,
@@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p,
104 104
105static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) 105static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev)
106{ 106{
107 return NULL; 107 return ERR_PTR(-ENOSYS);
108} 108}
109 109
110static inline void devm_pinctrl_put(struct pinctrl *p) 110static inline void devm_pinctrl_put(struct pinctrl *p)
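
The consumer-side consequence of the stubs returning ERR_PTR(-ENOSYS) instead of NULL is that callers should test the result with IS_ERR() rather than comparing against NULL, e.g. (sketch; pdev stands for whatever device the caller owns):

	struct pinctrl *p = devm_pinctrl_get(&pdev->dev);

	if (IS_ERR(p))
		return PTR_ERR(p);
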
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index d578a60eff23..fe65962b264f 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -115,6 +115,18 @@ enum pin_config_param {
115 PIN_CONFIG_END = 0x7FFF, 115 PIN_CONFIG_END = 0x7FFF,
116}; 116};
117 117
118#ifdef CONFIG_DEBUG_FS
119#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
120 .has_arg = d }
121
122struct pin_config_item {
123 const enum pin_config_param param;
124 const char * const display;
125 const char * const format;
126 bool has_arg;
127};
128#endif /* CONFIG_DEBUG_FS */
129
118/* 130/*
119 * Helpful configuration macro to be used in tables etc. 131 * Helpful configuration macro to be used in tables etc.
120 */ 132 */
@@ -150,6 +162,12 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param
150struct pinctrl_dev; 162struct pinctrl_dev;
151struct pinctrl_map; 163struct pinctrl_map;
152 164
165struct pinconf_generic_params {
166 const char * const property;
167 enum pin_config_param param;
168 u32 default_value;
169};
170
153int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev, 171int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
154 struct device_node *np, struct pinctrl_map **map, 172 struct device_node *np, struct pinctrl_map **map,
155 unsigned *reserved_maps, unsigned *num_maps, 173 unsigned *reserved_maps, unsigned *num_maps,
@@ -174,6 +192,17 @@ static inline int pinconf_generic_dt_node_to_map_pin(
174 PIN_MAP_TYPE_CONFIGS_PIN); 192 PIN_MAP_TYPE_CONFIGS_PIN);
175} 193}
176 194
195static inline int pinconf_generic_dt_node_to_map_all(
196 struct pinctrl_dev *pctldev, struct device_node *np_config,
197 struct pinctrl_map **map, unsigned *num_maps)
198{
199 /*
200 * passing the type as PIN_MAP_TYPE_INVALID causes the underlying parser
201 * to infer the map type from the DT properties used.
202 */
203 return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
204 PIN_MAP_TYPE_INVALID);
205}
177#endif 206#endif
178 207
179#endif /* CONFIG_GENERIC_PINCONF */ 208#endif /* CONFIG_GENERIC_PINCONF */
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index cc8e1aff0e28..66e4697516de 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -24,6 +24,7 @@ struct pinctrl_dev;
24struct pinctrl_map; 24struct pinctrl_map;
25struct pinmux_ops; 25struct pinmux_ops;
26struct pinconf_ops; 26struct pinconf_ops;
27struct pin_config_item;
27struct gpio_chip; 28struct gpio_chip;
28struct device_node; 29struct device_node;
29 30
@@ -117,6 +118,12 @@ struct pinctrl_ops {
117 * @confops: pin config operations vtable, if you support pin configuration in 118 * @confops: pin config operations vtable, if you support pin configuration in
118 * your driver 119 * your driver
119 * @owner: module providing the pin controller, used for refcounting 120 * @owner: module providing the pin controller, used for refcounting
121 * @num_custom_params: Number of driver-specific custom parameters to be parsed
122 * from the hardware description
123 * @custom_params: List of driver-specific custom parameters to be parsed from
124 * the hardware description
125 * @custom_conf_items: Information on how to print @custom_params in debugfs;
126 * must be the same size as @custom_params, i.e. @num_custom_params entries
120 */ 127 */
121struct pinctrl_desc { 128struct pinctrl_desc {
122 const char *name; 129 const char *name;
@@ -126,6 +133,11 @@ struct pinctrl_desc {
126 const struct pinmux_ops *pmxops; 133 const struct pinmux_ops *pmxops;
127 const struct pinconf_ops *confops; 134 const struct pinconf_ops *confops;
128 struct module *owner; 135 struct module *owner;
136#ifdef CONFIG_GENERIC_PINCONF
137 unsigned int num_custom_params;
138 const struct pinconf_generic_params *custom_params;
139 const struct pin_config_item *custom_conf_items;
140#endif
129}; 141};
130 142
131/* External interface to pin controller */ 143/* External interface to pin controller */
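
Taken together with the pinconf-generic additions above, a pin controller driver could register a vendor-specific DT property as a custom generic pinconf parameter roughly as follows. The property name, parameter value and debugfs labels are invented for illustration, and the sketch assumes CONFIG_GENERIC_PINCONF:

#define MY_PIN_CONFIG_GLITCH_FILTER	(PIN_CONFIG_END + 1)

static const struct pinconf_generic_params my_custom_params[] = {
	{ "vendor,glitch-filter-ns", MY_PIN_CONFIG_GLITCH_FILTER, 0 },
};

#ifdef CONFIG_DEBUG_FS
static const struct pin_config_item my_conf_items[] = {
	PCONFDUMP(MY_PIN_CONFIG_GLITCH_FILTER, "glitch filter", "ns", true),
};
#endif

static struct pinctrl_desc my_pinctrl_desc = {
	.name			= "my-pinctrl",
	/* .pins, .npins, .pctlops, .pmxops, .confops as usual */
	.num_custom_params	= ARRAY_SIZE(my_custom_params),
	.custom_params		= my_custom_params,
#ifdef CONFIG_DEBUG_FS
	.custom_conf_items	= my_conf_items,
#endif
	.owner			= THIS_MODULE,
};
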
diff --git a/include/linux/platform_data/cpuidle-exynos.h b/include/linux/platform_data/cpuidle-exynos.h
new file mode 100644
index 000000000000..bfa40e4c5d5f
--- /dev/null
+++ b/include/linux/platform_data/cpuidle-exynos.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8*/
9
10#ifndef __CPUIDLE_EXYNOS_H
11#define __CPUIDLE_EXYNOS_H
12
13struct cpuidle_exynos_data {
14 int (*cpu0_enter_aftr)(void);
15 int (*cpu1_powerdown)(void);
16 void (*pre_enter_aftr)(void);
17 void (*post_enter_aftr)(void);
18};
19
20#endif
diff --git a/include/linux/platform_data/ipmmu-vmsa.h b/include/linux/platform_data/ipmmu-vmsa.h
deleted file mode 100644
index 5275b3ac6d37..000000000000
--- a/include/linux/platform_data/ipmmu-vmsa.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * IPMMU VMSA Platform Data
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 */
10
11#ifndef __IPMMU_VMSA_H__
12#define __IPMMU_VMSA_H__
13
14struct ipmmu_vmsa_master {
15 const char *name;
16 unsigned int utlb;
17};
18
19struct ipmmu_vmsa_platform_data {
20 const struct ipmmu_vmsa_master *masters;
21 unsigned int num_masters;
22};
23
24#endif /* __IPMMU_VMSA_H__ */
diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h
new file mode 100644
index 000000000000..38f77b5e56cf
--- /dev/null
+++ b/include/linux/platform_data/irda-sa11x0.h
@@ -0,0 +1,20 @@
1/*
2 * arch/arm/include/asm/mach/irda.h
3 *
4 * Copyright (C) 2004 Russell King.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __ASM_ARM_MACH_IRDA_H
11#define __ASM_ARM_MACH_IRDA_H
12
13struct irda_platform_data {
14 int (*startup)(struct device *);
15 void (*shutdown)(struct device *);
16 int (*set_power)(struct device *, unsigned int state);
17 void (*set_speed)(struct device *, unsigned int speed);
18};
19
20#endif
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 5c188f4e9bec..929469291406 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -31,10 +31,6 @@ struct omap_mmc_platform_data {
31 void (*cleanup)(struct device *dev); 31 void (*cleanup)(struct device *dev);
32 void (*shutdown)(struct device *dev); 32 void (*shutdown)(struct device *dev);
33 33
34 /* To handle board related suspend/resume functionality for MMC */
35 int (*suspend)(struct device *dev, int slot);
36 int (*resume)(struct device *dev, int slot);
37
38 /* Return context loss count due to PM states changing */ 34 /* Return context loss count due to PM states changing */
39 int (*get_context_loss_count)(struct device *dev); 35 int (*get_context_loss_count)(struct device *dev);
40 36
diff --git a/include/linux/platform_data/regulator-haptic.h b/include/linux/platform_data/regulator-haptic.h
new file mode 100644
index 000000000000..5658e58e0738
--- /dev/null
+++ b/include/linux/platform_data/regulator-haptic.h
@@ -0,0 +1,29 @@
1/*
2 * Regulator Haptic Platform Data
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Jaewon Kim <jaewon02.kim@samsung.com>
6 * Author: Hyunhee Kim <hyunhee.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef _REGULATOR_HAPTIC_H
14#define _REGULATOR_HAPTIC_H
15
16/*
17 * struct regulator_haptic_data - Platform device data
18 *
19 * @max_volt: maximum voltage value supplied to the haptic motor.
20 * The unit of the voltage is the microvolt (uV).
21 * @min_volt: minimum voltage value supplied to the haptic motor.
22 * The unit of the voltage is the microvolt (uV).
23 */
24struct regulator_haptic_data {
25 unsigned int max_volt;
26 unsigned int min_volt;
27};
28
29#endif /* _REGULATOR_HAPTIC_H */
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
index 5087fff96d86..cc2bdafb0c69 100644
--- a/include/linux/platform_data/st21nfca.h
+++ b/include/linux/platform_data/st21nfca.h
@@ -26,6 +26,8 @@
26struct st21nfca_nfc_platform_data { 26struct st21nfca_nfc_platform_data {
27 unsigned int gpio_ena; 27 unsigned int gpio_ena;
28 unsigned int irq_polarity; 28 unsigned int irq_polarity;
29 bool is_ese_present;
30 bool is_uicc_present;
29}; 31};
30 32
31#endif /* _ST21NFCA_HCI_H_ */ 33#endif /* _ST21NFCA_HCI_H_ */
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h
index c3b432f5b63e..b023373d9874 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st21nfcb.h
@@ -19,8 +19,6 @@
19#ifndef _ST21NFCB_NCI_H_ 19#ifndef _ST21NFCB_NCI_H_
20#define _ST21NFCB_NCI_H_ 20#define _ST21NFCB_NCI_H_
21 21
22#include <linux/i2c.h>
23
24#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" 22#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
25 23
26struct st21nfcb_nfc_platform_data { 24struct st21nfcb_nfc_platform_data {
@@ -28,4 +26,4 @@ struct st21nfcb_nfc_platform_data {
28 unsigned int irq_polarity; 26 unsigned int irq_polarity;
29}; 27};
30 28
31#endif /* _ST21NFCA_HCI_H_ */ 29#endif /* _ST21NFCB_NCI_H_ */
diff --git a/include/linux/platform_data/tpm_stm_st33.h b/include/linux/platform_data/tpm_stm_st33.h
new file mode 100644
index 000000000000..ff75310c0f47
--- /dev/null
+++ b/include/linux/platform_data/tpm_stm_st33.h
@@ -0,0 +1,39 @@
1/*
2 * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
3 * Copyright (C) 2009, 2010 STMicroelectronics
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 *
18 * STMicroelectronics version 1.2.0, Copyright (C) 2010
19 * STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
20 * This is free software, and you are welcome to redistribute it
21 * under certain conditions.
22 *
23 * @Author: Christophe RICARD tpmsupport@st.com
24 *
25 * @File: stm_st33_tpm.h
26 *
27 * @Date: 09/15/2010
28 */
29#ifndef __STM_ST33_TPM_H__
30#define __STM_ST33_TPM_H__
31
32#define TPM_ST33_I2C "st33zp24-i2c"
33#define TPM_ST33_SPI "st33zp24-spi"
34
35struct st33zp24_platform_data {
36 int io_lpcpd;
37};
38
39#endif /* __STM_ST33_TPM_H__ */
diff --git a/include/linux/platform_data/vsp1.h b/include/linux/platform_data/vsp1.h
deleted file mode 100644
index 63170e2614b3..000000000000
--- a/include/linux/platform_data/vsp1.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * vsp1.h -- R-Car VSP1 Platform Data
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13#ifndef __PLATFORM_VSP1_H__
14#define __PLATFORM_VSP1_H__
15
16#define VSP1_HAS_LIF (1 << 0)
17#define VSP1_HAS_LUT (1 << 1)
18#define VSP1_HAS_SRU (1 << 2)
19
20struct vsp1_platform_data {
21 unsigned int features;
22 unsigned int rpf_count;
23 unsigned int uds_count;
24 unsigned int wpf_count;
25};
26
27#endif /* __PLATFORM_VSP1_H__ */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 8b5976364619..e2f1be6dd9dd 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -597,7 +597,7 @@ struct dev_pm_info {
597 597
598extern void update_pm_runtime_accounting(struct device *dev); 598extern void update_pm_runtime_accounting(struct device *dev);
599extern int dev_pm_get_subsys_data(struct device *dev); 599extern int dev_pm_get_subsys_data(struct device *dev);
600extern int dev_pm_put_subsys_data(struct device *dev); 600extern void dev_pm_put_subsys_data(struct device *dev);
601 601
602/* 602/*
603 * Power domains provide callbacks that are executed during system suspend, 603 * Power domains provide callbacks that are executed during system suspend,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index a9edab2c787a..080e778118ba 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -113,8 +113,6 @@ struct generic_pm_domain_data {
113 struct pm_domain_data base; 113 struct pm_domain_data base;
114 struct gpd_timing_data td; 114 struct gpd_timing_data td;
115 struct notifier_block nb; 115 struct notifier_block nb;
116 struct mutex lock;
117 unsigned int refcount;
118 int need_restore; 116 int need_restore;
119}; 117};
120 118
@@ -140,7 +138,6 @@ extern int __pm_genpd_name_add_device(const char *domain_name,
140 138
141extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, 139extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
142 struct device *dev); 140 struct device *dev);
143extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
144extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 141extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
145 struct generic_pm_domain *new_subdomain); 142 struct generic_pm_domain *new_subdomain);
146extern int pm_genpd_add_subdomain_names(const char *master_name, 143extern int pm_genpd_add_subdomain_names(const char *master_name,
@@ -187,7 +184,6 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
187{ 184{
188 return -ENOSYS; 185 return -ENOSYS;
189} 186}
190static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
191static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 187static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
192 struct generic_pm_domain *new_sd) 188 struct generic_pm_domain *new_sd)
193{ 189{
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 195aafc6cd07..6512e9cbc6d5 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -12,6 +12,7 @@
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/mod_devicetable.h> 14#include <linux/mod_devicetable.h>
15#include <linux/console.h>
15 16
16#define PNP_NAME_LEN 50 17#define PNP_NAME_LEN 50
17 18
@@ -309,15 +310,22 @@ struct pnp_fixup {
309#define PNP_DISABLE 0x0004 310#define PNP_DISABLE 0x0004
310#define PNP_CONFIGURABLE 0x0008 311#define PNP_CONFIGURABLE 0x0008
311#define PNP_REMOVABLE 0x0010 312#define PNP_REMOVABLE 0x0010
313#define PNP_CONSOLE 0x0020
312 314
313#define pnp_can_read(dev) (((dev)->protocol->get) && \ 315#define pnp_can_read(dev) (((dev)->protocol->get) && \
314 ((dev)->capabilities & PNP_READ)) 316 ((dev)->capabilities & PNP_READ))
315#define pnp_can_write(dev) (((dev)->protocol->set) && \ 317#define pnp_can_write(dev) (((dev)->protocol->set) && \
316 ((dev)->capabilities & PNP_WRITE)) 318 ((dev)->capabilities & PNP_WRITE))
317#define pnp_can_disable(dev) (((dev)->protocol->disable) && \ 319#define pnp_can_disable(dev) (((dev)->protocol->disable) && \
318 ((dev)->capabilities & PNP_DISABLE)) 320 ((dev)->capabilities & PNP_DISABLE) && \
321 (!((dev)->capabilities & PNP_CONSOLE) || \
322 console_suspend_enabled))
319#define pnp_can_configure(dev) ((!(dev)->active) && \ 323#define pnp_can_configure(dev) ((!(dev)->active) && \
320 ((dev)->capabilities & PNP_CONFIGURABLE)) 324 ((dev)->capabilities & PNP_CONFIGURABLE))
325#define pnp_can_suspend(dev) (((dev)->protocol->suspend) && \
326 (!((dev)->capabilities & PNP_CONSOLE) || \
327 console_suspend_enabled))
328
321 329
322#ifdef CONFIG_ISAPNP 330#ifdef CONFIG_ISAPNP
323extern struct pnp_protocol isapnp_protocol; 331extern struct pnp_protocol isapnp_protocol;
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index e97fc656a058..416ebeb6ee1e 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -17,6 +17,7 @@
17 17
18#include <linux/power_supply.h> 18#include <linux/power_supply.h>
19#include <linux/extcon.h> 19#include <linux/extcon.h>
20#include <linux/alarmtimer.h>
20 21
21enum data_source { 22enum data_source {
22 CM_BATTERY_PRESENT, 23 CM_BATTERY_PRESENT,
@@ -45,29 +46,6 @@ enum cm_event_types {
45}; 46};
46 47
47/** 48/**
48 * struct charger_global_desc
49 * @rtc_name: the name of RTC used to wake up the system from suspend.
50 * @rtc_only_wakeup:
51 * If the system is woken up by waekup-sources other than the RTC or
52 * callbacks, Charger Manager should recognize with
53 * rtc_only_wakeup() returning false.
54 * If the RTC given to CM is the only wakeup reason,
55 * rtc_only_wakeup should return true.
56 * @assume_timer_stops_in_suspend:
57 * Assume that the jiffy timer stops in suspend-to-RAM.
58 * When enabled, CM does not rely on jiffies value in
59 * suspend_again and assumes that jiffies value does not
60 * change during suspend.
61 */
62struct charger_global_desc {
63 char *rtc_name;
64
65 bool (*rtc_only_wakeup)(void);
66
67 bool assume_timer_stops_in_suspend;
68};
69
70/**
71 * struct charger_cable 49 * struct charger_cable
72 * @extcon_name: the name of extcon device. 50 * @extcon_name: the name of extcon device.
73 * @name: the name of charger cable(external connector). 51 * @name: the name of charger cable(external connector).
@@ -266,22 +244,14 @@ struct charger_manager {
266 char psy_name_buf[PSY_NAME_MAX + 1]; 244 char psy_name_buf[PSY_NAME_MAX + 1];
267 struct power_supply charger_psy; 245 struct power_supply charger_psy;
268 246
269 bool status_save_ext_pwr_inserted;
270 bool status_save_batt;
271
272 u64 charging_start_time; 247 u64 charging_start_time;
273 u64 charging_end_time; 248 u64 charging_end_time;
274}; 249};
275 250
276#ifdef CONFIG_CHARGER_MANAGER 251#ifdef CONFIG_CHARGER_MANAGER
277extern int setup_charger_manager(struct charger_global_desc *gd);
278extern bool cm_suspend_again(void);
279extern void cm_notify_event(struct power_supply *psy, 252extern void cm_notify_event(struct power_supply *psy,
280 enum cm_event_types type, char *msg); 253 enum cm_event_types type, char *msg);
281#else 254#else
282static inline int setup_charger_manager(struct charger_global_desc *gd)
283{ return 0; }
284static inline bool cm_suspend_again(void) { return false; }
285static inline void cm_notify_event(struct power_supply *psy, 255static inline void cm_notify_event(struct power_supply *psy,
286 enum cm_event_types type, char *msg) { } 256 enum cm_event_types type, char *msg) { }
287#endif 257#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c8f170324e64..baa3f97d8ce8 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -10,9 +10,6 @@
10extern const char linux_banner[]; 10extern const char linux_banner[];
11extern const char linux_proc_banner[]; 11extern const char linux_proc_banner[];
12 12
13extern char *log_buf_addr_get(void);
14extern u32 log_buf_len_get(void);
15
16static inline int printk_get_level(const char *buffer) 13static inline int printk_get_level(const char *buffer)
17{ 14{
18 if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { 15 if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
@@ -163,6 +160,8 @@ extern int kptr_restrict;
163 160
164extern void wake_up_klogd(void); 161extern void wake_up_klogd(void);
165 162
163char *log_buf_addr_get(void);
164u32 log_buf_len_get(void);
166void log_buf_kexec_setup(void); 165void log_buf_kexec_setup(void);
167void __init setup_log_buf(int early); 166void __init setup_log_buf(int early);
168void dump_stack_set_arch_desc(const char *fmt, ...); 167void dump_stack_set_arch_desc(const char *fmt, ...);
@@ -198,6 +197,16 @@ static inline void wake_up_klogd(void)
198{ 197{
199} 198}
200 199
200static inline char *log_buf_addr_get(void)
201{
202 return NULL;
203}
204
205static inline u32 log_buf_len_get(void)
206{
207 return 0;
208}
209
201static inline void log_buf_kexec_setup(void) 210static inline void log_buf_kexec_setup(void)
202{ 211{
203} 212}
@@ -408,9 +417,9 @@ enum {
408 DUMP_PREFIX_ADDRESS, 417 DUMP_PREFIX_ADDRESS,
409 DUMP_PREFIX_OFFSET 418 DUMP_PREFIX_OFFSET
410}; 419};
411extern void hex_dump_to_buffer(const void *buf, size_t len, 420extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
412 int rowsize, int groupsize, 421 int groupsize, char *linebuf, size_t linebuflen,
413 char *linebuf, size_t linebuflen, bool ascii); 422 bool ascii);
414#ifdef CONFIG_PRINTK 423#ifdef CONFIG_PRINTK
415extern void print_hex_dump(const char *level, const char *prefix_str, 424extern void print_hex_dump(const char *level, const char *prefix_str,
416 int prefix_type, int rowsize, int groupsize, 425 int prefix_type, int rowsize, int groupsize,
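
hex_dump_to_buffer() changing from void to int gives callers feedback about how much of linebuf was used. The sketch below is illustrative (data and buffer sizes are made up, and the precise meaning of the return value is defined by the matching lib/hexdump.c change rather than by this header hunk):

	u8 data[16] = { 0xde, 0xad, 0xbe, 0xef };
	char line[80];
	int n;

	n = hex_dump_to_buffer(data, sizeof(data), 16, 1,
			       line, sizeof(line), true);
	/* n reports how much of 'line' was filled, so further output can be
	 * appended right after it without an extra strlen() pass. */
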
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index ece0c6bbfcc5..8884f6e507f7 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -39,6 +39,7 @@ enum pstore_type_id {
39 PSTORE_TYPE_PPC_RTAS = 4, 39 PSTORE_TYPE_PPC_RTAS = 4,
40 PSTORE_TYPE_PPC_OF = 5, 40 PSTORE_TYPE_PPC_OF = 5,
41 PSTORE_TYPE_PPC_COMMON = 6, 41 PSTORE_TYPE_PPC_COMMON = 6,
42 PSTORE_TYPE_PMSG = 7,
42 PSTORE_TYPE_UNKNOWN = 255 43 PSTORE_TYPE_UNKNOWN = 255
43}; 44};
44 45
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 4af3fdc85b01..9c9d6c154c8e 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -81,6 +81,7 @@ struct ramoops_platform_data {
81 unsigned long record_size; 81 unsigned long record_size;
82 unsigned long console_size; 82 unsigned long console_size;
83 unsigned long ftrace_size; 83 unsigned long ftrace_size;
84 unsigned long pmsg_size;
84 int dump_oops; 85 int dump_oops;
85 struct persistent_ram_ecc_info ecc_info; 86 struct persistent_ram_ecc_info ecc_info;
86}; 87};
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 77aed9ea1d26..dab545bb66b3 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -37,6 +37,7 @@
37#define SSDR (0x10) /* SSP Data Write/Data Read Register */ 37#define SSDR (0x10) /* SSP Data Write/Data Read Register */
38 38
39#define SSTO (0x28) /* SSP Time Out Register */ 39#define SSTO (0x28) /* SSP Time Out Register */
40#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
40#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */ 41#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
41#define SSTSA (0x30) /* SSP Tx Timeslot Active */ 42#define SSTSA (0x30) /* SSP Tx Timeslot Active */
42#define SSRSA (0x34) /* SSP Rx Timeslot Active */ 43#define SSRSA (0x34) /* SSP Rx Timeslot Active */
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 50978b781a19..d534e8ed308a 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -216,19 +216,21 @@ struct mem_dqinfo {
216 unsigned long dqi_flags; 216 unsigned long dqi_flags;
217 unsigned int dqi_bgrace; 217 unsigned int dqi_bgrace;
218 unsigned int dqi_igrace; 218 unsigned int dqi_igrace;
219 qsize_t dqi_maxblimit; 219 qsize_t dqi_max_spc_limit;
220 qsize_t dqi_maxilimit; 220 qsize_t dqi_max_ino_limit;
221 void *dqi_priv; 221 void *dqi_priv;
222}; 222};
223 223
224struct super_block; 224struct super_block;
225 225
226#define DQF_MASK 0xffff /* Mask for format specific flags */ 226/* Mask for flags passed to userspace */
227#define DQF_GETINFO_MASK 0x1ffff /* Mask for flags passed to userspace */ 227#define DQF_GETINFO_MASK (DQF_ROOT_SQUASH | DQF_SYS_FILE)
228#define DQF_SETINFO_MASK 0xffff /* Mask for flags modifiable from userspace */ 228/* Mask for flags modifiable from userspace */
229#define DQF_SYS_FILE_B 16 229#define DQF_SETINFO_MASK DQF_ROOT_SQUASH
230#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B) /* Quota file stored as system file */ 230
231#define DQF_INFO_DIRTY_B 31 231enum {
232 DQF_INFO_DIRTY_B = DQF_PRIVATE,
233};
232#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */ 234#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */
233 235
234extern void mark_info_dirty(struct super_block *sb, int type); 236extern void mark_info_dirty(struct super_block *sb, int type);
@@ -321,18 +323,61 @@ struct dquot_operations {
321 323
322struct path; 324struct path;
323 325
326/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
327struct qc_dqblk {
328 int d_fieldmask; /* mask of fields to change in ->set_dqblk() */
329 u64 d_spc_hardlimit; /* absolute limit on used space */
330 u64 d_spc_softlimit; /* preferred limit on used space */
331 u64 d_ino_hardlimit; /* maximum # allocated inodes */
332 u64 d_ino_softlimit; /* preferred inode limit */
333 u64 d_space; /* Space owned by the user */
334 u64 d_ino_count; /* # inodes owned by the user */
335 s64 d_ino_timer; /* zero if within inode limits */
336 /* if not, we refuse service */
337 s64 d_spc_timer; /* similar to above; for space */
338 int d_ino_warns; /* # warnings issued wrt num inodes */
339 int d_spc_warns; /* # warnings issued wrt used space */
340 u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
341 u64 d_rt_spc_softlimit; /* preferred limit on RT space */
342 u64 d_rt_space; /* realtime space owned */
343 s64 d_rt_spc_timer; /* similar to above; for RT space */
344 int d_rt_spc_warns; /* # warnings issued wrt RT space */
345};
346
347/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
348#define QC_INO_SOFT (1<<0)
349#define QC_INO_HARD (1<<1)
350#define QC_SPC_SOFT (1<<2)
351#define QC_SPC_HARD (1<<3)
352#define QC_RT_SPC_SOFT (1<<4)
353#define QC_RT_SPC_HARD (1<<5)
354#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
355 QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
356#define QC_SPC_TIMER (1<<6)
357#define QC_INO_TIMER (1<<7)
358#define QC_RT_SPC_TIMER (1<<8)
359#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
360#define QC_SPC_WARNS (1<<9)
361#define QC_INO_WARNS (1<<10)
362#define QC_RT_SPC_WARNS (1<<11)
363#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
364#define QC_SPACE (1<<12)
365#define QC_INO_COUNT (1<<13)
366#define QC_RT_SPACE (1<<14)
367#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
368
324/* Operations handling requests from userspace */ 369/* Operations handling requests from userspace */
325struct quotactl_ops { 370struct quotactl_ops {
326 int (*quota_on)(struct super_block *, int, int, struct path *); 371 int (*quota_on)(struct super_block *, int, int, struct path *);
327 int (*quota_on_meta)(struct super_block *, int, int);
328 int (*quota_off)(struct super_block *, int); 372 int (*quota_off)(struct super_block *, int);
373 int (*quota_enable)(struct super_block *, unsigned int);
374 int (*quota_disable)(struct super_block *, unsigned int);
329 int (*quota_sync)(struct super_block *, int); 375 int (*quota_sync)(struct super_block *, int);
330 int (*get_info)(struct super_block *, int, struct if_dqinfo *); 376 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
331 int (*set_info)(struct super_block *, int, struct if_dqinfo *); 377 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
332 int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 378 int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
333 int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); 379 int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
334 int (*get_xstate)(struct super_block *, struct fs_quota_stat *); 380 int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
335 int (*set_xstate)(struct super_block *, unsigned int, int);
336 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); 381 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
337 int (*rm_xquota)(struct super_block *, unsigned int); 382 int (*rm_xquota)(struct super_block *, unsigned int);
338}; 383};
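
A filesystem implementing the reworked ->set_dqblk() consults d_fieldmask to see which qc_dqblk fields the caller wants applied, along these lines (my_apply_*() are placeholders for the filesystem's own limit-setting code):

static int my_set_dqblk(struct super_block *sb, struct kqid qid,
			struct qc_dqblk *di)
{
	if (di->d_fieldmask & QC_SPC_SOFT)
		my_apply_space_softlimit(sb, qid, di->d_spc_softlimit);
	if (di->d_fieldmask & QC_INO_HARD)
		my_apply_inode_hardlimit(sb, qid, di->d_ino_hardlimit);
	/* ...and so on for the remaining QC_* bits the filesystem supports */
	return 0;
}
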
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index f23538a6e411..df73258cca47 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
98int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 98int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
99int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 99int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
100int dquot_get_dqblk(struct super_block *sb, struct kqid id, 100int dquot_get_dqblk(struct super_block *sb, struct kqid id,
101 struct fs_disk_quota *di); 101 struct qc_dqblk *di);
102int dquot_set_dqblk(struct super_block *sb, struct kqid id, 102int dquot_set_dqblk(struct super_block *sb, struct kqid id,
103 struct fs_disk_quota *di); 103 struct qc_dqblk *di);
104 104
105int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); 105int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
106int dquot_transfer(struct inode *inode, struct iattr *iattr); 106int dquot_transfer(struct inode *inode, struct iattr *iattr);
@@ -166,6 +166,7 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type)
166 */ 166 */
167extern const struct dquot_operations dquot_operations; 167extern const struct dquot_operations dquot_operations;
168extern const struct quotactl_ops dquot_quotactl_ops; 168extern const struct quotactl_ops dquot_quotactl_ops;
169extern const struct quotactl_ops dquot_quotactl_sysfile_ops;
169 170
170#else 171#else
171 172
@@ -386,4 +387,6 @@ static inline void dquot_release_reservation_block(struct inode *inode,
386 __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE); 387 __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE);
387} 388}
388 389
390unsigned int qtype_enforce_flag(int type);
391
389#endif /* _LINUX_QUOTAOPS_ */ 392#endif /* _LINUX_QUOTAOPS_ */
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 57e75ae9910f..fb31765e935a 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -51,7 +51,7 @@ struct rb_root {
51 51
52#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) 52#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
53 53
54/* 'empty' nodes are nodes that are known not to be inserted in an rbree */ 54/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
55#define RB_EMPTY_NODE(node) \ 55#define RB_EMPTY_NODE(node) \
56 ((node)->__rb_parent_color == (unsigned long)(node)) 56 ((node)->__rb_parent_color == (unsigned long)(node))
57#define RB_CLEAR_NODE(node) \ 57#define RB_CLEAR_NODE(node) \
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 529bc946f450..a18b16f1dc0e 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
524 * @member: the name of the hlist_node within the struct. 524 * @member: the name of the hlist_node within the struct.
525 */ 525 */
526#define hlist_for_each_entry_continue_rcu(pos, member) \ 526#define hlist_for_each_entry_continue_rcu(pos, member) \
527 for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ 527 for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
528 typeof(*(pos)), member); \ 528 &(pos)->member)), typeof(*(pos)), member); \
529 pos; \ 529 pos; \
530 pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ 530 pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
531 typeof(*(pos)), member)) 531 &(pos)->member)), typeof(*(pos)), member))
532 532
533/** 533/**
534 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point 534 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
536 * @member: the name of the hlist_node within the struct. 536 * @member: the name of the hlist_node within the struct.
537 */ 537 */
538#define hlist_for_each_entry_continue_rcu_bh(pos, member) \ 538#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
539 for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 539 for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
540 typeof(*(pos)), member); \ 540 &(pos)->member)), typeof(*(pos)), member); \
541 pos; \ 541 pos; \
542 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 542 pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
543 typeof(*(pos)), member)) 543 &(pos)->member)), typeof(*(pos)), member))
544 544
545/** 545/**
546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point 546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ed4f5939a452..78097491cd99 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void)
331extern struct srcu_struct tasks_rcu_exit_srcu; 331extern struct srcu_struct tasks_rcu_exit_srcu;
332#define rcu_note_voluntary_context_switch(t) \ 332#define rcu_note_voluntary_context_switch(t) \
333 do { \ 333 do { \
334 rcu_all_qs(); \
334 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ 335 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
335 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ 336 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
336 } while (0) 337 } while (0)
337#else /* #ifdef CONFIG_TASKS_RCU */ 338#else /* #ifdef CONFIG_TASKS_RCU */
338#define TASKS_RCU(x) do { } while (0) 339#define TASKS_RCU(x) do { } while (0)
339#define rcu_note_voluntary_context_switch(t) do { } while (0) 340#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
340#endif /* #else #ifdef CONFIG_TASKS_RCU */ 341#endif /* #else #ifdef CONFIG_TASKS_RCU */
341 342
342/** 343/**
@@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void)
582}) 583})
583#define __rcu_dereference_check(p, c, space) \ 584#define __rcu_dereference_check(p, c, space) \
584({ \ 585({ \
585 typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ 586 /* Dependency order vs. p above. */ \
587 typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
586 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ 588 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
587 rcu_dereference_sparse(p, space); \ 589 rcu_dereference_sparse(p, space); \
588 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ 590 ((typeof(*p) __force __kernel *)(________p1)); \
589 ((typeof(*p) __force __kernel *)(_________p1)); \
590}) 591})
591#define __rcu_dereference_protected(p, c, space) \ 592#define __rcu_dereference_protected(p, c, space) \
592({ \ 593({ \
@@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void)
603}) 604})
604#define __rcu_dereference_index_check(p, c) \ 605#define __rcu_dereference_index_check(p, c) \
605({ \ 606({ \
606 typeof(p) _________p1 = ACCESS_ONCE(p); \ 607 /* Dependency order vs. p above. */ \
608 typeof(p) _________p1 = lockless_dereference(p); \
607 rcu_lockdep_assert(c, \ 609 rcu_lockdep_assert(c, \
608 "suspicious rcu_dereference_index_check() usage"); \ 610 "suspicious rcu_dereference_index_check() usage"); \
609 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
610 (_________p1); \ 611 (_________p1); \
611}) 612})
612 613
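
For reference, lockless_dereference() lives in linux/compiler.h (outside this diff) and folds the ACCESS_ONCE() load and the smp_read_barrier_depends() that these macros used to issue separately into a single helper, approximately:

	#define lockless_dereference(p) \
	({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
		(_________p1); \
	})
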
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 0e5366200154..937edaeb150d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
92} 92}
93 93
94/* 94/*
95 * Return the number of grace periods. 95 * Return the number of grace periods started.
96 */ 96 */
97static inline long rcu_batches_completed(void) 97static inline unsigned long rcu_batches_started(void)
98{ 98{
99 return 0; 99 return 0;
100} 100}
101 101
102/* 102/*
103 * Return the number of bottom-half grace periods. 103 * Return the number of bottom-half grace periods started.
104 */ 104 */
105static inline long rcu_batches_completed_bh(void) 105static inline unsigned long rcu_batches_started_bh(void)
106{
107 return 0;
108}
109
110/*
111 * Return the number of sched grace periods started.
112 */
113static inline unsigned long rcu_batches_started_sched(void)
114{
115 return 0;
116}
117
118/*
119 * Return the number of grace periods completed.
120 */
121static inline unsigned long rcu_batches_completed(void)
122{
123 return 0;
124}
125
126/*
127 * Return the number of bottom-half grace periods completed.
128 */
129static inline unsigned long rcu_batches_completed_bh(void)
130{
131 return 0;
132}
133
134/*
135 * Return the number of sched grace periods completed.
136 */
137static inline unsigned long rcu_batches_completed_sched(void)
106{ 138{
107 return 0; 139 return 0;
108} 140}
@@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void)
154 return true; 186 return true;
155} 187}
156 188
157
158#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 189#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
159 190
191static inline void rcu_all_qs(void)
192{
193}
194
160#endif /* __LINUX_RCUTINY_H */ 195#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 52953790dcca..d2e583a6aaca 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate);
81 81
82extern unsigned long rcutorture_testseq; 82extern unsigned long rcutorture_testseq;
83extern unsigned long rcutorture_vernum; 83extern unsigned long rcutorture_vernum;
84long rcu_batches_completed(void); 84unsigned long rcu_batches_started(void);
85long rcu_batches_completed_bh(void); 85unsigned long rcu_batches_started_bh(void);
86long rcu_batches_completed_sched(void); 86unsigned long rcu_batches_started_sched(void);
87unsigned long rcu_batches_completed(void);
88unsigned long rcu_batches_completed_bh(void);
89unsigned long rcu_batches_completed_sched(void);
87void show_rcu_gp_kthreads(void); 90void show_rcu_gp_kthreads(void);
88 91
89void rcu_force_quiescent_state(void); 92void rcu_force_quiescent_state(void);
@@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly;
97 100
98bool rcu_is_watching(void); 101bool rcu_is_watching(void);
99 102
103void rcu_all_qs(void);
104
100#endif /* __LINUX_RCUTREE_H */ 105#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4419b99d8d6e..116655d92269 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -468,7 +468,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
468 * 468 *
469 * @reg: Offset of the register within the regmap bank 469 * @reg: Offset of the register within the regmap bank
470 * @lsb: lsb of the register field. 470 * @lsb: lsb of the register field.
471 * @reg: msb of the register field. 471 * @msb: msb of the register field.
472 * @id_size: port size if it has some ports 472 * @id_size: port size if it has some ports
473 * @id_offset: address offset for each ports 473 * @id_offset: address offset for each ports
474 */ 474 */
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5479394fefce..5dd65acc2a69 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -32,6 +32,8 @@ struct da9211_pdata {
32 * 2 : 2 phase 2 buck 32 * 2 : 2 phase 2 buck
33 */ 33 */
34 int num_buck; 34 int num_buck;
35 int gpio_ren[DA9211_MAX_REGULATORS];
36 struct device_node *reg_node[DA9211_MAX_REGULATORS];
35 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS]; 37 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
36}; 38};
37#endif 39#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 5f1e9ca47417..d4ad5b5a02bb 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -21,6 +21,7 @@
21 21
22struct regmap; 22struct regmap;
23struct regulator_dev; 23struct regulator_dev;
24struct regulator_config;
24struct regulator_init_data; 25struct regulator_init_data;
25struct regulator_enable_gpio; 26struct regulator_enable_gpio;
26 27
@@ -205,6 +206,15 @@ enum regulator_type {
205 * @supply_name: Identifying the regulator supply 206 * @supply_name: Identifying the regulator supply
206 * @of_match: Name used to identify regulator in DT. 207 * @of_match: Name used to identify regulator in DT.
207 * @regulators_node: Name of node containing regulator definitions in DT. 208 * @regulators_node: Name of node containing regulator definitions in DT.
209 * @of_parse_cb: Optional callback called only if of_match is present.
210 * Will be called for each regulator parsed from DT, during
211 * init_data parsing.
212 * The regulator_config passed as argument to the callback will
213 * be a copy of config passed to regulator_register, valid only
214 * for this particular call. Callback may freely change the
215 * config but it cannot store it for later usage.
216 * Callback should return 0 on success or negative ERRNO
217 * indicating failure.
208 * @id: Numerical identifier for the regulator. 218 * @id: Numerical identifier for the regulator.
209 * @ops: Regulator operations table. 219 * @ops: Regulator operations table.
210 * @irq: Interrupt number for the regulator. 220 * @irq: Interrupt number for the regulator.
@@ -251,6 +261,9 @@ struct regulator_desc {
251 const char *supply_name; 261 const char *supply_name;
252 const char *of_match; 262 const char *of_match;
253 const char *regulators_node; 263 const char *regulators_node;
264 int (*of_parse_cb)(struct device_node *,
265 const struct regulator_desc *,
266 struct regulator_config *);
254 int id; 267 int id;
255 bool continuous_voltage_range; 268 bool continuous_voltage_range;
256 unsigned n_voltages; 269 unsigned n_voltages;
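
A hypothetical of_parse_cb implementation, to make the new hook concrete; the vendor property names are invented, and the callback only touches the per-call copy of the config, as the comment above requires:

static int my_reg_of_parse_cb(struct device_node *np,
			      const struct regulator_desc *desc,
			      struct regulator_config *config)
{
	if (of_property_read_bool(np, "vendor,external-enable"))
		config->ena_gpio = of_get_named_gpio(np, "vendor,enable-gpios", 0);

	return 0;
}

The driver then points .of_parse_cb in its regulator_desc at this function.
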
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 0b08d05d470b..b07562e082c4 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -191,15 +191,22 @@ struct regulator_init_data {
191 void *driver_data; /* core does not touch this */ 191 void *driver_data; /* core does not touch this */
192}; 192};
193 193
194int regulator_suspend_prepare(suspend_state_t state);
195int regulator_suspend_finish(void);
196
197#ifdef CONFIG_REGULATOR 194#ifdef CONFIG_REGULATOR
198void regulator_has_full_constraints(void); 195void regulator_has_full_constraints(void);
196int regulator_suspend_prepare(suspend_state_t state);
197int regulator_suspend_finish(void);
199#else 198#else
200static inline void regulator_has_full_constraints(void) 199static inline void regulator_has_full_constraints(void)
201{ 200{
202} 201}
202static inline int regulator_suspend_prepare(suspend_state_t state)
203{
204 return 0;
205}
206static inline int regulator_suspend_finish(void)
207{
208 return 0;
209}
203#endif 210#endif
204 211
205#endif 212#endif
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
new file mode 100644
index 000000000000..30cc5963e265
--- /dev/null
+++ b/include/linux/regulator/mt6397-regulator.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Flora Fu <flora.fu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __LINUX_REGULATOR_MT6397_H
16#define __LINUX_REGULATOR_MT6397_H
17
18enum {
19 MT6397_ID_VPCA15 = 0,
20 MT6397_ID_VPCA7,
21 MT6397_ID_VSRAMCA15,
22 MT6397_ID_VSRAMCA7,
23 MT6397_ID_VCORE,
24 MT6397_ID_VGPU,
25 MT6397_ID_VDRM,
26 MT6397_ID_VIO18 = 7,
27 MT6397_ID_VTCXO,
28 MT6397_ID_VA28,
29 MT6397_ID_VCAMA,
30 MT6397_ID_VIO28,
31 MT6397_ID_VUSB,
32 MT6397_ID_VMC,
33 MT6397_ID_VMCH,
34 MT6397_ID_VEMC3V3,
35 MT6397_ID_VGP1,
36 MT6397_ID_VGP2,
37 MT6397_ID_VGP3,
38 MT6397_ID_VGP4,
39 MT6397_ID_VGP5,
40 MT6397_ID_VGP6,
41 MT6397_ID_VIBR,
42 MT6397_ID_RG_MAX,
43};
44
45#define MT6397_MAX_REGULATOR MT6397_ID_RG_MAX
46#define MT6397_REGULATOR_ID97 0x97
47#define MT6397_REGULATOR_ID91 0x91
48
49#endif /* __LINUX_REGULATOR_MT6397_H */
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index 364f7a7c43db..70c6c66c5bcf 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -49,6 +49,20 @@
49#define PFUZE200_VGEN5 11 49#define PFUZE200_VGEN5 11
50#define PFUZE200_VGEN6 12 50#define PFUZE200_VGEN6 12
51 51
52#define PFUZE3000_SW1A 0
53#define PFUZE3000_SW1B 1
54#define PFUZE3000_SW2 2
55#define PFUZE3000_SW3 3
56#define PFUZE3000_SWBST 4
57#define PFUZE3000_VSNVS 5
58#define PFUZE3000_VREFDDR 6
59#define PFUZE3000_VLDO1 7
60#define PFUZE3000_VLDO2 8
61#define PFUZE3000_VCCSD 9
62#define PFUZE3000_V33 10
63#define PFUZE3000_VLDO3 11
64#define PFUZE3000_VLDO4 12
65
52struct regulator_init_data; 66struct regulator_init_data;
53 67
54struct pfuze_regulator_platform_data { 68struct pfuze_regulator_platform_data {
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
new file mode 100644
index 000000000000..e2bf63d881d4
--- /dev/null
+++ b/include/linux/resource_ext.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright (C) 2015, Intel Corporation
3 * Author: Jiang Liu <jiang.liu@linux.intel.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#ifndef _LINUX_RESOURCE_EXT_H
15#define _LINUX_RESOURCE_EXT_H
16#include <linux/types.h>
17#include <linux/list.h>
18#include <linux/ioport.h>
19#include <linux/slab.h>
20
21/* Represent resource window for bridge devices */
22struct resource_win {
23 struct resource res; /* In master (CPU) address space */
24 resource_size_t offset; /* Translation offset for bridge */
25};
26
27/*
28 * Common resource list management data structure and interfaces to support
29 * ACPI, PNP and PCI host bridge etc.
30 */
31struct resource_entry {
32 struct list_head node;
33 struct resource *res; /* In master (CPU) address space */
34 resource_size_t offset; /* Translation offset for bridge */
35 struct resource __res; /* Default storage for res */
36};
37
38extern struct resource_entry *
39resource_list_create_entry(struct resource *res, size_t extra_size);
40extern void resource_list_free(struct list_head *head);
41
42static inline void resource_list_add(struct resource_entry *entry,
43 struct list_head *head)
44{
45 list_add(&entry->node, head);
46}
47
48static inline void resource_list_add_tail(struct resource_entry *entry,
49 struct list_head *head)
50{
51 list_add_tail(&entry->node, head);
52}
53
54static inline void resource_list_del(struct resource_entry *entry)
55{
56 list_del(&entry->node);
57}
58
59static inline void resource_list_free_entry(struct resource_entry *entry)
60{
61 kfree(entry);
62}
63
64static inline void
65resource_list_destroy_entry(struct resource_entry *entry)
66{
67 resource_list_del(entry);
68 resource_list_free_entry(entry);
69}
70
71#define resource_list_for_each_entry(entry, list) \
72 list_for_each_entry((entry), (list), node)
73
74#define resource_list_for_each_entry_safe(entry, tmp, list) \
75 list_for_each_entry_safe((entry), (tmp), (list), node)
76
77#endif /* _LINUX_RESOURCE_EXT_H */
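
A usage sketch of the list helpers above (the window values and the function itself are illustrative; passing a NULL resource to resource_list_create_entry() is assumed to make the entry fall back to its built-in __res storage):

#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/resource_ext.h>

static int example_collect_windows(struct list_head *resources)
{
	struct resource_entry *entry;

	/* NULL res: the entry's __res field provides the storage (assumed). */
	entry = resource_list_create_entry(NULL, 0);
	if (!entry)
		return -ENOMEM;

	entry->res->start = 0x40000000;
	entry->res->end   = 0x7fffffff;
	entry->res->flags = IORESOURCE_MEM;
	entry->offset = 0;			/* CPU address == bus address */
	resource_list_add_tail(entry, resources);

	resource_list_for_each_entry(entry, resources)
		pr_info("window %pR, offset %llx\n", entry->res,
			(unsigned long long)entry->offset);

	resource_list_free(resources);		/* drops and frees every entry */
	return 0;
}
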
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index b93fd89b2e5e..58851275fed9 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -18,16 +18,45 @@
18#ifndef _LINUX_RHASHTABLE_H 18#ifndef _LINUX_RHASHTABLE_H
19#define _LINUX_RHASHTABLE_H 19#define _LINUX_RHASHTABLE_H
20 20
21#include <linux/rculist.h> 21#include <linux/compiler.h>
22#include <linux/list_nulls.h>
23#include <linux/workqueue.h>
24#include <linux/mutex.h>
25
26/*
27 * The end of the chain is marked with a special nulls marks which has
28 * the following format:
29 *
30 * +-------+-----------------------------------------------------+-+
31 * | Base | Hash |1|
32 * +-------+-----------------------------------------------------+-+
33 *
34 * Base (4 bits) : Reserved to distinguish between multiple tables.
35 * Specified via &struct rhashtable_params.nulls_base.
36 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
37 * 1 (1 bit) : Nulls marker (always set)
38 *
39 * The remaining bits of the next pointer remain unused for now.
40 */
41#define RHT_BASE_BITS 4
42#define RHT_HASH_BITS 27
43#define RHT_BASE_SHIFT RHT_HASH_BITS
22 44
23struct rhash_head { 45struct rhash_head {
24 struct rhash_head __rcu *next; 46 struct rhash_head __rcu *next;
25}; 47};
26 48
27#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL) 49/**
28 50 * struct bucket_table - Table of hash buckets
51 * @size: Number of hash buckets
52 * @locks_mask: Mask to apply before accessing locks[]
53 * @locks: Array of spinlocks protecting individual buckets
54 * @buckets: size * hash buckets
55 */
29struct bucket_table { 56struct bucket_table {
30 size_t size; 57 size_t size;
58 unsigned int locks_mask;
59 spinlock_t *locks;
31 struct rhash_head __rcu *buckets[]; 60 struct rhash_head __rcu *buckets[];
32}; 61};
33 62
@@ -45,11 +74,16 @@ struct rhashtable;
45 * @hash_rnd: Seed to use while hashing 74 * @hash_rnd: Seed to use while hashing
46 * @max_shift: Maximum number of shifts while expanding 75 * @max_shift: Maximum number of shifts while expanding
47 * @min_shift: Minimum number of shifts while shrinking 76 * @min_shift: Minimum number of shifts while shrinking
77 * @nulls_base: Base value to generate nulls marker
78 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
48 * @hashfn: Function to hash key 79 * @hashfn: Function to hash key
49 * @obj_hashfn: Function to hash object 80 * @obj_hashfn: Function to hash object
50 * @grow_decision: If defined, may return true if table should expand 81 * @grow_decision: If defined, may return true if table should expand
51 * @shrink_decision: If defined, may return true if table should shrink 82 * @shrink_decision: If defined, may return true if table should shrink
52 * @mutex_is_held: Must return true if protecting mutex is held 83 *
84 * Note: when implementing the grow and shrink decision function, min/max
85 * shift must be enforced; otherwise the resizing watermarks they set may be
86 * useless.
53 */ 87 */
54struct rhashtable_params { 88struct rhashtable_params {
55 size_t nelem_hint; 89 size_t nelem_hint;
@@ -59,36 +93,95 @@ struct rhashtable_params {
59 u32 hash_rnd; 93 u32 hash_rnd;
60 size_t max_shift; 94 size_t max_shift;
61 size_t min_shift; 95 size_t min_shift;
96 u32 nulls_base;
97 size_t locks_mul;
62 rht_hashfn_t hashfn; 98 rht_hashfn_t hashfn;
63 rht_obj_hashfn_t obj_hashfn; 99 rht_obj_hashfn_t obj_hashfn;
64 bool (*grow_decision)(const struct rhashtable *ht, 100 bool (*grow_decision)(const struct rhashtable *ht,
65 size_t new_size); 101 size_t new_size);
66 bool (*shrink_decision)(const struct rhashtable *ht, 102 bool (*shrink_decision)(const struct rhashtable *ht,
67 size_t new_size); 103 size_t new_size);
68#ifdef CONFIG_PROVE_LOCKING
69 int (*mutex_is_held)(void *parent);
70 void *parent;
71#endif
72}; 104};
73 105
74/** 106/**
75 * struct rhashtable - Hash table handle 107 * struct rhashtable - Hash table handle
76 * @tbl: Bucket table 108 * @tbl: Bucket table
109 * @future_tbl: Table under construction during expansion/shrinking
77 * @nelems: Number of elements in table 110 * @nelems: Number of elements in table
78 * @shift: Current size (1 << shift) 111 * @shift: Current size (1 << shift)
79 * @p: Configuration parameters 112 * @p: Configuration parameters
113 * @run_work: Deferred worker to expand/shrink asynchronously
114 * @mutex: Mutex to protect current/future table swapping
115 * @walkers: List of active walkers
116 * @being_destroyed: True if table is set up for destruction
80 */ 117 */
81struct rhashtable { 118struct rhashtable {
82 struct bucket_table __rcu *tbl; 119 struct bucket_table __rcu *tbl;
83 size_t nelems; 120 struct bucket_table __rcu *future_tbl;
84 size_t shift; 121 atomic_t nelems;
122 atomic_t shift;
85 struct rhashtable_params p; 123 struct rhashtable_params p;
124 struct work_struct run_work;
125 struct mutex mutex;
126 struct list_head walkers;
127 bool being_destroyed;
128};
129
130/**
131 * struct rhashtable_walker - Hash table walker
132 * @list: List entry on list of walkers
133 * @resize: Resize event occured
134 */
135struct rhashtable_walker {
136 struct list_head list;
137 bool resize;
86}; 138};
87 139
140/**
141 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
142 * @ht: Table to iterate through
143 * @p: Current pointer
144 * @walker: Associated rhashtable walker
145 * @slot: Current slot
146 * @skip: Number of entries to skip in slot
147 */
148struct rhashtable_iter {
149 struct rhashtable *ht;
150 struct rhash_head *p;
151 struct rhashtable_walker *walker;
152 unsigned int slot;
153 unsigned int skip;
154};
155
156static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
157{
158 return NULLS_MARKER(ht->p.nulls_base + hash);
159}
160
161#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
162 ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
163
164static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
165{
166 return ((unsigned long) ptr & 1);
167}
168
169static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
170{
171 return ((unsigned long) ptr) >> 1;
172}
173
88#ifdef CONFIG_PROVE_LOCKING 174#ifdef CONFIG_PROVE_LOCKING
89int lockdep_rht_mutex_is_held(const struct rhashtable *ht); 175int lockdep_rht_mutex_is_held(struct rhashtable *ht);
176int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
90#else 177#else
91static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht) 178static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
179{
180 return 1;
181}
182
183static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
184 u32 hash)
92{ 185{
93 return 1; 186 return 1;
94} 187}
@@ -96,13 +189,8 @@ static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
96 189
97int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); 190int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
98 191
99u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
100u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
101
102void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); 192void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
103bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); 193bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
104void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
105 struct rhash_head __rcu **pprev);
106 194
107bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); 195bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
108bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); 196bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
@@ -110,11 +198,23 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
110int rhashtable_expand(struct rhashtable *ht); 198int rhashtable_expand(struct rhashtable *ht);
111int rhashtable_shrink(struct rhashtable *ht); 199int rhashtable_shrink(struct rhashtable *ht);
112 200
113void *rhashtable_lookup(const struct rhashtable *ht, const void *key); 201void *rhashtable_lookup(struct rhashtable *ht, const void *key);
114void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, 202void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
115 bool (*compare)(void *, void *), void *arg); 203 bool (*compare)(void *, void *), void *arg);
116 204
117void rhashtable_destroy(const struct rhashtable *ht); 205bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
206bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
207 struct rhash_head *obj,
208 bool (*compare)(void *, void *),
209 void *arg);
210
211int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
212void rhashtable_walk_exit(struct rhashtable_iter *iter);
213int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
214void *rhashtable_walk_next(struct rhashtable_iter *iter);
215void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
216
217void rhashtable_destroy(struct rhashtable *ht);
118 218
119#define rht_dereference(p, ht) \ 219#define rht_dereference(p, ht) \
120 rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) 220 rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -122,92 +222,146 @@ void rhashtable_destroy(const struct rhashtable *ht);
122#define rht_dereference_rcu(p, ht) \ 222#define rht_dereference_rcu(p, ht) \
123 rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht)) 223 rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
124 224
125#define rht_entry(ptr, type, member) container_of(ptr, type, member) 225#define rht_dereference_bucket(p, tbl, hash) \
126#define rht_entry_safe(ptr, type, member) \ 226 rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
127({ \ 227
128 typeof(ptr) __ptr = (ptr); \ 228#define rht_dereference_bucket_rcu(p, tbl, hash) \
129 __ptr ? rht_entry(__ptr, type, member) : NULL; \ 229 rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
130})
131 230
132#define rht_next_entry_safe(pos, ht, member) \ 231#define rht_entry(tpos, pos, member) \
133({ \ 232 ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
134 pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \ 233
135 typeof(*(pos)), member) : NULL; \ 234/**
136}) 235 * rht_for_each_continue - continue iterating over hash chain
236 * @pos: the &struct rhash_head to use as a loop cursor.
237 * @head: the previous &struct rhash_head to continue from
238 * @tbl: the &struct bucket_table
239 * @hash: the hash value / bucket index
240 */
241#define rht_for_each_continue(pos, head, tbl, hash) \
242 for (pos = rht_dereference_bucket(head, tbl, hash); \
243 !rht_is_a_nulls(pos); \
244 pos = rht_dereference_bucket((pos)->next, tbl, hash))
137 245
138/** 246/**
139 * rht_for_each - iterate over hash chain 247 * rht_for_each - iterate over hash chain
140 * @pos: &struct rhash_head to use as a loop cursor. 248 * @pos: the &struct rhash_head to use as a loop cursor.
141 * @head: head of the hash chain (struct rhash_head *) 249 * @tbl: the &struct bucket_table
142 * @ht: pointer to your struct rhashtable 250 * @hash: the hash value / bucket index
143 */ 251 */
144#define rht_for_each(pos, head, ht) \ 252#define rht_for_each(pos, tbl, hash) \
145 for (pos = rht_dereference(head, ht); \ 253 rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
146 pos; \ 254
147 pos = rht_dereference((pos)->next, ht)) 255/**
256 * rht_for_each_entry_continue - continue iterating over hash chain
257 * @tpos: the type * to use as a loop cursor.
258 * @pos: the &struct rhash_head to use as a loop cursor.
259 * @head: the previous &struct rhash_head to continue from
260 * @tbl: the &struct bucket_table
261 * @hash: the hash value / bucket index
262 * @member: name of the &struct rhash_head within the hashable struct.
263 */
264#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
265 for (pos = rht_dereference_bucket(head, tbl, hash); \
266 (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
267 pos = rht_dereference_bucket((pos)->next, tbl, hash))
148 268
149/** 269/**
150 * rht_for_each_entry - iterate over hash chain of given type 270 * rht_for_each_entry - iterate over hash chain of given type
151 * @pos: type * to use as a loop cursor. 271 * @tpos: the type * to use as a loop cursor.
152 * @head: head of the hash chain (struct rhash_head *) 272 * @pos: the &struct rhash_head to use as a loop cursor.
153 * @ht: pointer to your struct rhashtable 273 * @tbl: the &struct bucket_table
154 * @member: name of the rhash_head within the hashable struct. 274 * @hash: the hash value / bucket index
275 * @member: name of the &struct rhash_head within the hashable struct.
155 */ 276 */
156#define rht_for_each_entry(pos, head, ht, member) \ 277#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
157 for (pos = rht_entry_safe(rht_dereference(head, ht), \ 278 rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
158 typeof(*(pos)), member); \ 279 tbl, hash, member)
159 pos; \
160 pos = rht_next_entry_safe(pos, ht, member))
161 280
162/** 281/**
163 * rht_for_each_entry_safe - safely iterate over hash chain of given type 282 * rht_for_each_entry_safe - safely iterate over hash chain of given type
164 * @pos: type * to use as a loop cursor. 283 * @tpos: the type * to use as a loop cursor.
165 * @n: type * to use for temporary next object storage 284 * @pos: the &struct rhash_head to use as a loop cursor.
166 * @head: head of the hash chain (struct rhash_head *) 285 * @next: the &struct rhash_head to use as next in loop cursor.
167 * @ht: pointer to your struct rhashtable 286 * @tbl: the &struct bucket_table
168 * @member: name of the rhash_head within the hashable struct. 287 * @hash: the hash value / bucket index
288 * @member: name of the &struct rhash_head within the hashable struct.
169 * 289 *
170 * This hash chain list-traversal primitive allows for the looped code to 290 * This hash chain list-traversal primitive allows for the looped code to
171 * remove the loop cursor from the list. 291 * remove the loop cursor from the list.
172 */ 292 */
173#define rht_for_each_entry_safe(pos, n, head, ht, member) \ 293#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
174 for (pos = rht_entry_safe(rht_dereference(head, ht), \ 294 for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
175 typeof(*(pos)), member), \ 295 next = !rht_is_a_nulls(pos) ? \
176 n = rht_next_entry_safe(pos, ht, member); \ 296 rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
177 pos; \ 297 (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
178 pos = n, \ 298 pos = next, \
179 n = rht_next_entry_safe(pos, ht, member)) 299 next = !rht_is_a_nulls(pos) ? \
300 rht_dereference_bucket(pos->next, tbl, hash) : NULL)
301
302/**
303 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
304 * @pos: the &struct rhash_head to use as a loop cursor.
305 * @head: the previous &struct rhash_head to continue from
306 * @tbl: the &struct bucket_table
307 * @hash: the hash value / bucket index
308 *
309 * This hash chain list-traversal primitive may safely run concurrently with
310 * the _rcu mutation primitives such as rhashtable_insert() as long as the
311 * traversal is guarded by rcu_read_lock().
312 */
313#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
314 for (({barrier(); }), \
315 pos = rht_dereference_bucket_rcu(head, tbl, hash); \
316 !rht_is_a_nulls(pos); \
317 pos = rcu_dereference_raw(pos->next))
180 318
181/** 319/**
182 * rht_for_each_rcu - iterate over rcu hash chain 320 * rht_for_each_rcu - iterate over rcu hash chain
183 * @pos: &struct rhash_head to use as a loop cursor. 321 * @pos: the &struct rhash_head to use as a loop cursor.
184 * @head: head of the hash chain (struct rhash_head *) 322 * @tbl: the &struct bucket_table
185 * @ht: pointer to your struct rhashtable 323 * @hash: the hash value / bucket index
324 *
325 * This hash chain list-traversal primitive may safely run concurrently with
326 * the _rcu mutation primitives such as rhashtable_insert() as long as the
327 * traversal is guarded by rcu_read_lock().
328 */
329#define rht_for_each_rcu(pos, tbl, hash) \
330 rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
331
332/**
333 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
334 * @tpos: the type * to use as a loop cursor.
335 * @pos: the &struct rhash_head to use as a loop cursor.
336 * @head: the previous &struct rhash_head to continue from
337 * @tbl: the &struct bucket_table
338 * @hash: the hash value / bucket index
339 * @member: name of the &struct rhash_head within the hashable struct.
186 * 340 *
187 * This hash chain list-traversal primitive may safely run concurrently with 341 * This hash chain list-traversal primitive may safely run concurrently with
188 * the _rcu fkht mutation primitives such as rht_insert() as long as the 342 * the _rcu mutation primitives such as rhashtable_insert() as long as the
189 * traversal is guarded by rcu_read_lock(). 343 * traversal is guarded by rcu_read_lock().
190 */ 344 */
191#define rht_for_each_rcu(pos, head, ht) \ 345#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
192 for (pos = rht_dereference_rcu(head, ht); \ 346 for (({barrier(); }), \
193 pos; \ 347 pos = rht_dereference_bucket_rcu(head, tbl, hash); \
194 pos = rht_dereference_rcu((pos)->next, ht)) 348 (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
349 pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
195 350
196/** 351/**
197 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type 352 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
198 * @pos: type * to use as a loop cursor. 353 * @tpos: the type * to use as a loop cursor.
199 * @head: head of the hash chain (struct rhash_head *) 354 * @pos: the &struct rhash_head to use as a loop cursor.
200 * @member: name of the rhash_head within the hashable struct. 355 * @tbl: the &struct bucket_table
356 * @hash: the hash value / bucket index
357 * @member: name of the &struct rhash_head within the hashable struct.
201 * 358 *
202 * This hash chain list-traversal primitive may safely run concurrently with 359 * This hash chain list-traversal primitive may safely run concurrently with
203 * the _rcu fkht mutation primitives such as rht_insert() as long as the 360 * the _rcu mutation primitives such as rhashtable_insert() as long as the
204 * traversal is guarded by rcu_read_lock(). 361 * traversal is guarded by rcu_read_lock().
205 */ 362 */
206#define rht_for_each_entry_rcu(pos, head, member) \ 363#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
207 for (pos = rht_entry_safe(rcu_dereference_raw(head), \ 364 rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
208 typeof(*(pos)), member); \ 365 tbl, hash, member)
209 pos; \
210 pos = rht_entry_safe(rcu_dereference_raw((pos)->member.next), \
211 typeof(*(pos)), member))
212 366
213#endif /* _LINUX_RHASHTABLE_H */ 367#endif /* _LINUX_RHASHTABLE_H */
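
The iteration macros now take an explicit bucket table and hash, and a walker interface is added for dump-style users. A sketch of the walker based on the declarations above (the test_obj structure and the exact -EAGAIN handling are assumptions):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rhashtable.h>

struct test_obj {
	struct rhash_head node;
	int value;
};

static void example_dump_table(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct test_obj *obj;

	if (rhashtable_walk_init(ht, &iter))
		return;

	/* A non-zero return here is assumed to mean a resize is in flight;
	 * iteration can still proceed, entries may just be seen twice. */
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;	/* assumed: ERR_PTR(-EAGAIN) after a resize */
		pr_info("value=%d\n", obj->value);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
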
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c0c2bce6b0b7..c4c559a45dc8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -37,6 +37,16 @@ struct anon_vma {
37 atomic_t refcount; 37 atomic_t refcount;
38 38
39 /* 39 /*
40 * Count of child anon_vmas and VMAs which point to this anon_vma.
41 *
42 * This counter is used for making decision about reusing anon_vma
43 * instead of forking a new one. See comments in function anon_vma_clone.
44 */
45 unsigned degree;
46
47 struct anon_vma *parent; /* Parent of this anon_vma */
48
49 /*
40 * NOTE: the LSB of the rb_root.rb_node is set by 50 * NOTE: the LSB of the rb_root.rb_node is set by
41 * mm_take_all_locks() _after_ taking the above lock. So the 51 * mm_take_all_locks() _after_ taking the above lock. So the
42 * rb_root must only be read/written after taking the above lock 52 * rb_root must only be read/written after taking the above lock
@@ -188,7 +198,7 @@ int page_referenced(struct page *, int is_locked,
188int try_to_unmap(struct page *, enum ttu_flags flags); 198int try_to_unmap(struct page *, enum ttu_flags flags);
189 199
190/* 200/*
191 * Called from mm/filemap_xip.c to unmap empty zero page 201 * Used by uprobes to replace a userspace page safely
192 */ 202 */
193pte_t *__page_check_address(struct page *, struct mm_struct *, 203pte_t *__page_check_address(struct page *, struct mm_struct *,
194 unsigned long, spinlock_t **, int); 204 unsigned long, spinlock_t **, int);
@@ -236,7 +246,6 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
236 * arg: passed to rmap_one() and invalid_vma() 246 * arg: passed to rmap_one() and invalid_vma()
237 * rmap_one: executed on each vma where page is mapped 247 * rmap_one: executed on each vma where page is mapped
238 * done: for checking traversing termination condition 248 * done: for checking traversing termination condition
239 * file_nonlinear: for handling file nonlinear mapping
240 * anon_lock: for getting anon_lock by optimized way rather than default 249 * anon_lock: for getting anon_lock by optimized way rather than default
241 * invalid_vma: for skipping uninterested vma 250 * invalid_vma: for skipping uninterested vma
242 */ 251 */
@@ -245,7 +254,6 @@ struct rmap_walk_control {
245 int (*rmap_one)(struct page *page, struct vm_area_struct *vma, 254 int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
246 unsigned long addr, void *arg); 255 unsigned long addr, void *arg);
247 int (*done)(struct page *page); 256 int (*done)(struct page *page);
248 int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
249 struct anon_vma *(*anon_lock)(struct page *page); 257 struct anon_vma *(*anon_lock)(struct page *page);
250 bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); 258 bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
251}; 259};
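
The rmap_walk_control changes above shape how reverse-map walks are driven. A rough sketch in the style of the existing users (the callback, the SWAP_AGAIN return convention and the rmap_walk() call are assumptions about the surrounding mm code, which is not shown in this hunk):

#include <linux/rmap.h>
#include <linux/swap.h>		/* SWAP_AGAIN (assumed return convention) */

static int count_one_mapping(struct page *page, struct vm_area_struct *vma,
			     unsigned long addr, void *arg)
{
	int *count = arg;

	(*count)++;
	return SWAP_AGAIN;	/* keep walking */
}

/* Caller is expected to follow the usual locking rules (e.g. page locked). */
static int count_mappings(struct page *page)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_one_mapping,
		.arg = &count,
	};

	rmap_walk(page, &rwc);
	return count;
}
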
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 6d6be09a2fe5..dcad7ee0d746 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -161,7 +161,7 @@ extern void devm_rtc_device_unregister(struct device *dev,
161extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); 161extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
162extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); 162extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
163extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); 163extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
164extern int rtc_set_ntp_time(struct timespec now); 164extern int rtc_set_ntp_time(struct timespec64 now);
165int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); 165int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
166extern int rtc_read_alarm(struct rtc_device *rtc, 166extern int rtc_read_alarm(struct rtc_device *rtc,
167 struct rtc_wkalrm *alrm); 167 struct rtc_wkalrm *alrm);
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
new file mode 100644
index 000000000000..e6337a56d741
--- /dev/null
+++ b/include/linux/rtc/ds1685.h
@@ -0,0 +1,375 @@
1/*
2 * Definitions for the registers, addresses, and platform data of the
3 * DS1685/DS1687-series RTC chips.
4 *
5 * This Driver also works for the DS17X85/DS17X87 RTC chips. Functionally
6 * similar to the DS1685/DS1687, they support a few extra features which
7 * include larger, battery-backed NV-SRAM, burst-mode access, and an RTC
8 * write counter.
9 *
10 * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>.
11 * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>.
12 *
13 * References:
14 * DS1685/DS1687 3V/5V Real-Time Clocks, 19-5215, Rev 4/10.
15 * DS17x85/DS17x87 3V/5V Real-Time Clocks, 19-5222, Rev 4/10.
16 * DS1689/DS1693 3V/5V Serialized Real-Time Clocks, Rev 112105.
17 * Application Note 90, Using the Multiplex Bus RTC Extended Features.
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License version 2 as
21 * published by the Free Software Foundation.
22 */
23
24#ifndef _LINUX_RTC_DS1685_H_
25#define _LINUX_RTC_DS1685_H_
26
27#include <linux/rtc.h>
28#include <linux/platform_device.h>
29#include <linux/workqueue.h>
30
31/**
32 * struct ds1685_priv - DS1685 private data structure.
33 * @dev: pointer to the rtc_device structure.
34 * @regs: iomapped base address pointer of the RTC registers.
35 * @regstep: padding/step size between registers (optional).
36 * @baseaddr: base address of the RTC device.
37 * @size: resource size.
38 * @lock: private lock variable for spin locking/unlocking.
39 * @work: private workqueue.
40 * @irq: IRQ number assigned to the RTC device.
41 * @prepare_poweroff: pointer to platform pre-poweroff function.
42 * @wake_alarm: pointer to platform wake alarm function.
43 * @post_ram_clear: pointer to platform post ram-clear function.
44 */
45struct ds1685_priv {
46 struct rtc_device *dev;
47 void __iomem *regs;
48 u32 regstep;
49 resource_size_t baseaddr;
50 size_t size;
51 spinlock_t lock;
52 struct work_struct work;
53 int irq_num;
54 bool bcd_mode;
55 bool no_irq;
56 bool uie_unsupported;
57 bool alloc_io_resources;
58 u8 (*read)(struct ds1685_priv *, int);
59 void (*write)(struct ds1685_priv *, int, u8);
60 void (*prepare_poweroff)(void);
61 void (*wake_alarm)(void);
62 void (*post_ram_clear)(void);
63};
64
65
66/**
67 * struct ds1685_rtc_platform_data - platform data structure.
68 * @plat_prepare_poweroff: platform-specific pre-poweroff function.
69 * @plat_wake_alarm: platform-specific wake alarm function.
70 * @plat_post_ram_clear: platform-specific post ram-clear function.
71 *
72 * If your platform needs to use a custom padding/step size between
73 * registers, or uses one or more of the extended interrupts and needs special
74 * handling, then include this header file in your platform definition and
75 * set regstep and the plat_* pointers as appropriate.
76 */
77struct ds1685_rtc_platform_data {
78 const u32 regstep;
79 const bool bcd_mode;
80 const bool no_irq;
81 const bool uie_unsupported;
82 const bool alloc_io_resources;
83 u8 (*plat_read)(struct ds1685_priv *, int);
84 void (*plat_write)(struct ds1685_priv *, int, u8);
85 void (*plat_prepare_poweroff)(void);
86 void (*plat_wake_alarm)(void);
87 void (*plat_post_ram_clear)(void);
88};
89
90
91/*
92 * Time Registers.
93 */
94#define RTC_SECS 0x00 /* Seconds 00-59 */
95#define RTC_SECS_ALARM 0x01 /* Alarm Seconds 00-59 */
96#define RTC_MINS 0x02 /* Minutes 00-59 */
97#define RTC_MINS_ALARM 0x03 /* Alarm Minutes 00-59 */
98#define RTC_HRS 0x04 /* Hours 01-12 AM/PM || 00-23 */
99#define RTC_HRS_ALARM 0x05 /* Alarm Hours 01-12 AM/PM || 00-23 */
100#define RTC_WDAY 0x06 /* Day of Week 01-07 */
101#define RTC_MDAY 0x07 /* Day of Month 01-31 */
102#define RTC_MONTH 0x08 /* Month 01-12 */
103#define RTC_YEAR 0x09 /* Year 00-99 */
104#define RTC_CENTURY 0x48 /* Century 00-99 */
105#define RTC_MDAY_ALARM 0x49 /* Alarm Day of Month 01-31 */
106
107
108/*
109 * Bit masks for the Time registers in BCD Mode (DM = 0).
110 */
111#define RTC_SECS_BCD_MASK 0x7f /* - x x x x x x x */
112#define RTC_MINS_BCD_MASK 0x7f /* - x x x x x x x */
113#define RTC_HRS_12_BCD_MASK 0x1f /* - - - x x x x x */
114#define RTC_HRS_24_BCD_MASK 0x3f /* - - x x x x x x */
115#define RTC_MDAY_BCD_MASK 0x3f /* - - x x x x x x */
116#define RTC_MONTH_BCD_MASK 0x1f /* - - - x x x x x */
117#define RTC_YEAR_BCD_MASK 0xff /* x x x x x x x x */
118
119/*
120 * Bit masks for the Time registers in BIN Mode (DM = 1).
121 */
122#define RTC_SECS_BIN_MASK 0x3f /* - - x x x x x x */
123#define RTC_MINS_BIN_MASK 0x3f /* - - x x x x x x */
124#define RTC_HRS_12_BIN_MASK 0x0f /* - - - - x x x x */
125#define RTC_HRS_24_BIN_MASK 0x1f /* - - - x x x x x */
126#define RTC_MDAY_BIN_MASK 0x1f /* - - - x x x x x */
127#define RTC_MONTH_BIN_MASK 0x0f /* - - - - x x x x */
128#define RTC_YEAR_BIN_MASK 0x7f /* - x x x x x x x */
129
130/*
131 * Bit masks common for the Time registers in BCD or BIN Mode.
132 */
133#define RTC_WDAY_MASK 0x07 /* - - - - - x x x */
134#define RTC_CENTURY_MASK 0xff /* x x x x x x x x */
135#define RTC_MDAY_ALARM_MASK 0xff /* x x x x x x x x */
136#define RTC_HRS_AMPM_MASK BIT(7) /* Mask for the AM/PM bit */
137
138
139
140/*
141 * Control Registers.
142 */
143#define RTC_CTRL_A 0x0a /* Control Register A */
144#define RTC_CTRL_B 0x0b /* Control Register B */
145#define RTC_CTRL_C 0x0c /* Control Register C */
146#define RTC_CTRL_D 0x0d /* Control Register D */
147#define RTC_EXT_CTRL_4A 0x4a /* Extended Control Register 4A */
148#define RTC_EXT_CTRL_4B 0x4b /* Extended Control Register 4B */
149
150
151/*
152 * Bit names in Control Register A.
153 */
154#define RTC_CTRL_A_UIP BIT(7) /* Update In Progress */
155#define RTC_CTRL_A_DV2 BIT(6) /* Countdown Chain */
156#define RTC_CTRL_A_DV1 BIT(5) /* Oscillator Enable */
157#define RTC_CTRL_A_DV0 BIT(4) /* Bank Select */
158#define RTC_CTRL_A_RS2 BIT(2) /* Rate-Selection Bit 2 */
159#define RTC_CTRL_A_RS3 BIT(3) /* Rate-Selection Bit 3 */
160#define RTC_CTRL_A_RS1 BIT(1) /* Rate-Selection Bit 1 */
161#define RTC_CTRL_A_RS0 BIT(0) /* Rate-Selection Bit 0 */
162#define RTC_CTRL_A_RS_MASK 0x0f /* RS3 + RS2 + RS1 + RS0 */
163
164/*
165 * Bit names in Control Register B.
166 */
167#define RTC_CTRL_B_SET BIT(7) /* SET Bit */
168#define RTC_CTRL_B_PIE BIT(6) /* Periodic-Interrupt Enable */
169#define RTC_CTRL_B_AIE BIT(5) /* Alarm-Interrupt Enable */
170#define RTC_CTRL_B_UIE BIT(4) /* Update-Ended Interrupt-Enable */
171#define RTC_CTRL_B_SQWE BIT(3) /* Square-Wave Enable */
172#define RTC_CTRL_B_DM BIT(2) /* Data Mode */
173#define RTC_CTRL_B_2412 BIT(1) /* 12-Hr/24-Hr Mode */
174#define RTC_CTRL_B_DSE BIT(0) /* Daylight Savings Enable */
175#define RTC_CTRL_B_PAU_MASK 0x70 /* PIE + AIE + UIE */
176
177
178/*
179 * Bit names in Control Register C.
180 *
181 * BIT(0), BIT(1), BIT(2), & BIT(3) are unused, always return 0, and cannot
182 * be written to.
183 */
184#define RTC_CTRL_C_IRQF BIT(7) /* Interrupt-Request Flag */
185#define RTC_CTRL_C_PF BIT(6) /* Periodic-Interrupt Flag */
186#define RTC_CTRL_C_AF BIT(5) /* Alarm-Interrupt Flag */
187#define RTC_CTRL_C_UF BIT(4) /* Update-Ended Interrupt Flag */
188#define RTC_CTRL_C_PAU_MASK 0x70 /* PF + AF + UF */
189
190
191/*
192 * Bit names in Control Register D.
193 *
194 * BIT(0) through BIT(6) are unused, always return 0, and cannot
195 * be written to.
196 */
197#define RTC_CTRL_D_VRT BIT(7) /* Valid RAM and Time */
198
199
200/*
201 * Bit names in Extended Control Register 4A.
202 *
203 * On the DS1685/DS1687/DS1689/DS1693, BIT(4) and BIT(5) are reserved for
204 * future use. They can be read from and written to, but have no effect
205 * on the RTC's operation.
206 *
207 * On the DS17x85/DS17x87, BIT(5) is Burst-Mode Enable (BME), and allows
208 * access to the extended NV-SRAM by automatically incrementing the address
209 * register when they are read from or written to.
210 */
211#define RTC_CTRL_4A_VRT2 BIT(7) /* Auxiliary Battery Status */
212#define RTC_CTRL_4A_INCR BIT(6) /* Increment-in-Progress Status */
213#define RTC_CTRL_4A_PAB BIT(3) /* Power-Active Bar Control */
214#define RTC_CTRL_4A_RF BIT(2) /* RAM-Clear Flag */
215#define RTC_CTRL_4A_WF BIT(1) /* Wake-Up Alarm Flag */
216#define RTC_CTRL_4A_KF BIT(0) /* Kickstart Flag */
217#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
218#define RTC_CTRL_4A_BME BIT(5) /* Burst-Mode Enable */
219#endif
220#define RTC_CTRL_4A_RWK_MASK 0x07 /* RF + WF + KF */
221
222
223/*
224 * Bit names in Extended Control Register 4B.
225 */
226#define RTC_CTRL_4B_ABE BIT(7) /* Auxiliary Battery Enable */
227#define RTC_CTRL_4B_E32K BIT(6) /* Enable 32.768kHz on SQW Pin */
228#define RTC_CTRL_4B_CS BIT(5) /* Crystal Select */
229#define RTC_CTRL_4B_RCE BIT(4) /* RAM Clear-Enable */
230#define RTC_CTRL_4B_PRS BIT(3) /* PAB Reset-Select */
231#define RTC_CTRL_4B_RIE BIT(2) /* RAM Clear-Interrupt Enable */
232#define RTC_CTRL_4B_WIE BIT(1) /* Wake-Up Alarm-Interrupt Enable */
233#define RTC_CTRL_4B_KSE BIT(0) /* Kickstart Interrupt-Enable */
234#define RTC_CTRL_4B_RWK_MASK 0x07 /* RIE + WIE + KSE */
235
236
237/*
238 * Misc register names in Bank 1.
239 *
240 * The DV0 bit in Control Register A must be set to 1 for these registers
241 * to become available, including Extended Control Registers 4A & 4B.
242 */
243#define RTC_BANK1_SSN_MODEL 0x40 /* Model Number */
244#define RTC_BANK1_SSN_BYTE_1 0x41 /* 1st Byte of Serial Number */
245#define RTC_BANK1_SSN_BYTE_2 0x42 /* 2nd Byte of Serial Number */
246#define RTC_BANK1_SSN_BYTE_3 0x43 /* 3rd Byte of Serial Number */
247#define RTC_BANK1_SSN_BYTE_4 0x44 /* 4th Byte of Serial Number */
248#define RTC_BANK1_SSN_BYTE_5 0x45 /* 5th Byte of Serial Number */
249#define RTC_BANK1_SSN_BYTE_6 0x46 /* 6th Byte of Serial Number */
250#define RTC_BANK1_SSN_CRC 0x47 /* Serial CRC Byte */
251#define RTC_BANK1_RAM_DATA_PORT 0x53 /* Extended RAM Data Port */
252
253
254/*
255 * Model-specific registers in Bank 1.
256 *
257 * The addresses below differ depending on the model of the RTC chip
258 * selected in the kernel configuration. Not all of these features are
259 * supported in the main driver at present.
260 *
261 * DS1685/DS1687 - Extended NV-SRAM address (LSB only).
262 * DS1689/DS1693 - Vcc, Vbat, Pwr Cycle Counters & Customer-specific S/N.
263 * DS17x85/DS17x87 - Extended NV-SRAM addresses (MSB & LSB) & Write counter.
264 */
265#if defined(CONFIG_RTC_DRV_DS1685)
266#define RTC_BANK1_RAM_ADDR 0x50 /* NV-SRAM Addr */
267#elif defined(CONFIG_RTC_DRV_DS1689)
268#define RTC_BANK1_VCC_CTR_LSB 0x54 /* Vcc Counter Addr (LSB) */
269#define RTC_BANK1_VCC_CTR_MSB 0x57 /* Vcc Counter Addr (MSB) */
270#define RTC_BANK1_VBAT_CTR_LSB 0x58 /* Vbat Counter Addr (LSB) */
271#define RTC_BANK1_VBAT_CTR_MSB 0x5b /* Vbat Counter Addr (MSB) */
272#define RTC_BANK1_PWR_CTR_LSB 0x5c /* Pwr Cycle Counter Addr (LSB) */
273#define RTC_BANK1_PWR_CTR_MSB 0x5d /* Pwr Cycle Counter Addr (MSB) */
274#define RTC_BANK1_UNIQ_SN 0x60 /* Customer-specific S/N */
275#else /* DS17x85/DS17x87 */
276#define RTC_BANK1_RAM_ADDR_LSB 0x50 /* NV-SRAM Addr (LSB) */
277#define RTC_BANK1_RAM_ADDR_MSB 0x51 /* NV-SRAM Addr (MSB) */
278#define RTC_BANK1_WRITE_CTR 0x5e /* RTC Write Counter */
279#endif
280
281
282/*
283 * Model numbers.
284 *
285 * The DS1688/DS1691 and DS1689/DS1693 chips share the same model number
286 * and the manual doesn't indicate any major differences. As such, they
287 * are regarded as the same chip in this driver.
288 */
289#define RTC_MODEL_DS1685 0x71 /* DS1685/DS1687 */
290#define RTC_MODEL_DS17285 0x72 /* DS17285/DS17287 */
291#define RTC_MODEL_DS1689 0x73 /* DS1688/DS1691/DS1689/DS1693 */
292#define RTC_MODEL_DS17485 0x74 /* DS17485/DS17487 */
293#define RTC_MODEL_DS17885 0x78 /* DS17885/DS17887 */
294
295
296/*
297 * Periodic Interrupt Rates / Square-Wave Output Frequency
298 *
299 * Periodic rates are selected by setting the RS3-RS0 bits in Control
300 * Register A and enabled via either the E32K bit in Extended Control
301 * Register 4B or the SQWE bit in Control Register B.
302 *
303 * E32K overrides the settings of RS3-RS0 and outputs a frequency of 32768Hz
304 * on the SQW pin of the RTC chip. While there are 16 possible selections,
305 * the 1-of-16 decoder is only able to divide the base 32768Hz signal into 13
306 * smaller frequencies. The values 0x01 and 0x02 are not used and are
307 * synonymous with 0x08 and 0x09, respectively.
308 *
309 * When E32K is set to a logic 1, periodic interrupts are disabled and reading
310 * /dev/rtc will return -EINVAL. This also applies if the periodic interrupt
311 * frequency is set to 0Hz.
312 *
313 * Not currently used by the rtc-ds1685 driver because the RTC core removed
314 * support for hardware-generated periodic-interrupts in favour of
315 * hrtimer-generated interrupts. But these defines are kept around for use
316 * in userland, as documentation to the hardware, and possible future use if
317 * hardware-generated periodic interrupts are ever added back.
318 */
319 /* E32K RS3 RS2 RS1 RS0 */
320#define RTC_SQW_8192HZ 0x03 /* 0 0 0 1 1 */
321#define RTC_SQW_4096HZ 0x04 /* 0 0 1 0 0 */
322#define RTC_SQW_2048HZ 0x05 /* 0 0 1 0 1 */
323#define RTC_SQW_1024HZ 0x06 /* 0 0 1 1 0 */
324#define RTC_SQW_512HZ 0x07 /* 0 0 1 1 1 */
325#define RTC_SQW_256HZ 0x08 /* 0 1 0 0 0 */
326#define RTC_SQW_128HZ 0x09 /* 0 1 0 0 1 */
327#define RTC_SQW_64HZ 0x0a /* 0 1 0 1 0 */
328#define RTC_SQW_32HZ 0x0b /* 0 1 0 1 1 */
329#define RTC_SQW_16HZ 0x0c /* 0 1 1 0 0 */
330#define RTC_SQW_8HZ 0x0d /* 0 1 1 0 1 */
331#define RTC_SQW_4HZ 0x0e /* 0 1 1 1 0 */
332#define RTC_SQW_2HZ 0x0f /* 0 1 1 1 1 */
333#define RTC_SQW_0HZ 0x00 /* 0 0 0 0 0 */
334#define RTC_SQW_32768HZ 32768 /* 1 - - - - */
335#define RTC_MAX_USER_FREQ 8192
336
337
338/*
339 * NVRAM data & addresses:
340 * - 50 bytes of NVRAM are available just past the clock registers.
341 * - 64 additional bytes are available in Bank0.
342 *
343 * Extended, battery-backed NV-SRAM:
344 * - DS1685/DS1687 - 128 bytes.
345 * - DS1689/DS1693 - 0 bytes.
346 * - DS17285/DS17287 - 2048 bytes.
347 * - DS17485/DS17487 - 4096 bytes.
348 * - DS17885/DS17887 - 8192 bytes.
349 */
350#define NVRAM_TIME_BASE 0x0e /* NVRAM Addr in Time regs */
351#define NVRAM_BANK0_BASE 0x40 /* NVRAM Addr in Bank0 regs */
352#define NVRAM_SZ_TIME 50
353#define NVRAM_SZ_BANK0 64
354#if defined(CONFIG_RTC_DRV_DS1685)
355# define NVRAM_SZ_EXTND 128
356#elif defined(CONFIG_RTC_DRV_DS1689)
357# define NVRAM_SZ_EXTND 0
358#elif defined(CONFIG_RTC_DRV_DS17285)
359# define NVRAM_SZ_EXTND 2048
360#elif defined(CONFIG_RTC_DRV_DS17485)
361# define NVRAM_SZ_EXTND 4096
362#elif defined(CONFIG_RTC_DRV_DS17885)
363# define NVRAM_SZ_EXTND 8192
364#endif
365#define NVRAM_TOTAL_SZ_BANK0 (NVRAM_SZ_TIME + NVRAM_SZ_BANK0)
366#define NVRAM_TOTAL_SZ (NVRAM_TOTAL_SZ_BANK0 + NVRAM_SZ_EXTND)
367
368
369/*
370 * Function Prototypes.
371 */
372extern void __noreturn
373ds1685_rtc_poweroff(struct platform_device *pdev);
374
375#endif /* _LINUX_RTC_DS1685_H_ */
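
A short sketch of reading the seconds register through the accessor hooks declared above, assuming BCD data mode and that the read() hook behaves like the driver's own register accessor:

#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/rtc/ds1685.h>

static int example_read_seconds(struct ds1685_priv *rtc)
{
	int timeout = 10;

	/* UIP is set while the chip is updating the time/calendar bytes. */
	while ((rtc->read(rtc, RTC_CTRL_A) & RTC_CTRL_A_UIP) && --timeout)
		udelay(250);
	if (!timeout)
		return -ETIMEDOUT;

	/* BCD mode (DM = 0): mask off reserved bits, then convert. */
	return bcd2bin(rtc->read(rtc, RTC_SECS) & RTC_SECS_BCD_MASK);
}
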
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8db31ef98d2f..41c60e5302d7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1370,6 +1370,8 @@ struct task_struct {
1370 1370
1371 unsigned long atomic_flags; /* Flags needing atomic access. */ 1371 unsigned long atomic_flags; /* Flags needing atomic access. */
1372 1372
1373 struct restart_block restart_block;
1374
1373 pid_t pid; 1375 pid_t pid;
1374 pid_t tgid; 1376 pid_t tgid;
1375 1377
@@ -1662,6 +1664,9 @@ struct task_struct {
1662 unsigned long timer_slack_ns; 1664 unsigned long timer_slack_ns;
1663 unsigned long default_timer_slack_ns; 1665 unsigned long default_timer_slack_ns;
1664 1666
1667#ifdef CONFIG_KASAN
1668 unsigned int kasan_depth;
1669#endif
1665#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1670#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1666 /* Index of current stored address in ret_stack */ 1671 /* Index of current stored address in ret_stack */
1667 int curr_ret_stack; 1672 int curr_ret_stack;
@@ -2145,6 +2150,7 @@ extern unsigned long long notrace sched_clock(void);
2145 */ 2150 */
2146extern u64 cpu_clock(int cpu); 2151extern u64 cpu_clock(int cpu);
2147extern u64 local_clock(void); 2152extern u64 local_clock(void);
2153extern u64 running_clock(void);
2148extern u64 sched_clock_cpu(int cpu); 2154extern u64 sched_clock_cpu(int cpu);
2149 2155
2150 2156
diff --git a/include/linux/security.h b/include/linux/security.h
index ba96471c11ba..a1b7dbd127ff 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1281,6 +1281,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1281 * @alter contains the flag indicating whether changes are to be made. 1281 * @alter contains the flag indicating whether changes are to be made.
1282 * Return 0 if permission is granted. 1282 * Return 0 if permission is granted.
1283 * 1283 *
1284 * @binder_set_context_mgr
1285 * Check whether @mgr is allowed to be the binder context manager.
1286 * @mgr contains the task_struct for the task being registered.
1287 * Return 0 if permission is granted.
1288 * @binder_transaction
1289 * Check whether @from is allowed to invoke a binder transaction call
1290 * to @to.
1291 * @from contains the task_struct for the sending task.
1292 * @to contains the task_struct for the receiving task.
1293 * @binder_transfer_binder
1294 * Check whether @from is allowed to transfer a binder reference to @to.
1295 * @from contains the task_struct for the sending task.
1296 * @to contains the task_struct for the receiving task.
1297 * @binder_transfer_file
1298 * Check whether @from is allowed to transfer @file to @to.
1299 * @from contains the task_struct for the sending task.
1300 * @file contains the struct file being transferred.
1301 * @to contains the task_struct for the receiving task.
1302 *
1284 * @ptrace_access_check: 1303 * @ptrace_access_check:
1285 * Check permission before allowing the current process to trace the 1304 * Check permission before allowing the current process to trace the
1286 * @child process. 1305 * @child process.
@@ -1441,6 +1460,14 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1441struct security_operations { 1460struct security_operations {
1442 char name[SECURITY_NAME_MAX + 1]; 1461 char name[SECURITY_NAME_MAX + 1];
1443 1462
1463 int (*binder_set_context_mgr) (struct task_struct *mgr);
1464 int (*binder_transaction) (struct task_struct *from,
1465 struct task_struct *to);
1466 int (*binder_transfer_binder) (struct task_struct *from,
1467 struct task_struct *to);
1468 int (*binder_transfer_file) (struct task_struct *from,
1469 struct task_struct *to, struct file *file);
1470
1444 int (*ptrace_access_check) (struct task_struct *child, unsigned int mode); 1471 int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
1445 int (*ptrace_traceme) (struct task_struct *parent); 1472 int (*ptrace_traceme) (struct task_struct *parent);
1446 int (*capget) (struct task_struct *target, 1473 int (*capget) (struct task_struct *target,
@@ -1739,6 +1766,13 @@ extern void __init security_fixup_ops(struct security_operations *ops);
1739 1766
1740 1767
1741/* Security operations */ 1768/* Security operations */
1769int security_binder_set_context_mgr(struct task_struct *mgr);
1770int security_binder_transaction(struct task_struct *from,
1771 struct task_struct *to);
1772int security_binder_transfer_binder(struct task_struct *from,
1773 struct task_struct *to);
1774int security_binder_transfer_file(struct task_struct *from,
1775 struct task_struct *to, struct file *file);
1742int security_ptrace_access_check(struct task_struct *child, unsigned int mode); 1776int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
1743int security_ptrace_traceme(struct task_struct *parent); 1777int security_ptrace_traceme(struct task_struct *parent);
1744int security_capget(struct task_struct *target, 1778int security_capget(struct task_struct *target,
@@ -1927,6 +1961,30 @@ static inline int security_init(void)
1927 return 0; 1961 return 0;
1928} 1962}
1929 1963
1964static inline int security_binder_set_context_mgr(struct task_struct *mgr)
1965{
1966 return 0;
1967}
1968
1969static inline int security_binder_transaction(struct task_struct *from,
1970 struct task_struct *to)
1971{
1972 return 0;
1973}
1974
1975static inline int security_binder_transfer_binder(struct task_struct *from,
1976 struct task_struct *to)
1977{
1978 return 0;
1979}
1980
1981static inline int security_binder_transfer_file(struct task_struct *from,
1982 struct task_struct *to,
1983 struct file *file)
1984{
1985 return 0;
1986}
1987
1930static inline int security_ptrace_access_check(struct task_struct *child, 1988static inline int security_ptrace_access_check(struct task_struct *child,
1931 unsigned int mode) 1989 unsigned int mode)
1932{ 1990{
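
A sketch of how an LSM of this vintage might wire up the new binder hooks through struct security_operations (the policy checks shown are placeholders, not any real LSM's logic):

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/security.h>

static int example_binder_set_context_mgr(struct task_struct *mgr)
{
	/* Placeholder policy: only privileged callers may register @mgr. */
	return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
}

static int example_binder_transaction(struct task_struct *from,
				      struct task_struct *to)
{
	/* A real LSM would consult its policy for a @from -> @to relation. */
	return 0;
}

static struct security_operations example_ops = {
	.name			= "example",
	.binder_set_context_mgr	= example_binder_set_context_mgr,
	.binder_transaction	= example_binder_transaction,
	/* Unset hooks are filled with defaults by security_fixup_ops(). */
};
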
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index 9aafe0e24c68..fb7eb9ccb1cd 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -125,9 +125,6 @@ extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
125 unsigned int len); 125 unsigned int len);
126extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); 126extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
127 127
128extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
129 int nmaskbits);
130
131#ifdef CONFIG_BINARY_PRINTF 128#ifdef CONFIG_BINARY_PRINTF
132extern int 129extern int
133seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); 130seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index cf6a9daaaf6d..afbb1fd77c77 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -126,31 +126,6 @@ int seq_path(struct seq_file *, const struct path *, const char *);
126int seq_dentry(struct seq_file *, struct dentry *, const char *); 126int seq_dentry(struct seq_file *, struct dentry *, const char *);
127int seq_path_root(struct seq_file *m, const struct path *path, 127int seq_path_root(struct seq_file *m, const struct path *path,
128 const struct path *root, const char *esc); 128 const struct path *root, const char *esc);
129int seq_bitmap(struct seq_file *m, const unsigned long *bits,
130 unsigned int nr_bits);
131static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
132{
133 return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids);
134}
135
136static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
137{
138 return seq_bitmap(m, mask->bits, MAX_NUMNODES);
139}
140
141int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
142 unsigned int nr_bits);
143
144static inline int seq_cpumask_list(struct seq_file *m,
145 const struct cpumask *mask)
146{
147 return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids);
148}
149
150static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
151{
152 return seq_bitmap_list(m, mask->bits, MAX_NUMNODES);
153}
154 129
155int single_open(struct file *, int (*)(struct seq_file *, void *), void *); 130int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
156int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t); 131int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
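
The seq_bitmap()/seq_cpumask() family goes away; callers are converted to the bitmap printf specifiers instead. A sketch of the replacement pattern, assuming the %*pb/%*pbl extension and cpumask_pr_args() from the same series:

#include <linux/cpumask.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	/* was: seq_cpumask(m, cpu_online_mask); */
	seq_printf(m, "%*pb\n", cpumask_pr_args(cpu_online_mask));

	/* was: seq_cpumask_list(m, cpu_online_mask); */
	seq_printf(m, "%*pbl\n", cpumask_pr_args(cpu_online_mask));
	return 0;
}
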
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index e02acf0a0ec9..a8efa235b7c1 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -85,6 +85,9 @@ struct uart_8250_port {
85 unsigned char mcr_force; /* mask of forced bits */ 85 unsigned char mcr_force; /* mask of forced bits */
86 unsigned char cur_iotype; /* Running I/O type */ 86 unsigned char cur_iotype; /* Running I/O type */
87 unsigned int rpm_tx_active; 87 unsigned int rpm_tx_active;
88 unsigned char canary; /* non-zero during system sleep
89 * if no_console_suspend
90 */
88 91
89 /* 92 /*
90 * Some bits in registers are cleared on a read, so they must 93 * Some bits in registers are cleared on a read, so they must
@@ -126,6 +129,7 @@ extern int serial8250_do_startup(struct uart_port *port);
126extern void serial8250_do_shutdown(struct uart_port *port); 129extern void serial8250_do_shutdown(struct uart_port *port);
127extern void serial8250_do_pm(struct uart_port *port, unsigned int state, 130extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
128 unsigned int oldstate); 131 unsigned int oldstate);
132extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
129extern int fsl8250_handle_irq(struct uart_port *port); 133extern int fsl8250_handle_irq(struct uart_port *port);
130int serial8250_handle_irq(struct uart_port *port, unsigned int iir); 134int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
131unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr); 135unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
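
serial8250_do_set_mctrl() is exported so a driver can keep the generic modem-control handling while layering a quirk on top, installing its wrapper via the uart_port->set_mctrl callback added in serial_core.h below. A hypothetical example:

#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/termios.h>

static void example_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* Hypothetical board quirk: the RTS line is not usable here. */
	mctrl &= ~TIOCM_RTS;

	serial8250_do_set_mctrl(port, mctrl);
}

/* In the driver's setup code (sketch): up->port.set_mctrl = example_set_mctrl; */
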
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 057038cf2788..baf3e1d08416 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -123,6 +123,7 @@ struct uart_port {
123 void (*set_termios)(struct uart_port *, 123 void (*set_termios)(struct uart_port *,
124 struct ktermios *new, 124 struct ktermios *new,
125 struct ktermios *old); 125 struct ktermios *old);
126 void (*set_mctrl)(struct uart_port *, unsigned int);
126 int (*startup)(struct uart_port *port); 127 int (*startup)(struct uart_port *port);
127 void (*shutdown)(struct uart_port *port); 128 void (*shutdown)(struct uart_port *port);
128 void (*throttle)(struct uart_port *port); 129 void (*throttle)(struct uart_port *port);
@@ -190,8 +191,10 @@ struct uart_port {
190#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) 191#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
191#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ ) 192#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
192 193
193/* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */ 194/* Port has hardware-assisted h/w flow control */
194#define UPF_HARD_FLOW ((__force upf_t) (1 << 21)) 195#define UPF_AUTO_CTS ((__force upf_t) (1 << 20))
196#define UPF_AUTO_RTS ((__force upf_t) (1 << 21))
197#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
195/* Port has hardware-assisted s/w flow control */ 198/* Port has hardware-assisted s/w flow control */
196#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22)) 199#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22))
197#define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) 200#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
@@ -213,11 +216,17 @@ struct uart_port {
213#error Change mask not equivalent to userspace-visible bit defines 216#error Change mask not equivalent to userspace-visible bit defines
214#endif 217#endif
215 218
216 /* status must be updated while holding port lock */ 219 /*
220 * Must hold termios_rwsem, port mutex and port lock to change;
221 * can hold any one lock to read.
222 */
217 upstat_t status; 223 upstat_t status;
218 224
219#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0)) 225#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0))
220#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1)) 226#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1))
227#define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2))
228#define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3))
229#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4))
221 230
222 int hw_stopped; /* sw-assisted CTS flow state */ 231 int hw_stopped; /* sw-assisted CTS flow state */
223 unsigned int mctrl; /* current modem ctrl settings */ 232 unsigned int mctrl; /* current modem ctrl settings */
@@ -391,6 +400,13 @@ static inline bool uart_cts_enabled(struct uart_port *uport)
391 return !!(uport->status & UPSTAT_CTS_ENABLE); 400 return !!(uport->status & UPSTAT_CTS_ENABLE);
392} 401}
393 402
403static inline bool uart_softcts_mode(struct uart_port *uport)
404{
405 upstat_t mask = UPSTAT_CTS_ENABLE | UPSTAT_AUTOCTS;
406
407 return ((uport->status & mask) == UPSTAT_CTS_ENABLE);
408}
409
394/* 410/*
395 * The following are helper functions for the low level drivers. 411 * The following are helper functions for the low level drivers.
396 */ 412 */
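
The new UPSTAT_AUTOCTS/UPSTAT_AUTORTS bits plus uart_softcts_mode() let the core distinguish hardware-assisted flow control from the software-assisted kind. A driver-side sketch (the register programming is abstracted behind a hypothetical helper, and the locking rule documented above is elided):

#include <linux/serial_core.h>
#include <linux/termios.h>

/* Hypothetical: program the UART's auto-CTS/RTS enable bits. */
static void example_enable_hw_flow(struct uart_port *port)
{
}

static void example_set_termios(struct uart_port *port,
				struct ktermios *termios,
				struct ktermios *old)
{
	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);

	if (termios->c_cflag & CRTSCTS) {
		example_enable_hw_flow(port);
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	}

	/*
	 * With only UPSTAT_CTS_ENABLE set (no AUTOCTS), uart_softcts_mode()
	 * returns true and the driver must stop/start TX itself on CTS changes.
	 */
	if (uart_softcts_mode(port)) {
		/* handle CTS changes from the modem-status interrupt */
	}
}
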
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index e6fc9567690b..a7f004a3c177 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -104,6 +104,31 @@
104 S3C2410_UCON_RXIRQMODE | \ 104 S3C2410_UCON_RXIRQMODE | \
105 S3C2410_UCON_RXFIFO_TOI) 105 S3C2410_UCON_RXFIFO_TOI)
106 106
107#define S3C64XX_UCON_TXBURST_1 (0<<20)
108#define S3C64XX_UCON_TXBURST_4 (1<<20)
109#define S3C64XX_UCON_TXBURST_8 (2<<20)
110#define S3C64XX_UCON_TXBURST_16 (3<<20)
111#define S3C64XX_UCON_TXBURST_MASK (0xf<<20)
112#define S3C64XX_UCON_RXBURST_1 (0<<16)
113#define S3C64XX_UCON_RXBURST_4 (1<<16)
114#define S3C64XX_UCON_RXBURST_8 (2<<16)
115#define S3C64XX_UCON_RXBURST_16 (3<<16)
116#define S3C64XX_UCON_RXBURST_MASK (0xf<<16)
117#define S3C64XX_UCON_TIMEOUT_SHIFT (12)
118#define S3C64XX_UCON_TIMEOUT_MASK (0xf<<12)
119#define S3C64XX_UCON_EMPTYINT_EN (1<<11)
120#define S3C64XX_UCON_DMASUS_EN (1<<10)
121#define S3C64XX_UCON_TXINT_LEVEL (1<<9)
122#define S3C64XX_UCON_RXINT_LEVEL (1<<8)
123#define S3C64XX_UCON_TIMEOUT_EN (1<<7)
124#define S3C64XX_UCON_ERRINT_EN (1<<6)
125#define S3C64XX_UCON_TXMODE_DMA (2<<2)
126#define S3C64XX_UCON_TXMODE_CPU (1<<2)
127#define S3C64XX_UCON_TXMODE_MASK (3<<2)
128#define S3C64XX_UCON_RXMODE_DMA (2<<0)
129#define S3C64XX_UCON_RXMODE_CPU (1<<0)
130#define S3C64XX_UCON_RXMODE_MASK (3<<0)
131
107#define S3C2410_UFCON_FIFOMODE (1<<0) 132#define S3C2410_UFCON_FIFOMODE (1<<0)
108#define S3C2410_UFCON_TXTRIG0 (0<<6) 133#define S3C2410_UFCON_TXTRIG0 (0<<6)
109#define S3C2410_UFCON_RXTRIG8 (1<<4) 134#define S3C2410_UFCON_RXTRIG8 (1<<4)
@@ -155,6 +180,7 @@
155#define S3C2440_UFSTAT_TXMASK (63<<8) 180#define S3C2440_UFSTAT_TXMASK (63<<8)
156#define S3C2440_UFSTAT_RXMASK (63) 181#define S3C2440_UFSTAT_RXMASK (63)
157 182
183#define S3C2410_UTRSTAT_TIMEOUT (1<<3)
158#define S3C2410_UTRSTAT_TXE (1<<2) 184#define S3C2410_UTRSTAT_TXE (1<<2)
159#define S3C2410_UTRSTAT_TXFE (1<<1) 185#define S3C2410_UTRSTAT_TXFE (1<<1)
160#define S3C2410_UTRSTAT_RXDR (1<<0) 186#define S3C2410_UTRSTAT_RXDR (1<<0)
@@ -179,8 +205,10 @@
179#define S3C64XX_UINTM 0x38 205#define S3C64XX_UINTM 0x38
180 206
181#define S3C64XX_UINTM_RXD (0) 207#define S3C64XX_UINTM_RXD (0)
208#define S3C64XX_UINTM_ERROR (1)
182#define S3C64XX_UINTM_TXD (2) 209#define S3C64XX_UINTM_TXD (2)
183#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD) 210#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD)
211#define S3C64XX_UINTM_ERR_MSK (1 << S3C64XX_UINTM_ERROR)
184#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD) 212#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD)
185 213
186/* Following are specific to S5PV210 */ 214/* Following are specific to S5PV210 */
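
A sketch of composing a UCON value with the new S3C64XX burst/timeout fields (it relies on the S3C2410_UCON register offset defined elsewhere in this header; the timeout value 0x3 is arbitrary):

#include <linux/io.h>
#include <linux/serial_s3c.h>

static void example_setup_rx_dma(void __iomem *uart_base)
{
	u32 ucon = readl(uart_base + S3C2410_UCON);

	ucon &= ~(S3C64XX_UCON_RXBURST_MASK | S3C64XX_UCON_RXMODE_MASK |
		  S3C64XX_UCON_TIMEOUT_MASK);
	ucon |= S3C64XX_UCON_RXBURST_16 |		/* 16-byte DMA bursts */
		S3C64XX_UCON_RXMODE_DMA |		/* RX handled by DMA */
		S3C64XX_UCON_TIMEOUT_EN |		/* flag an RX timeout */
		(0x3 << S3C64XX_UCON_TIMEOUT_SHIFT);	/* arbitrary timeout */

	writel(ucon, uart_base + S3C2410_UCON);
}
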
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index f4aee75f00b1..4fcacd915d45 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -20,6 +20,9 @@ struct shrink_control {
20 20
21 /* current node being shrunk (for NUMA aware shrinkers) */ 21 /* current node being shrunk (for NUMA aware shrinkers) */
22 int nid; 22 int nid;
23
24 /* current memcg being shrunk (for memcg aware shrinkers) */
25 struct mem_cgroup *memcg;
23}; 26};
24 27
25#define SHRINK_STOP (~0UL) 28#define SHRINK_STOP (~0UL)
@@ -61,7 +64,8 @@ struct shrinker {
61#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ 64#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
62 65
63/* Flags */ 66/* Flags */
64#define SHRINKER_NUMA_AWARE (1 << 0) 67#define SHRINKER_NUMA_AWARE (1 << 0)
68#define SHRINKER_MEMCG_AWARE (1 << 1)
65 69
66extern int register_shrinker(struct shrinker *); 70extern int register_shrinker(struct shrinker *);
67extern void unregister_shrinker(struct shrinker *); 71extern void unregister_shrinker(struct shrinker *);
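With the new SHRINKER_MEMCG_AWARE flag a shrinker's callbacks are also invoked per memory cgroup, and sc->memcg tells them which cgroup is under reclaim. A hedged registration sketch; the demo_* names and the empty callback bodies are placeholders, not part of this patch:

static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	/* count only objects charged to sc->memcg on node sc->nid */
	return 0;
}

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	return SHRINK_STOP;	/* nothing reclaimable in this sketch */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/* register_shrinker(&demo_shrinker) at init time,
 * unregister_shrinker(&demo_shrinker) on teardown.
 */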
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 85ab7d72b54c..1bb36edb66b9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -626,8 +626,11 @@ struct sk_buff {
626 __u32 hash; 626 __u32 hash;
627 __be16 vlan_proto; 627 __be16 vlan_proto;
628 __u16 vlan_tci; 628 __u16 vlan_tci;
629#ifdef CONFIG_NET_RX_BUSY_POLL 629#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
630 unsigned int napi_id; 630 union {
631 unsigned int napi_id;
632 unsigned int sender_cpu;
633 };
631#endif 634#endif
632#ifdef CONFIG_NETWORK_SECMARK 635#ifdef CONFIG_NETWORK_SECMARK
633 __u32 secmark; 636 __u32 secmark;
@@ -2484,19 +2487,18 @@ static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2484} 2487}
2485 2488
2486static inline int skb_add_data(struct sk_buff *skb, 2489static inline int skb_add_data(struct sk_buff *skb,
2487 char __user *from, int copy) 2490 struct iov_iter *from, int copy)
2488{ 2491{
2489 const int off = skb->len; 2492 const int off = skb->len;
2490 2493
2491 if (skb->ip_summed == CHECKSUM_NONE) { 2494 if (skb->ip_summed == CHECKSUM_NONE) {
2492 int err = 0; 2495 __wsum csum = 0;
2493 __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy), 2496 if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
2494 copy, 0, &err); 2497 &csum, from) == copy) {
2495 if (!err) {
2496 skb->csum = csum_block_add(skb->csum, csum, off); 2498 skb->csum = csum_block_add(skb->csum, csum, off);
2497 return 0; 2499 return 0;
2498 } 2500 }
2499 } else if (!copy_from_user(skb_put(skb, copy), from, copy)) 2501 } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
2500 return 0; 2502 return 0;
2501 2503
2502 __skb_trim(skb, off); 2504 __skb_trim(skb, off);
@@ -2693,8 +2695,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
2693 2695
2694static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) 2696static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
2695{ 2697{
2696 /* XXX: stripping const */ 2698 return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
2697 return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
2698} 2699}
2699 2700
2700static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) 2701static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
@@ -3071,7 +3072,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3071 3072
3072#define skb_checksum_validate_zero_check(skb, proto, check, \ 3073#define skb_checksum_validate_zero_check(skb, proto, check, \
3073 compute_pseudo) \ 3074 compute_pseudo) \
3074 __skb_checksum_validate_(skb, proto, true, true, check, compute_pseudo) 3075 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
3075 3076
3076#define skb_checksum_simple_validate(skb) \ 3077#define skb_checksum_simple_validate(skb) \
3077 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) 3078 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
@@ -3096,6 +3097,27 @@ do { \
3096 compute_pseudo(skb, proto)); \ 3097 compute_pseudo(skb, proto)); \
3097} while (0) 3098} while (0)
3098 3099
3100/* Update skb and packet to reflect the remote checksum offload operation.
3101 * When called, ptr indicates the starting point for skb->csum when
3102 * ip_summed is CHECKSUM_COMPLETE. If we need to create the complete
3103 * checksum here, skb_postpull_rcsum() is done so that skb->csum starts at ptr.
3104 */
3105static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
3106 int start, int offset)
3107{
3108 __wsum delta;
3109
3110 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
3111 __skb_checksum_complete(skb);
3112 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
3113 }
3114
3115 delta = remcsum_adjust(ptr, skb->csum, start, offset);
3116
3117 /* Adjust skb->csum since we changed the packet */
3118 skb->csum = csum_add(skb->csum, delta);
3119}
3120
3099#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3121#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3100void nf_conntrack_destroy(struct nf_conntrack *nfct); 3122void nf_conntrack_destroy(struct nf_conntrack *nfct);
3101static inline void nf_conntrack_put(struct nf_conntrack *nfct) 3123static inline void nf_conntrack_put(struct nf_conntrack *nfct)
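skb_add_data() now consumes an iov_iter rather than a raw user pointer, so sendmsg paths hand it &msg->msg_iter and let it advance the iterator (and fold the bytes into skb->csum when the checksum is computed in software). A hedged caller sketch; demo_append() is illustrative and assumes the skb already has enough tailroom:

static int demo_append(struct sk_buff *skb, struct msghdr *msg, int copy)
{
	/* copy "copy" bytes from the message iterator to the skb tail;
	 * on failure skb_add_data() trims the skb back by itself.
	 */
	if (skb_add_data(skb, &msg->msg_iter, copy))
		return -EFAULT;
	return 0;
}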
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9a139b637069..76f1feeabd38 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -104,6 +104,7 @@
104 (unsigned long)ZERO_SIZE_PTR) 104 (unsigned long)ZERO_SIZE_PTR)
105 105
106#include <linux/kmemleak.h> 106#include <linux/kmemleak.h>
107#include <linux/kasan.h>
107 108
108struct mem_cgroup; 109struct mem_cgroup;
109/* 110/*
@@ -115,14 +116,12 @@ int slab_is_available(void);
115struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 116struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
116 unsigned long, 117 unsigned long,
117 void (*)(void *)); 118 void (*)(void *));
118#ifdef CONFIG_MEMCG_KMEM
119struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *,
120 struct kmem_cache *,
121 const char *);
122#endif
123void kmem_cache_destroy(struct kmem_cache *); 119void kmem_cache_destroy(struct kmem_cache *);
124int kmem_cache_shrink(struct kmem_cache *); 120int kmem_cache_shrink(struct kmem_cache *);
125void kmem_cache_free(struct kmem_cache *, void *); 121
122void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
123void memcg_deactivate_kmem_caches(struct mem_cgroup *);
124void memcg_destroy_kmem_caches(struct mem_cgroup *);
126 125
127/* 126/*
128 * Please use this macro to create slab caches. Simply specify the 127 * Please use this macro to create slab caches. Simply specify the
@@ -289,6 +288,7 @@ static __always_inline int kmalloc_index(size_t size)
289 288
290void *__kmalloc(size_t size, gfp_t flags); 289void *__kmalloc(size_t size, gfp_t flags);
291void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); 290void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
291void kmem_cache_free(struct kmem_cache *, void *);
292 292
293#ifdef CONFIG_NUMA 293#ifdef CONFIG_NUMA
294void *__kmalloc_node(size_t size, gfp_t flags, int node); 294void *__kmalloc_node(size_t size, gfp_t flags, int node);
@@ -326,7 +326,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
326static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, 326static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
327 gfp_t flags, size_t size) 327 gfp_t flags, size_t size)
328{ 328{
329 return kmem_cache_alloc(s, flags); 329 void *ret = kmem_cache_alloc(s, flags);
330
331 kasan_kmalloc(s, ret, size);
332 return ret;
330} 333}
331 334
332static __always_inline void * 335static __always_inline void *
@@ -334,7 +337,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
334 gfp_t gfpflags, 337 gfp_t gfpflags,
335 int node, size_t size) 338 int node, size_t size)
336{ 339{
337 return kmem_cache_alloc_node(s, gfpflags, node); 340 void *ret = kmem_cache_alloc_node(s, gfpflags, node);
341
342 kasan_kmalloc(s, ret, size);
343 return ret;
338} 344}
339#endif /* CONFIG_TRACING */ 345#endif /* CONFIG_TRACING */
340 346
@@ -474,14 +480,14 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
474#ifndef ARCH_SLAB_MINALIGN 480#ifndef ARCH_SLAB_MINALIGN
475#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 481#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
476#endif 482#endif
483
484struct memcg_cache_array {
485 struct rcu_head rcu;
486 struct kmem_cache *entries[0];
487};
488
477/* 489/*
478 * This is the main placeholder for memcg-related information in kmem caches. 490 * This is the main placeholder for memcg-related information in kmem caches.
479 * struct kmem_cache will hold a pointer to it, so the memory cost while
480 * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
481 * would otherwise be if that would be bundled in kmem_cache: we'll need an
482 * extra pointer chase. But the trade off clearly lays in favor of not
483 * penalizing non-users.
484 *
485 * Both the root cache and the child caches will have it. For the root cache, 491 * Both the root cache and the child caches will have it. For the root cache,
486 * this will hold a dynamically allocated array large enough to hold 492 * this will hold a dynamically allocated array large enough to hold
487 * information about the currently limited memcgs in the system. To allow the 493 * information about the currently limited memcgs in the system. To allow the
@@ -491,19 +497,18 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
491 * Child caches will hold extra metadata needed for its operation. Fields are: 497 * Child caches will hold extra metadata needed for its operation. Fields are:
492 * 498 *
493 * @memcg: pointer to the memcg this cache belongs to 499 * @memcg: pointer to the memcg this cache belongs to
494 * @list: list_head for the list of all caches in this memcg
495 * @root_cache: pointer to the global, root cache, this cache was derived from 500 * @root_cache: pointer to the global, root cache, this cache was derived from
501 *
502 * Both root and child caches of the same kind are linked into a list chained
503 * through @list.
496 */ 504 */
497struct memcg_cache_params { 505struct memcg_cache_params {
498 bool is_root_cache; 506 bool is_root_cache;
507 struct list_head list;
499 union { 508 union {
500 struct { 509 struct memcg_cache_array __rcu *memcg_caches;
501 struct rcu_head rcu_head;
502 struct kmem_cache *memcg_caches[0];
503 };
504 struct { 510 struct {
505 struct mem_cgroup *memcg; 511 struct mem_cgroup *memcg;
506 struct list_head list;
507 struct kmem_cache *root_cache; 512 struct kmem_cache *root_cache;
508 }; 513 };
509 }; 514 };
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index b869d1662ba3..33d049066c3d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -70,7 +70,7 @@ struct kmem_cache {
70 int obj_offset; 70 int obj_offset;
71#endif /* CONFIG_DEBUG_SLAB */ 71#endif /* CONFIG_DEBUG_SLAB */
72#ifdef CONFIG_MEMCG_KMEM 72#ifdef CONFIG_MEMCG_KMEM
73 struct memcg_cache_params *memcg_params; 73 struct memcg_cache_params memcg_params;
74#endif 74#endif
75 75
76 struct kmem_cache_node *node[MAX_NUMNODES]; 76 struct kmem_cache_node *node[MAX_NUMNODES];
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d82abd40a3c0..33885118523c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -85,7 +85,7 @@ struct kmem_cache {
85 struct kobject kobj; /* For sysfs */ 85 struct kobject kobj; /* For sysfs */
86#endif 86#endif
87#ifdef CONFIG_MEMCG_KMEM 87#ifdef CONFIG_MEMCG_KMEM
88 struct memcg_cache_params *memcg_params; 88 struct memcg_cache_params memcg_params;
89 int max_attr_size; /* for propagation, maximum size of a stored attr */ 89 int max_attr_size; /* for propagation, maximum size of a stored attr */
90#ifdef CONFIG_SYSFS 90#ifdef CONFIG_SYSFS
91 struct kset *memcg_kset; 91 struct kset *memcg_kset;
@@ -110,4 +110,23 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
110} 110}
111#endif 111#endif
112 112
113
114/**
115 * virt_to_obj - returns the address of the beginning of the object.
116 * @s: object's kmem_cache
117 * @slab_page: address of slab page
118 * @x: address within object memory range
119 *
120 * Returns the address of the beginning of the object
121 */
122static inline void *virt_to_obj(struct kmem_cache *s,
123 const void *slab_page,
124 const void *x)
125{
126 return (void *)x - ((x - slab_page) % s->size);
127}
128
129void object_err(struct kmem_cache *s, struct page *page,
130 u8 *object, char *reason);
131
113#endif /* _LINUX_SLUB_DEF_H */ 132#endif /* _LINUX_SLUB_DEF_H */
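virt_to_obj() is simple modular arithmetic: with s->size == 256, an interior pointer at slab_page + 700 maps back to the object start at slab_page + 512 (700 - 700 % 256). A hedged sketch of the lookup a KASAN-style consumer would do; the demo name is illustrative:

static void *demo_containing_object(struct kmem_cache *s,
				    struct page *page, const void *ptr)
{
	void *slab_start = page_address(page);

	/* round "ptr" down to the start of the object it lies inside */
	return virt_to_obj(s, slab_start, ptr);
}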
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 93dff5fff524..be91db2a7017 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -151,6 +151,13 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
151static inline void kick_all_cpus_sync(void) { } 151static inline void kick_all_cpus_sync(void) { }
152static inline void wake_up_all_idle_cpus(void) { } 152static inline void wake_up_all_idle_cpus(void) { }
153 153
154#ifdef CONFIG_UP_LATE_INIT
155extern void __init up_late_init(void);
156static inline void smp_init(void) { up_late_init(); }
157#else
158static inline void smp_init(void) { }
159#endif
160
154#endif /* !SMP */ 161#endif /* !SMP */
155 162
156/* 163/*
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 6e49a14365dc..5c19cba34dce 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -318,13 +318,6 @@ struct ucred {
318/* IPX options */ 318/* IPX options */
319#define IPX_TYPE 1 319#define IPX_TYPE 1
320 320
321extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
322 struct iovec *iov,
323 int offset,
324 unsigned int len, __wsum *csump);
325extern unsigned long iov_pages(const struct iovec *iov, int offset,
326 unsigned long nr_segs);
327
328extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); 321extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
329extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); 322extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
330 323
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
index b2b1afbb3202..cd519a11c2c6 100644
--- a/include/linux/spi/at86rf230.h
+++ b/include/linux/spi/at86rf230.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by: 15 * Written by:
20 * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com> 16 * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
21 */ 17 */
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
index bc8677c8eba9..e69e9b51b21a 100644
--- a/include/linux/spi/l4f00242t03.h
+++ b/include/linux/spi/l4f00242t03.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/ 15*/
20 16
21#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ 17#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
index 555d254e6606..fdd1d1d51da5 100644
--- a/include/linux/spi/lms283gf05.h
+++ b/include/linux/spi/lms283gf05.h
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18*/ 14*/
19 15
20#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ 16#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h
index 4835486f58e5..381d368b91b4 100644
--- a/include/linux/spi/mxs-spi.h
+++ b/include/linux/spi/mxs-spi.h
@@ -15,10 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */ 18 */
23 19
24#ifndef __LINUX_SPI_MXS_SPI_H__ 20#ifndef __LINUX_SPI_MXS_SPI_H__
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index d5a316550177..6d36dacec4ba 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 13 */
18#ifndef __linux_pxa2xx_spi_h 14#ifndef __linux_pxa2xx_spi_h
19#define __linux_pxa2xx_spi_h 15#define __linux_pxa2xx_spi_h
@@ -57,7 +53,6 @@ struct pxa2xx_spi_chip {
57#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) 53#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
58 54
59#include <linux/clk.h> 55#include <linux/clk.h>
60#include <mach/dma.h>
61 56
62extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); 57extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
63 58
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
index e546b2ceb623..a693188cc08b 100644
--- a/include/linux/spi/rspi.h
+++ b/include/linux/spi/rspi.h
@@ -11,11 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 */ 14 */
20 15
21#ifndef __LINUX_SPI_RENESAS_SPI_H__ 16#ifndef __LINUX_SPI_RENESAS_SPI_H__
diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h
index a1121f872ac1..aa0d440ab4f0 100644
--- a/include/linux/spi/sh_hspi.h
+++ b/include/linux/spi/sh_hspi.h
@@ -9,10 +9,6 @@
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details. 11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 */ 12 */
17#ifndef SH_HSPI_H 13#ifndef SH_HSPI_H
18#define SH_HSPI_H 14#define SH_HSPI_H
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index 88a14d81c49e..b087a85f5f72 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -7,6 +7,8 @@ struct sh_msiof_spi_info {
7 u16 num_chipselect; 7 u16 num_chipselect;
8 unsigned int dma_tx_id; 8 unsigned int dma_tx_id;
9 unsigned int dma_rx_id; 9 unsigned int dma_rx_id;
10 u32 dtdl;
11 u32 syncdl;
10}; 12};
11 13
12#endif /* __SPI_SH_MSIOF_H__ */ 14#endif /* __SPI_SH_MSIOF_H__ */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a6ef2a8e6de4..ed9489d893a4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 13 */
18 14
19#ifndef __LINUX_SPI_H 15#ifndef __LINUX_SPI_H
@@ -260,6 +256,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
260 * @pump_messages: work struct for scheduling work to the message pump 256 * @pump_messages: work struct for scheduling work to the message pump
261 * @queue_lock: spinlock to synchronise access to message queue 257 * @queue_lock: spinlock to synchronise access to message queue
262 * @queue: message queue 258 * @queue: message queue
259 * @idling: the device is entering idle state
263 * @cur_msg: the currently in-flight message 260 * @cur_msg: the currently in-flight message
264 * @cur_msg_prepared: spi_prepare_message was called for the currently 261 * @cur_msg_prepared: spi_prepare_message was called for the currently
265 * in-flight message 262 * in-flight message
@@ -425,6 +422,7 @@ struct spi_master {
425 spinlock_t queue_lock; 422 spinlock_t queue_lock;
426 struct list_head queue; 423 struct list_head queue;
427 struct spi_message *cur_msg; 424 struct spi_message *cur_msg;
425 bool idling;
428 bool busy; 426 bool busy;
429 bool running; 427 bool running;
430 bool rt; 428 bool rt;
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
index 60b59187e590..414c6fddfcf0 100644
--- a/include/linux/spi/tle62x0.h
+++ b/include/linux/spi/tle62x0.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/ 15*/
20 16
21struct tle62x0_pdata { 17struct tle62x0_pdata {
diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
index 8f721e465e05..563b3b1799a8 100644
--- a/include/linux/spi/tsc2005.h
+++ b/include/linux/spi/tsc2005.h
@@ -12,11 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */ 15 */
21 16
22#ifndef _LINUX_SPI_TSC2005_H 17#ifndef _LINUX_SPI_TSC2005_H
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 262ba4ef9a8e..3e18379dfa6f 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -190,6 +190,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
190#ifdef CONFIG_DEBUG_LOCK_ALLOC 190#ifdef CONFIG_DEBUG_LOCK_ALLOC
191# define raw_spin_lock_nested(lock, subclass) \ 191# define raw_spin_lock_nested(lock, subclass) \
192 _raw_spin_lock_nested(lock, subclass) 192 _raw_spin_lock_nested(lock, subclass)
193# define raw_spin_lock_bh_nested(lock, subclass) \
194 _raw_spin_lock_bh_nested(lock, subclass)
193 195
194# define raw_spin_lock_nest_lock(lock, nest_lock) \ 196# define raw_spin_lock_nest_lock(lock, nest_lock) \
195 do { \ 197 do { \
@@ -205,6 +207,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
205# define raw_spin_lock_nested(lock, subclass) \ 207# define raw_spin_lock_nested(lock, subclass) \
206 _raw_spin_lock(((void)(subclass), (lock))) 208 _raw_spin_lock(((void)(subclass), (lock)))
207# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) 209# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
210# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
208#endif 211#endif
209 212
210#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 213#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -324,6 +327,11 @@ do { \
324 raw_spin_lock_nested(spinlock_check(lock), subclass); \ 327 raw_spin_lock_nested(spinlock_check(lock), subclass); \
325} while (0) 328} while (0)
326 329
330#define spin_lock_bh_nested(lock, subclass) \
331do { \
332 raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
333} while (0)
334
327#define spin_lock_nest_lock(lock, nest_lock) \ 335#define spin_lock_nest_lock(lock, nest_lock) \
328do { \ 336do { \
329 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ 337 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
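spin_lock_bh_nested() is the BH-disabling counterpart of spin_lock_nested(): it lets lockdep accept a second lock of the same class, which is needed when two objects protected by same-class locks must be held together in softirq context. A hedged sketch of the usual address-ordered double lock; the two locks are assumed distinct, and the caller releases them with a plain spin_unlock_bh() pair in reverse order:

static void demo_double_lock_bh(spinlock_t *a, spinlock_t *b)
{
	/* consistent ordering by address avoids a real ABBA deadlock */
	if (a > b)
		swap(a, b);
	spin_lock_bh(a);
	/* SINGLE_DEPTH_NESTING tells lockdep the second acquisition is intended */
	spin_lock_bh_nested(b, SINGLE_DEPTH_NESTING);
}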
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab89e740..5344268e6e62 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
22void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); 22void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) 23void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock); 24 __acquires(lock);
25void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
26 __acquires(lock);
25void __lockfunc 27void __lockfunc
26_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) 28_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27 __acquires(lock); 29 __acquires(lock);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d0d188861ad6..d3afef9d8dbe 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -57,6 +57,7 @@
57 57
58#define _raw_spin_lock(lock) __LOCK(lock) 58#define _raw_spin_lock(lock) __LOCK(lock)
59#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) 59#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
60#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
60#define _raw_read_lock(lock) __LOCK(lock) 61#define _raw_read_lock(lock) __LOCK(lock)
61#define _raw_write_lock(lock) __LOCK(lock) 62#define _raw_write_lock(lock) __LOCK(lock)
62#define _raw_spin_lock_bh(lock) __LOCK_BH(lock) 63#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a2783cb5d275..9cfd9623fb03 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -45,7 +45,7 @@ struct rcu_batch {
45#define RCU_BATCH_INIT(name) { NULL, &(name.head) } 45#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
46 46
47struct srcu_struct { 47struct srcu_struct {
48 unsigned completed; 48 unsigned long completed;
49 struct srcu_struct_array __percpu *per_cpu_ref; 49 struct srcu_struct_array __percpu *per_cpu_ref;
50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */ 50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */
51 bool running; 51 bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
102 * define and init a srcu struct at build time. 102 * define and init a srcu struct at build time.
103 * don't call init_srcu_struct() or cleanup_srcu_struct() on it. 103 * don't call init_srcu_struct() or cleanup_srcu_struct() on it.
104 */ 104 */
105#define DEFINE_SRCU(name) \ 105#define __DEFINE_SRCU(name, is_static) \
106 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ 106 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
107 struct srcu_struct name = __SRCU_STRUCT_INIT(name); 107 is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
108 108#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
109#define DEFINE_STATIC_SRCU(name) \ 109#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
110 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
111 static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
112 110
113/** 111/**
114 * call_srcu() - Queue a callback for invocation after an SRCU grace period 112 * call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
135void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); 133void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
136void synchronize_srcu(struct srcu_struct *sp); 134void synchronize_srcu(struct srcu_struct *sp);
137void synchronize_srcu_expedited(struct srcu_struct *sp); 135void synchronize_srcu_expedited(struct srcu_struct *sp);
138long srcu_batches_completed(struct srcu_struct *sp); 136unsigned long srcu_batches_completed(struct srcu_struct *sp);
139void srcu_barrier(struct srcu_struct *sp); 137void srcu_barrier(struct srcu_struct *sp);
140 138
141#ifdef CONFIG_DEBUG_LOCK_ALLOC 139#ifdef CONFIG_DEBUG_LOCK_ALLOC
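The refactored macros expand exactly as before; DEFINE_STATIC_SRCU() still gives a file-local domain with its per-CPU array. A minimal hedged reader/updater sketch; struct demo_item and the demo_* functions are illustrative only:

DEFINE_STATIC_SRCU(demo_srcu);

static struct demo_item __rcu *demo_ptr;

static void demo_reader(void)
{
	int idx = srcu_read_lock(&demo_srcu);
	struct demo_item *p = srcu_dereference(demo_ptr, &demo_srcu);

	if (p)
		;	/* p stays valid until srcu_read_unlock() */
	srcu_read_unlock(&demo_srcu, idx);
}

static void demo_retire(struct demo_item *old)
{
	synchronize_srcu(&demo_srcu);	/* wait out all current readers */
	kfree(old);
}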
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index f7b9100686c3..c0f707ac192b 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -173,6 +173,7 @@
173#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) 173#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16))
174#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) 174#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16))
175#define SSB_SPROMSIZE_WORDS_R10 230 175#define SSB_SPROMSIZE_WORDS_R10 230
176#define SSB_SPROMSIZE_WORDS_R11 234
176#define SSB_SPROM_BASE1 0x1000 177#define SSB_SPROM_BASE1 0x1000
177#define SSB_SPROM_BASE31 0x0800 178#define SSB_SPROM_BASE31 0x0800
178#define SSB_SPROM_REVISION 0x007E 179#define SSB_SPROM_REVISION 0x007E
diff --git a/include/linux/string.h b/include/linux/string.h
index 2e22a2e58f3a..e40099e585c9 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -40,9 +40,6 @@ extern int strcmp(const char *,const char *);
40#ifndef __HAVE_ARCH_STRNCMP 40#ifndef __HAVE_ARCH_STRNCMP
41extern int strncmp(const char *,const char *,__kernel_size_t); 41extern int strncmp(const char *,const char *,__kernel_size_t);
42#endif 42#endif
43#ifndef __HAVE_ARCH_STRNICMP
44#define strnicmp strncasecmp
45#endif
46#ifndef __HAVE_ARCH_STRCASECMP 43#ifndef __HAVE_ARCH_STRCASECMP
47extern int strcasecmp(const char *s1, const char *s2); 44extern int strcasecmp(const char *s1, const char *s2);
48#endif 45#endif
@@ -115,7 +112,10 @@ extern void * memchr(const void *,int,__kernel_size_t);
115#endif 112#endif
116void *memchr_inv(const void *s, int c, size_t n); 113void *memchr_inv(const void *s, int c, size_t n);
117 114
115extern void kfree_const(const void *x);
116
118extern char *kstrdup(const char *s, gfp_t gfp); 117extern char *kstrdup(const char *s, gfp_t gfp);
118extern const char *kstrdup_const(const char *s, gfp_t gfp);
119extern char *kstrndup(const char *s, size_t len, gfp_t gfp); 119extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
120extern void *kmemdup(const void *src, size_t len, gfp_t gfp); 120extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
121 121
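kstrdup_const() skips the copy when the source string already lives in .rodata and simply returns the same pointer, which is why the result must be released with kfree_const() rather than kfree(). A hedged sketch; struct demo_obj and its helpers are illustrative:

struct demo_obj {
	const char *name;
};

static int demo_set_name(struct demo_obj *obj, const char *name)
{
	obj->name = kstrdup_const(name, GFP_KERNEL);	/* no allocation for literals */
	return obj->name ? 0 : -ENOMEM;
}

static void demo_release(struct demo_obj *obj)
{
	kfree_const(obj->name);		/* pairs with kstrdup_const() */
}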
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 6eb567ac56bc..657571817260 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -10,8 +10,8 @@ enum string_size_units {
10 STRING_UNITS_2, /* use binary powers of 2^10 */ 10 STRING_UNITS_2, /* use binary powers of 2^10 */
11}; 11};
12 12
13int string_get_size(u64 size, enum string_size_units units, 13void string_get_size(u64 size, enum string_size_units units,
14 char *buf, int len); 14 char *buf, int len);
15 15
16#define UNESCAPE_SPACE 0x01 16#define UNESCAPE_SPACE 0x01
17#define UNESCAPE_OCTAL 0x02 17#define UNESCAPE_OCTAL 0x02
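string_get_size() now returns void, so callers simply format into a caller-supplied buffer. A hedged usage sketch with illustrative names:

static void demo_print_size(u64 bytes)
{
	char buf[16];

	string_get_size(bytes, STRING_UNITS_2, buf, sizeof(buf));
	pr_info("capacity: %s\n", buf);	/* e.g. "1.00 MiB" for 1 << 20 */
}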
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index d86acc63b25f..598ba80ec30c 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -57,7 +57,7 @@ struct rpc_clnt {
57 const struct rpc_timeout *cl_timeout; /* Timeout strategy */ 57 const struct rpc_timeout *cl_timeout; /* Timeout strategy */
58 58
59 int cl_nodelen; /* nodename length */ 59 int cl_nodelen; /* nodename length */
60 char cl_nodename[UNX_MAXNODENAME]; 60 char cl_nodename[UNX_MAXNODENAME+1];
61 struct rpc_pipe_dir_head cl_pipedir_objects; 61 struct rpc_pipe_dir_head cl_pipedir_objects;
62 struct rpc_clnt * cl_parent; /* Points to parent of clones */ 62 struct rpc_clnt * cl_parent; /* Points to parent of clones */
63 struct rpc_rtt cl_rtt_default; 63 struct rpc_rtt cl_rtt_default;
@@ -112,6 +112,7 @@ struct rpc_create_args {
112 struct sockaddr *saddress; 112 struct sockaddr *saddress;
113 const struct rpc_timeout *timeout; 113 const struct rpc_timeout *timeout;
114 const char *servername; 114 const char *servername;
115 const char *nodename;
115 const struct rpc_program *program; 116 const struct rpc_program *program;
116 u32 prognumber; /* overrides program->number */ 117 u32 prognumber; /* overrides program->number */
117 u32 version; 118 u32 version;
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index eecb5a71e6c0..7e61a17030a4 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -79,6 +79,8 @@ struct rpc_clnt;
79struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *); 79struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *);
80void rpc_count_iostats(const struct rpc_task *, 80void rpc_count_iostats(const struct rpc_task *,
81 struct rpc_iostats *); 81 struct rpc_iostats *);
82void rpc_count_iostats_metrics(const struct rpc_task *,
83 struct rpc_iostats *);
82void rpc_print_iostats(struct seq_file *, struct rpc_clnt *); 84void rpc_print_iostats(struct seq_file *, struct rpc_clnt *);
83void rpc_free_iostats(struct rpc_iostats *); 85void rpc_free_iostats(struct rpc_iostats *);
84 86
@@ -87,6 +89,8 @@ void rpc_free_iostats(struct rpc_iostats *);
87static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; } 89static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; }
88static inline void rpc_count_iostats(const struct rpc_task *task, 90static inline void rpc_count_iostats(const struct rpc_task *task,
89 struct rpc_iostats *stats) {} 91 struct rpc_iostats *stats) {}
92static inline void rpc_count_iostats_metrics(const struct rpc_task *,
93 struct rpc_iostats *) {}
90static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {} 94static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
91static inline void rpc_free_iostats(struct rpc_iostats *stats) {} 95static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
92 96
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index b78f16b1dea3..f33c5a4d6fe4 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -42,6 +42,9 @@
42 42
43#include <linux/types.h> 43#include <linux/types.h>
44 44
45#define RPCRDMA_VERSION 1
46#define rpcrdma_version cpu_to_be32(RPCRDMA_VERSION)
47
45struct rpcrdma_segment { 48struct rpcrdma_segment {
46 __be32 rs_handle; /* Registered memory handle */ 49 __be32 rs_handle; /* Registered memory handle */
47 __be32 rs_length; /* Length of the chunk in bytes */ 50 __be32 rs_length; /* Length of the chunk in bytes */
@@ -95,7 +98,10 @@ struct rpcrdma_msg {
95 } rm_body; 98 } rm_body;
96}; 99};
97 100
98#define RPCRDMA_HDRLEN_MIN 28 101/*
102 * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks
103 */
104#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7)
99 105
100enum rpcrdma_errcode { 106enum rpcrdma_errcode {
101 ERR_VERS = 1, 107 ERR_VERS = 1,
@@ -115,4 +121,10 @@ enum rpcrdma_proc {
115 RDMA_ERROR = 4 /* An RPC RDMA encoding error */ 121 RDMA_ERROR = 4 /* An RPC RDMA encoding error */
116}; 122};
117 123
124#define rdma_msg cpu_to_be32(RDMA_MSG)
125#define rdma_nomsg cpu_to_be32(RDMA_NOMSG)
126#define rdma_msgp cpu_to_be32(RDMA_MSGP)
127#define rdma_done cpu_to_be32(RDMA_DONE)
128#define rdma_error cpu_to_be32(RDMA_ERROR)
129
118#endif /* _LINUX_SUNRPC_RPC_RDMA_H */ 130#endif /* _LINUX_SUNRPC_RPC_RDMA_H */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 6f22cfeef5e3..fae6fb947fc8 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -110,7 +110,7 @@ struct svc_serv {
110 * We use sv_nrthreads as a reference count. svc_destroy() drops 110 * We use sv_nrthreads as a reference count. svc_destroy() drops
111 * this refcount, so we need to bump it up around operations that 111 * this refcount, so we need to bump it up around operations that
112 * change the number of threads. Horrible, but there it is. 112 * change the number of threads. Horrible, but there it is.
113 * Should be called with the BKL held. 113 * Should be called with the "service mutex" held.
114 */ 114 */
115static inline void svc_get(struct svc_serv *serv) 115static inline void svc_get(struct svc_serv *serv)
116{ 116{
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 975da754c778..df8edf8ec914 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -63,8 +63,6 @@ extern atomic_t rdma_stat_rq_prod;
63extern atomic_t rdma_stat_sq_poll; 63extern atomic_t rdma_stat_sq_poll;
64extern atomic_t rdma_stat_sq_prod; 64extern atomic_t rdma_stat_sq_prod;
65 65
66#define RPCRDMA_VERSION 1
67
68/* 66/*
69 * Contexts are built when an RDMA request is created and are a 67 * Contexts are built when an RDMA request is created and are a
70 * record of the resources that can be recovered when the request 68 * record of the resources that can be recovered when the request
@@ -79,6 +77,7 @@ struct svc_rdma_op_ctxt {
79 enum ib_wr_opcode wr_op; 77 enum ib_wr_opcode wr_op;
80 enum ib_wc_status wc_status; 78 enum ib_wc_status wc_status;
81 u32 byte_len; 79 u32 byte_len;
80 u32 position;
82 struct svcxprt_rdma *xprt; 81 struct svcxprt_rdma *xprt;
83 unsigned long flags; 82 unsigned long flags;
84 enum dma_data_direction direction; 83 enum dma_data_direction direction;
@@ -150,6 +149,10 @@ struct svcxprt_rdma {
150 struct ib_cq *sc_rq_cq; 149 struct ib_cq *sc_rq_cq;
151 struct ib_cq *sc_sq_cq; 150 struct ib_cq *sc_sq_cq;
152 struct ib_mr *sc_phys_mr; /* MR for server memory */ 151 struct ib_mr *sc_phys_mr; /* MR for server memory */
152 int (*sc_reader)(struct svcxprt_rdma *,
153 struct svc_rqst *,
154 struct svc_rdma_op_ctxt *,
155 int *, u32 *, u32, u32, u64, bool);
153 u32 sc_dev_caps; /* distilled device caps */ 156 u32 sc_dev_caps; /* distilled device caps */
154 u32 sc_dma_lkey; /* local dma key */ 157 u32 sc_dma_lkey; /* local dma key */
155 unsigned int sc_frmr_pg_list_len; 158 unsigned int sc_frmr_pg_list_len;
@@ -178,8 +181,6 @@ struct svcxprt_rdma {
178#define RPCRDMA_MAX_REQ_SIZE 4096 181#define RPCRDMA_MAX_REQ_SIZE 4096
179 182
180/* svc_rdma_marshal.c */ 183/* svc_rdma_marshal.c */
181extern void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *,
182 int *, int *);
183extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); 184extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
184extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *); 185extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *);
185extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, 186extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -197,6 +198,12 @@ extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *);
197 198
198/* svc_rdma_recvfrom.c */ 199/* svc_rdma_recvfrom.c */
199extern int svc_rdma_recvfrom(struct svc_rqst *); 200extern int svc_rdma_recvfrom(struct svc_rqst *);
201extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *,
202 struct svc_rdma_op_ctxt *, int *, u32 *,
203 u32, u32, u64, bool);
204extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
205 struct svc_rdma_op_ctxt *, int *, u32 *,
206 u32, u32, u64, bool);
200 207
201/* svc_rdma_sendto.c */ 208/* svc_rdma_sendto.c */
202extern int svc_rdma_sendto(struct svc_rqst *); 209extern int svc_rdma_sendto(struct svc_rqst *);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 9d27ac45b909..8b93ef53df3c 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -347,6 +347,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt);
347void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); 347void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
348int xs_swapper(struct rpc_xprt *xprt, int enable); 348int xs_swapper(struct rpc_xprt *xprt, int enable);
349 349
350bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
351void xprt_unlock_connect(struct rpc_xprt *, void *);
352
350/* 353/*
351 * Reserved bit positions in xprt->state 354 * Reserved bit positions in xprt->state
352 */ 355 */
@@ -357,10 +360,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
357#define XPRT_BOUND (4) 360#define XPRT_BOUND (4)
358#define XPRT_BINDING (5) 361#define XPRT_BINDING (5)
359#define XPRT_CLOSING (6) 362#define XPRT_CLOSING (6)
360#define XPRT_CONNECTION_ABORT (7)
361#define XPRT_CONNECTION_CLOSE (8)
362#define XPRT_CONGESTED (9) 363#define XPRT_CONGESTED (9)
363#define XPRT_CONNECTION_REUSE (10)
364 364
365static inline void xprt_set_connected(struct rpc_xprt *xprt) 365static inline void xprt_set_connected(struct rpc_xprt *xprt)
366{ 366{
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 3388c1b6f7d8..5efe743ce1e8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -201,6 +201,21 @@ struct platform_freeze_ops {
201 */ 201 */
202extern void suspend_set_ops(const struct platform_suspend_ops *ops); 202extern void suspend_set_ops(const struct platform_suspend_ops *ops);
203extern int suspend_valid_only_mem(suspend_state_t state); 203extern int suspend_valid_only_mem(suspend_state_t state);
204
205/* Suspend-to-idle state machine. */
206enum freeze_state {
207 FREEZE_STATE_NONE, /* Not suspended/suspending. */
208 FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */
209 FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */
210};
211
212extern enum freeze_state __read_mostly suspend_freeze_state;
213
214static inline bool idle_should_freeze(void)
215{
216 return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
217}
218
204extern void freeze_set_ops(const struct platform_freeze_ops *ops); 219extern void freeze_set_ops(const struct platform_freeze_ops *ops);
205extern void freeze_wake(void); 220extern void freeze_wake(void);
206 221
@@ -228,6 +243,7 @@ extern int pm_suspend(suspend_state_t state);
228 243
229static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} 244static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
230static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } 245static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
246static inline bool idle_should_freeze(void) { return false; }
231static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} 247static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
232static inline void freeze_wake(void) {} 248static inline void freeze_wake(void) {}
233#endif /* !CONFIG_SUSPEND */ 249#endif /* !CONFIG_SUSPEND */
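idle_should_freeze() is meant to be polled from the idle loop: while suspend_freeze_state is FREEZE_STATE_ENTER, idle CPUs bypass the normal governor and park in their deepest state until a wakeup interrupt arrives. A simplified hedged sketch; the loop body is illustrative, and cpuidle_enter_freeze() is assumed to be the companion helper on the cpuidle side:

static void demo_idle_iteration(void)
{
	if (idle_should_freeze()) {
		/* suspend-to-idle in progress: stay in the deepest state
		 * until an IRQ moves the machine to FREEZE_STATE_WAKE
		 */
		cpuidle_enter_freeze();
		return;
	}

	/* normal cpuidle governor selection would run here */
}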
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 34e8b60ab973..7067eca501e2 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -437,16 +437,6 @@ extern int reuse_swap_page(struct page *);
437extern int try_to_free_swap(struct page *); 437extern int try_to_free_swap(struct page *);
438struct backing_dev_info; 438struct backing_dev_info;
439 439
440#ifdef CONFIG_MEMCG
441extern void
442mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
443#else
444static inline void
445mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
446{
447}
448#endif
449
450#else /* CONFIG_SWAP */ 440#else /* CONFIG_SWAP */
451 441
452#define swap_address_space(entry) (NULL) 442#define swap_address_space(entry) (NULL)
@@ -547,11 +537,6 @@ static inline swp_entry_t get_swap_page(void)
547 return entry; 537 return entry;
548} 538}
549 539
550static inline void
551mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
552{
553}
554
555#endif /* CONFIG_SWAP */ 540#endif /* CONFIG_SWAP */
556#endif /* __KERNEL__*/ 541#endif /* __KERNEL__*/
557#endif /* _LINUX_SWAP_H */ 542#endif /* _LINUX_SWAP_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 6adfb7bfbf44..cedf3d3c373f 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -54,7 +54,7 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
54/* check whether a pte points to a swap entry */ 54/* check whether a pte points to a swap entry */
55static inline int is_swap_pte(pte_t pte) 55static inline int is_swap_pte(pte_t pte)
56{ 56{
57 return !pte_none(pte) && !pte_present_nonuma(pte) && !pte_file(pte); 57 return !pte_none(pte) && !pte_present(pte);
58} 58}
59#endif 59#endif
60 60
@@ -66,7 +66,6 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
66{ 66{
67 swp_entry_t arch_entry; 67 swp_entry_t arch_entry;
68 68
69 BUG_ON(pte_file(pte));
70 if (pte_swp_soft_dirty(pte)) 69 if (pte_swp_soft_dirty(pte))
71 pte = pte_swp_clear_soft_dirty(pte); 70 pte = pte_swp_clear_soft_dirty(pte);
72 arch_entry = __pte_to_swp_entry(pte); 71 arch_entry = __pte_to_swp_entry(pte);
@@ -82,7 +81,6 @@ static inline pte_t swp_entry_to_pte(swp_entry_t entry)
82 swp_entry_t arch_entry; 81 swp_entry_t arch_entry;
83 82
84 arch_entry = __swp_entry(swp_type(entry), swp_offset(entry)); 83 arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
85 BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
86 return __swp_entry_to_pte(arch_entry); 84 return __swp_entry_to_pte(arch_entry);
87} 85}
88 86
@@ -137,6 +135,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
137 *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); 135 *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
138} 136}
139 137
138extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
139 spinlock_t *ptl);
140extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 140extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
141 unsigned long address); 141 unsigned long address);
142extern void migration_entry_wait_huge(struct vm_area_struct *vma, 142extern void migration_entry_wait_huge(struct vm_area_struct *vma,
@@ -150,6 +150,8 @@ static inline int is_migration_entry(swp_entry_t swp)
150} 150}
151#define migration_entry_to_page(swp) NULL 151#define migration_entry_to_page(swp) NULL
152static inline void make_migration_entry_read(swp_entry_t *entryp) { } 152static inline void make_migration_entry_read(swp_entry_t *entryp) { }
153static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
154 spinlock_t *ptl) { }
153static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 155static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
154 unsigned long address) { } 156 unsigned long address) { }
155static inline void migration_entry_wait_huge(struct vm_area_struct *vma, 157static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
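With pte_file() gone, any PTE that is neither none nor present is a swap-format entry, so is_swap_pte() reduces to the two checks above. A hedged sketch of how these helpers typically compose in a fault path; the demo function is illustrative:

static bool demo_handle_nonpresent(struct mm_struct *mm, pmd_t *pmd,
				   unsigned long address, pte_t pte)
{
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;		/* pte_none(): nothing mapped here */

	entry = pte_to_swp_entry(pte);
	if (is_migration_entry(entry)) {
		/* page is being migrated: wait, then retry the fault */
		migration_entry_wait(mm, pmd, address);
		return true;
	}

	/* a real swap entry: swap-in would start here */
	return true;
}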
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 85893d744901..76d1e38aabe1 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -410,12 +410,16 @@ asmlinkage long sys_newlstat(const char __user *filename,
410 struct stat __user *statbuf); 410 struct stat __user *statbuf);
411asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf); 411asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf);
412asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf); 412asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf);
413#if BITS_PER_LONG == 32 413#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
414asmlinkage long sys_stat64(const char __user *filename, 414asmlinkage long sys_stat64(const char __user *filename,
415 struct stat64 __user *statbuf); 415 struct stat64 __user *statbuf);
416asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf); 416asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
417asmlinkage long sys_lstat64(const char __user *filename, 417asmlinkage long sys_lstat64(const char __user *filename,
418 struct stat64 __user *statbuf); 418 struct stat64 __user *statbuf);
419asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
420 struct stat64 __user *statbuf, int flag);
421#endif
422#if BITS_PER_LONG == 32
419asmlinkage long sys_truncate64(const char __user *path, loff_t length); 423asmlinkage long sys_truncate64(const char __user *path, loff_t length);
420asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); 424asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
421#endif 425#endif
@@ -771,8 +775,6 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
771 umode_t mode); 775 umode_t mode);
772asmlinkage long sys_newfstatat(int dfd, const char __user *filename, 776asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
773 struct stat __user *statbuf, int flag); 777 struct stat __user *statbuf, int flag);
774asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
775 struct stat64 __user *statbuf, int flag);
776asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf, 778asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
777 int bufsiz); 779 int bufsiz);
778asmlinkage long sys_utimensat(int dfd, const char __user *filename, 780asmlinkage long sys_utimensat(int dfd, const char __user *filename,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 67309ece0772..1a7adb411647 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -115,6 +115,7 @@ struct tcp_request_sock {
115 u32 rcv_isn; 115 u32 rcv_isn;
116 u32 snt_isn; 116 u32 snt_isn;
117 u32 snt_synack; /* synack sent time */ 117 u32 snt_synack; /* synack sent time */
118 u32 last_oow_ack_time; /* last SYNACK */
118 u32 rcv_nxt; /* the ack # by SYNACK. For 119 u32 rcv_nxt; /* the ack # by SYNACK. For
119 * FastOpen it's the seq# 120 * FastOpen it's the seq#
120 * after data-in-SYN. 121 * after data-in-SYN.
@@ -152,6 +153,7 @@ struct tcp_sock {
152 u32 snd_sml; /* Last byte of the most recently transmitted small packet */ 153 u32 snd_sml; /* Last byte of the most recently transmitted small packet */
153 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ 154 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
154 u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ 155 u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
156 u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
155 157
156 u32 tsoffset; /* timestamp offset */ 158 u32 tsoffset; /* timestamp offset */
157 159
@@ -340,6 +342,10 @@ struct tcp_timewait_sock {
340 u32 tw_rcv_wnd; 342 u32 tw_rcv_wnd;
341 u32 tw_ts_offset; 343 u32 tw_ts_offset;
342 u32 tw_ts_recent; 344 u32 tw_ts_recent;
345
346 /* The time we sent the last out-of-window ACK: */
347 u32 tw_last_oow_ack_time;
348
343 long tw_ts_recent_stamp; 349 long tw_ts_recent_stamp;
344#ifdef CONFIG_TCP_MD5SIG 350#ifdef CONFIG_TCP_MD5SIG
345 struct tcp_md5sig_key *tw_md5_key; 351 struct tcp_md5sig_key *tw_md5_key;
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 884d6263e962..c78dcfeaf25f 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,6 +86,7 @@ struct st_proto_s {
86extern long st_register(struct st_proto_s *); 86extern long st_register(struct st_proto_s *);
87extern long st_unregister(struct st_proto_s *); 87extern long st_unregister(struct st_proto_s *);
88 88
89extern struct ti_st_plat_data *dt_pdata;
89 90
90/* 91/*
91 * header information used by st_core.c 92 * header information used by st_core.c
@@ -261,7 +262,7 @@ struct kim_data_s {
261 struct completion kim_rcvd, ldisc_installed; 262 struct completion kim_rcvd, ldisc_installed;
262 char resp_buffer[30]; 263 char resp_buffer[30];
263 const struct firmware *fw_entry; 264 const struct firmware *fw_entry;
264 long nshutdown; 265 unsigned nshutdown;
265 unsigned long rx_state; 266 unsigned long rx_state;
266 unsigned long rx_count; 267 unsigned long rx_count;
267 struct sk_buff *rx_skb; 268 struct sk_buff *rx_skb;
@@ -269,8 +270,8 @@ struct kim_data_s {
269 struct chip_version version; 270 struct chip_version version;
270 unsigned char ldisc_install; 271 unsigned char ldisc_install;
271 unsigned char dev_name[UART_DEV_NAME_LEN + 1]; 272 unsigned char dev_name[UART_DEV_NAME_LEN + 1];
272 unsigned char flow_cntrl; 273 unsigned flow_cntrl;
273 unsigned long baud_rate; 274 unsigned baud_rate;
274}; 275};
275 276
276/** 277/**
@@ -436,10 +437,10 @@ struct gps_event_hdr {
436 * 437 *
437 */ 438 */
438struct ti_st_plat_data { 439struct ti_st_plat_data {
439 long nshutdown_gpio; 440 u32 nshutdown_gpio;
440 unsigned char dev_name[UART_DEV_NAME_LEN]; /* uart name */ 441 unsigned char dev_name[UART_DEV_NAME_LEN]; /* uart name */
441 unsigned char flow_cntrl; /* flow control flag */ 442 u32 flow_cntrl; /* flow control flag */
442 unsigned long baud_rate; 443 u32 baud_rate;
443 int (*suspend)(struct platform_device *, pm_message_t); 444 int (*suspend)(struct platform_device *, pm_message_t);
444 int (*resume)(struct platform_device *); 445 int (*resume)(struct platform_device *);
445 int (*chip_enable) (struct kim_data_s *); 446 int (*chip_enable) (struct kim_data_s *);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index eda850ca757a..9c085dc12ae9 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -79,6 +79,9 @@ extern void __init tick_init(void);
79extern int tick_is_oneshot_available(void); 79extern int tick_is_oneshot_available(void);
80extern struct tick_device *tick_get_device(int cpu); 80extern struct tick_device *tick_get_device(int cpu);
81 81
82extern void tick_freeze(void);
83extern void tick_unfreeze(void);
84
82# ifdef CONFIG_HIGH_RES_TIMERS 85# ifdef CONFIG_HIGH_RES_TIMERS
83extern int tick_init_highres(void); 86extern int tick_init_highres(void);
84extern int tick_program_event(ktime_t expires, int force); 87extern int tick_program_event(ktime_t expires, int force);
@@ -119,6 +122,8 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
119 122
120#else /* CONFIG_GENERIC_CLOCKEVENTS */ 123#else /* CONFIG_GENERIC_CLOCKEVENTS */
121static inline void tick_init(void) { } 124static inline void tick_init(void) { }
125static inline void tick_freeze(void) { }
126static inline void tick_unfreeze(void) { }
122static inline void tick_cancel_sched_timer(int cpu) { } 127static inline void tick_cancel_sched_timer(int cpu) { }
123static inline void tick_clock_notify(void) { } 128static inline void tick_clock_notify(void) { }
124static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } 129static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
@@ -226,5 +231,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk)
226 __tick_nohz_task_switch(tsk); 231 __tick_nohz_task_switch(tsk);
227} 232}
228 233
229
230#endif 234#endif
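The tick.h hunk above adds tick_freeze()/tick_unfreeze(), with empty stubs when CONFIG_GENERIC_CLOCKEVENTS is off. A sketch of the call pattern a low-power/suspend path might use follows; this is only an illustration of the pairing, not the actual kernel caller.

/* Sketch: freeze this CPU's tick around a quiescent state (illustrative). */
#include <linux/tick.h>

static void example_enter_quiescent_state(void)
{
	tick_freeze();    /* stop the per-CPU periodic tick */
	/* ... enter a low-power state here ... */
	tick_unfreeze();  /* restart the tick on the way out */
}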
diff --git a/include/linux/time.h b/include/linux/time.h
index 203c2ad40d71..beebe3a02d43 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -110,6 +110,19 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
110 return true; 110 return true;
111} 111}
112 112
113static inline bool timeval_valid(const struct timeval *tv)
114{
115 /* Dates before 1970 are bogus */
116 if (tv->tv_sec < 0)
117 return false;
118
 119 /* Can't have more microseconds than a second */
120 if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
121 return false;
122
123 return true;
124}
125
113extern struct timespec timespec_trunc(struct timespec t, unsigned gran); 126extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
114 127
115#define CURRENT_TIME (current_kernel_time()) 128#define CURRENT_TIME (current_kernel_time())
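The new timeval_valid() helper above mirrors timespec_valid(): it rejects negative seconds and out-of-range microseconds. A minimal usage sketch, assuming a user-supplied timeval arriving through some ioctl- or setsockopt-style interface:

/* Minimal sketch: validate a user-supplied timeval before using it. */
#include <linux/time.h>
#include <linux/errno.h>

static int example_check_timeval(const struct timeval *tv)
{
	if (!timeval_valid(tv))
		return -EINVAL;  /* negative seconds or tv_usec outside [0, USEC_PER_SEC) */
	return 0;
}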
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
new file mode 100644
index 000000000000..4382035a75bb
--- /dev/null
+++ b/include/linux/timecounter.h
@@ -0,0 +1,139 @@
1/*
2 * linux/include/linux/timecounter.h
3 *
4 * based on code that migrated away from
5 * linux/include/linux/clocksource.h
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17#ifndef _LINUX_TIMECOUNTER_H
18#define _LINUX_TIMECOUNTER_H
19
20#include <linux/types.h>
21
22/* simplify initialization of mask field */
23#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
24
25/**
26 * struct cyclecounter - hardware abstraction for a free running counter
27 * Provides completely state-free accessors to the underlying hardware.
28 * Depending on which hardware it reads, the cycle counter may wrap
29 * around quickly. Locking rules (if necessary) have to be defined
30 * by the implementor and user of specific instances of this API.
31 *
32 * @read: returns the current cycle value
33 * @mask: bitmask for two's complement
34 * subtraction of non 64 bit counters,
35 * see CYCLECOUNTER_MASK() helper macro
36 * @mult: cycle to nanosecond multiplier
37 * @shift: cycle to nanosecond divisor (power of two)
38 */
39struct cyclecounter {
40 cycle_t (*read)(const struct cyclecounter *cc);
41 cycle_t mask;
42 u32 mult;
43 u32 shift;
44};
45
46/**
47 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
48 * Contains the state needed by timecounter_read() to detect
49 * cycle counter wrap around. Initialize with
50 * timecounter_init(). Also used to convert cycle counts into the
51 * corresponding nanosecond counts with timecounter_cyc2time(). Users
52 * of this code are responsible for initializing the underlying
53 * cycle counter hardware, locking issues and reading the time
54 * more often than the cycle counter wraps around. The nanosecond
55 * counter will only wrap around after ~585 years.
56 *
57 * @cc: the cycle counter used by this instance
58 * @cycle_last: most recent cycle counter value seen by
59 * timecounter_read()
60 * @nsec: continuously increasing count
61 * @mask: bit mask for maintaining the 'frac' field
62 * @frac: accumulated fractional nanoseconds
63 */
64struct timecounter {
65 const struct cyclecounter *cc;
66 cycle_t cycle_last;
67 u64 nsec;
68 u64 mask;
69 u64 frac;
70};
71
72/**
73 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
74 * @cc: Pointer to cycle counter.
75 * @cycles: Cycles
76 * @mask: bit mask for maintaining the 'frac' field
77 * @frac: pointer to storage for the fractional nanoseconds.
78 */
79static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
80 cycle_t cycles, u64 mask, u64 *frac)
81{
82 u64 ns = (u64) cycles;
83
84 ns = (ns * cc->mult) + *frac;
85 *frac = ns & mask;
86 return ns >> cc->shift;
87}
88
89/**
90 * timecounter_adjtime - Shifts the time of the clock.
91 * @delta: Desired change in nanoseconds.
92 */
93static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
94{
95 tc->nsec += delta;
96}
97
98/**
99 * timecounter_init - initialize a time counter
100 * @tc: Pointer to time counter which is to be initialized/reset
101 * @cc: A cycle counter, ready to be used.
102 * @start_tstamp: Arbitrary initial time stamp.
103 *
104 * After this call the current cycle register (roughly) corresponds to
105 * the initial time stamp. Every call to timecounter_read() increments
106 * the time stamp counter by the number of elapsed nanoseconds.
107 */
108extern void timecounter_init(struct timecounter *tc,
109 const struct cyclecounter *cc,
110 u64 start_tstamp);
111
112/**
113 * timecounter_read - return nanoseconds elapsed since timecounter_init()
114 * plus the initial time stamp
115 * @tc: Pointer to time counter.
116 *
117 * In other words, keeps track of time since the same epoch as
118 * the function which generated the initial time stamp.
119 */
120extern u64 timecounter_read(struct timecounter *tc);
121
122/**
123 * timecounter_cyc2time - convert a cycle counter to same
124 * time base as values returned by
125 * timecounter_read()
126 * @tc: Pointer to time counter.
127 * @cycle_tstamp: a value returned by tc->cc->read()
128 *
 129 * Cycle counts are converted correctly as long as they
 130 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 131 * with "max cycle count" == cc->mask+1.
132 *
133 * This allows conversion of cycle counter values which were generated
134 * in the past.
135 */
136extern u64 timecounter_cyc2time(struct timecounter *tc,
137 cycle_t cycle_tstamp);
138
139#endif
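The new timecounter.h carries the cyclecounter/timecounter API out of clocksource.h. A hedged sketch of how a driver might wire it up follows; the register read, the 48-bit counter width and the mult/shift values are invented for illustration.

/* Driver-side sketch (assumptions: 48-bit counter, made-up mult/shift). */
#include <linux/timecounter.h>
#include <linux/timekeeping.h>

static cycle_t example_hw_read(const struct cyclecounter *cc)
{
	return 0;	/* would read the free-running hardware counter here */
}

static const struct cyclecounter example_cc = {
	.read  = example_hw_read,
	.mask  = CYCLECOUNTER_MASK(48),	/* counter assumed 48 bits wide */
	.mult  = 1 << 10,		/* cycles -> ns multiplier (illustrative) */
	.shift = 10,			/* matching power-of-two divisor */
};

static struct timecounter example_tc;

static void example_init(void)
{
	/* start the nanosecond count at the current wall-clock time */
	timecounter_init(&example_tc, &example_cc, ktime_get_real_ns());
}

static u64 example_stamp_to_ns(cycle_t hw_stamp)
{
	/* convert a raw hardware timestamp to the timecounter's time base */
	return timecounter_cyc2time(&example_tc, hw_stamp);
}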
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 9b63d13ba82b..3eaae4754275 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -33,6 +33,7 @@ extern time64_t ktime_get_real_seconds(void);
33 33
34extern int __getnstimeofday64(struct timespec64 *tv); 34extern int __getnstimeofday64(struct timespec64 *tv);
35extern void getnstimeofday64(struct timespec64 *tv); 35extern void getnstimeofday64(struct timespec64 *tv);
36extern void getboottime64(struct timespec64 *ts);
36 37
37#if BITS_PER_LONG == 64 38#if BITS_PER_LONG == 64
38/** 39/**
@@ -72,6 +73,11 @@ static inline struct timespec get_monotonic_coarse(void)
72{ 73{
73 return get_monotonic_coarse64(); 74 return get_monotonic_coarse64();
74} 75}
76
77static inline void getboottime(struct timespec *ts)
78{
79 return getboottime64(ts);
80}
75#else 81#else
76/** 82/**
77 * Deprecated. Use do_settimeofday64(). 83 * Deprecated. Use do_settimeofday64().
@@ -129,9 +135,15 @@ static inline struct timespec get_monotonic_coarse(void)
129{ 135{
130 return timespec64_to_timespec(get_monotonic_coarse64()); 136 return timespec64_to_timespec(get_monotonic_coarse64());
131} 137}
132#endif
133 138
134extern void getboottime(struct timespec *ts); 139static inline void getboottime(struct timespec *ts)
140{
141 struct timespec64 ts64;
142
143 getboottime64(&ts64);
144 *ts = timespec64_to_timespec(ts64);
145}
146#endif
135 147
136#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) 148#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
137#define ktime_get_real_ts64(ts) getnstimeofday64(ts) 149#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
@@ -217,6 +229,11 @@ static inline void get_monotonic_boottime(struct timespec *ts)
217 *ts = ktime_to_timespec(ktime_get_boottime()); 229 *ts = ktime_to_timespec(ktime_get_boottime());
218} 230}
219 231
232static inline void get_monotonic_boottime64(struct timespec64 *ts)
233{
234 *ts = ktime_to_timespec64(ktime_get_boottime());
235}
236
220static inline void timekeeping_clocktai(struct timespec *ts) 237static inline void timekeeping_clocktai(struct timespec *ts)
221{ 238{
222 *ts = ktime_to_timespec(ktime_get_clocktai()); 239 *ts = ktime_to_timespec(ktime_get_clocktai());
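With getboottime64() and get_monotonic_boottime64() available per the hunk above, 64-bit-safe boot-time queries look roughly like this; the printout is purely illustrative.

/* Sketch using the new 64-bit boottime helpers (output format made up). */
#include <linux/timekeeping.h>
#include <linux/printk.h>

static void example_report_boottime(void)
{
	struct timespec64 boot, up;

	getboottime64(&boot);            /* wall-clock time the system booted */
	get_monotonic_boottime64(&up);   /* time since boot, including suspend */
	pr_info("booted at %lld s, up for %lld s\n",
		(long long)boot.tv_sec, (long long)up.tv_sec);
}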
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index e08e21e5f601..c72851328ca9 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -173,7 +173,7 @@ extern void syscall_unregfunc(void);
173 TP_PROTO(data_proto), \ 173 TP_PROTO(data_proto), \
174 TP_ARGS(data_args), \ 174 TP_ARGS(data_args), \
175 TP_CONDITION(cond),,); \ 175 TP_CONDITION(cond),,); \
176 if (IS_ENABLED(CONFIG_LOCKDEP)) { \ 176 if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
177 rcu_read_lock_sched_notrace(); \ 177 rcu_read_lock_sched_notrace(); \
178 rcu_dereference_sched(__tracepoint_##name.funcs);\ 178 rcu_dereference_sched(__tracepoint_##name.funcs);\
179 rcu_read_unlock_sched_notrace(); \ 179 rcu_read_unlock_sched_notrace(); \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 7d66ae508e5c..358a337af598 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -14,6 +14,29 @@
14#include <linux/llist.h> 14#include <linux/llist.h>
15 15
16 16
17/*
18 * Lock subclasses for tty locks
19 *
20 * TTY_LOCK_NORMAL is for normal ttys and master ptys.
21 * TTY_LOCK_SLAVE is for slave ptys only.
22 *
23 * Lock subclasses are necessary for handling nested locking with pty pairs.
24 * tty locks which use nested locking:
25 *
26 * legacy_mutex - Nested tty locks are necessary for releasing pty pairs.
27 * The stable lock order is master pty first, then slave pty.
28 * termios_rwsem - The stable lock order is tty_buffer lock->termios_rwsem.
29 * Subclassing this lock enables the slave pty to hold its
30 * termios_rwsem when claiming the master tty_buffer lock.
31 * tty_buffer lock - slave ptys can claim nested buffer lock when handling
32 * signal chars. The stable lock order is slave pty, then
33 * master.
34 */
35
36enum {
37 TTY_LOCK_NORMAL = 0,
38 TTY_LOCK_SLAVE,
39};
17 40
18/* 41/*
19 * (Note: the *_driver.minor_start values 1, 64, 128, 192 are 42 * (Note: the *_driver.minor_start values 1, 64, 128, 192 are
@@ -443,6 +466,7 @@ extern void tty_flush_to_ldisc(struct tty_struct *tty);
443extern void tty_buffer_free_all(struct tty_port *port); 466extern void tty_buffer_free_all(struct tty_port *port);
444extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); 467extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
445extern void tty_buffer_init(struct tty_port *port); 468extern void tty_buffer_init(struct tty_port *port);
469extern void tty_buffer_set_lock_subclass(struct tty_port *port);
446extern speed_t tty_termios_baud_rate(struct ktermios *termios); 470extern speed_t tty_termios_baud_rate(struct ktermios *termios);
447extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); 471extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
448extern void tty_termios_encode_baud_rate(struct ktermios *termios, 472extern void tty_termios_encode_baud_rate(struct ktermios *termios,
@@ -467,7 +491,6 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
467 491
468extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); 492extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
469extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); 493extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
470extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
471 494
472extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); 495extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
473extern void tty_ldisc_deref(struct tty_ldisc *); 496extern void tty_ldisc_deref(struct tty_ldisc *);
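The TTY_LOCK_NORMAL/TTY_LOCK_SLAVE subclasses added above let lockdep distinguish master and slave pty locks when they are taken nested. The following is only an illustration of that nesting pattern with simplified field names, not the real tty_struct layout.

/* Illustration of subclassed nesting (simplified, not real tty internals). */
#include <linux/mutex.h>
#include <linux/tty.h>

struct example_pty_pair {
	struct mutex master_lock;
	struct mutex slave_lock;
};

static void example_init_pair(struct example_pty_pair *p)
{
	mutex_init(&p->master_lock);
	mutex_init(&p->slave_lock);
}

static void example_lock_pair(struct example_pty_pair *p)
{
	/* stable order: master first, then slave, each with its own
	 * lockdep subclass so the nesting is not flagged as recursion */
	mutex_lock_nested(&p->master_lock, TTY_LOCK_NORMAL);
	mutex_lock_nested(&p->slave_lock, TTY_LOCK_SLAVE);
}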
diff --git a/include/linux/types.h b/include/linux/types.h
index a0bb7048687f..6747247e3f9f 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -135,12 +135,9 @@ typedef unsigned long blkcnt_t;
135#endif 135#endif
136 136
137/* 137/*
138 * The type of an index into the pagecache. Use a #define so asm/types.h 138 * The type of an index into the pagecache.
139 * can override it.
140 */ 139 */
141#ifndef pgoff_t
142#define pgoff_t unsigned long 140#define pgoff_t unsigned long
143#endif
144 141
145/* A dma_addr_t can hold any valid DMA or bus address for the platform */ 142/* A dma_addr_t can hold any valid DMA or bus address for the platform */
146#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 143#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
@@ -213,5 +210,8 @@ struct callback_head {
213}; 210};
214#define rcu_head callback_head 211#define rcu_head callback_head
215 212
213/* clocksource cycle base type */
214typedef u64 cycle_t;
215
216#endif /* __ASSEMBLY__ */ 216#endif /* __ASSEMBLY__ */
217#endif /* _LINUX_TYPES_H */ 217#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/udp.h b/include/linux/udp.h
index ee3277593222..247cfdcc4b08 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,11 +49,7 @@ struct udp_sock {
49 unsigned int corkflag; /* Cork is required */ 49 unsigned int corkflag; /* Cork is required */
50 __u8 encap_type; /* Is this an Encapsulation socket? */ 50 __u8 encap_type; /* Is this an Encapsulation socket? */
51 unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ 51 unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
52 no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ 52 no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
53 convert_csum:1;/* On receive, convert checksum
54 * unnecessary to checksum complete
55 * if possible.
56 */
57 /* 53 /*
58 * Following member retains the information to create a UDP header 54 * Following member retains the information to create a UDP header
59 * when the socket is uncorked. 55 * when the socket is uncorked.
@@ -102,16 +98,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
102 return udp_sk(sk)->no_check6_rx; 98 return udp_sk(sk)->no_check6_rx;
103} 99}
104 100
105static inline void udp_set_convert_csum(struct sock *sk, bool val)
106{
107 udp_sk(sk)->convert_csum = val;
108}
109
110static inline bool udp_get_convert_csum(struct sock *sk)
111{
112 return udp_sk(sk)->convert_csum;
113}
114
115#define udp_portaddr_for_each_entry(__sk, node, list) \ 101#define udp_portaddr_for_each_entry(__sk, node, list) \
116 hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) 102 hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
117 103
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1c5e453f7ea9..3e0cb4ea3905 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -135,10 +135,4 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
135size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); 135size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
136size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); 136size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
137 137
138int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
139int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
140 int offset, int len);
141int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
142 int offset, int len);
143
144#endif 138#endif
diff --git a/include/linux/usb.h b/include/linux/usb.h
index f89c24a03bd9..7ee1b5c3b4cb 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -82,7 +82,7 @@ struct usb_host_interface {
82 int extralen; 82 int extralen;
83 unsigned char *extra; /* Extra descriptors */ 83 unsigned char *extra; /* Extra descriptors */
84 84
85 /* array of desc.bNumEndpoint endpoints associated with this 85 /* array of desc.bNumEndpoints endpoints associated with this
86 * interface setting. these will be in no particular order. 86 * interface setting. these will be in no particular order.
87 */ 87 */
88 struct usb_host_endpoint *endpoint; 88 struct usb_host_endpoint *endpoint;
@@ -127,10 +127,6 @@ enum usb_interface_condition {
127 * to the sysfs representation for that device. 127 * to the sysfs representation for that device.
128 * @pm_usage_cnt: PM usage counter for this interface 128 * @pm_usage_cnt: PM usage counter for this interface
129 * @reset_ws: Used for scheduling resets from atomic context. 129 * @reset_ws: Used for scheduling resets from atomic context.
130 * @reset_running: set to 1 if the interface is currently running a
131 * queued reset so that usb_cancel_queued_reset() doesn't try to
132 * remove from the workqueue when running inside the worker
133 * thread. See __usb_queue_reset_device().
134 * @resetting_device: USB core reset the device, so use alt setting 0 as 130 * @resetting_device: USB core reset the device, so use alt setting 0 as
135 * current; needs bandwidth alloc after reset. 131 * current; needs bandwidth alloc after reset.
136 * 132 *
@@ -181,7 +177,6 @@ struct usb_interface {
181 unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ 177 unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
182 unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ 178 unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
183 unsigned needs_binding:1; /* needs delayed unbind/rebind */ 179 unsigned needs_binding:1; /* needs delayed unbind/rebind */
184 unsigned reset_running:1;
185 unsigned resetting_device:1; /* true: bandwidth alloc after reset */ 180 unsigned resetting_device:1; /* true: bandwidth alloc after reset */
186 181
187 struct device dev; /* interface specific device info */ 182 struct device dev; /* interface specific device info */
diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h
index 7eb4dcd0d386..db0431b39a63 100644
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -34,6 +34,8 @@ struct usb_hcd;
34 * after initialization. 34 * after initialization.
35 * @no_io_watchdog: set to 1 if the controller does not need the I/O 35 * @no_io_watchdog: set to 1 if the controller does not need the I/O
36 * watchdog to run. 36 * watchdog to run.
37 * @reset_on_resume: set to 1 if the controller needs to be reset after
38 * a suspend / resume cycle (but can't detect that itself).
37 * 39 *
38 * These are general configuration options for the EHCI controller. All of 40 * These are general configuration options for the EHCI controller. All of
39 * these options are activating more or less workarounds for some hardware. 41 * these options are activating more or less workarounds for some hardware.
@@ -45,6 +47,8 @@ struct usb_ehci_pdata {
45 unsigned big_endian_desc:1; 47 unsigned big_endian_desc:1;
46 unsigned big_endian_mmio:1; 48 unsigned big_endian_mmio:1;
47 unsigned no_io_watchdog:1; 49 unsigned no_io_watchdog:1;
50 unsigned reset_on_resume:1;
51 unsigned dma_mask_64:1;
48 52
49 /* Turn on all power and clocks */ 53 /* Turn on all power and clocks */
50 int (*power_on)(struct platform_device *pdev); 54 int (*power_on)(struct platform_device *pdev);
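A hypothetical platform-data snippet using the two new ehci_pdriver quirk bits; the board and the interpretation of dma_mask_64 (controller can address 64-bit DMA) are assumptions, only reset_on_resume is documented in the hunk above.

/* Hypothetical usb_ehci_pdata with the new quirk bits set. */
#include <linux/usb/ehci_pdriver.h>

static struct usb_ehci_pdata example_ehci_pdata = {
	.reset_on_resume = 1,	/* controller loses state across suspend/resume */
	.dma_mask_64     = 1,	/* assumed: controller supports a 64-bit DMA mask */
};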
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 70ddb3943b62..e2f00fd8cd47 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -523,6 +523,7 @@ struct usb_gadget_ops {
523 * enabled HNP support. 523 * enabled HNP support.
524 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to 524 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
525 * MaxPacketSize. 525 * MaxPacketSize.
526 * @is_selfpowered: if the gadget is self-powered.
526 * 527 *
527 * Gadgets have a mostly-portable "gadget driver" implementing device 528 * Gadgets have a mostly-portable "gadget driver" implementing device
528 * functions, handling all usb configurations and interfaces. Gadget 529 * functions, handling all usb configurations and interfaces. Gadget
@@ -563,6 +564,7 @@ struct usb_gadget {
563 unsigned a_hnp_support:1; 564 unsigned a_hnp_support:1;
564 unsigned a_alt_hnp_support:1; 565 unsigned a_alt_hnp_support:1;
565 unsigned quirk_ep_out_aligned_size:1; 566 unsigned quirk_ep_out_aligned_size:1;
567 unsigned is_selfpowered:1;
566}; 568};
567#define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) 569#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
568 570
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 086bf13307e6..68b1e836dff1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -146,6 +146,8 @@ struct usb_hcd {
146 unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ 146 unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
147 unsigned can_do_streams:1; /* HC supports streams */ 147 unsigned can_do_streams:1; /* HC supports streams */
148 unsigned tpl_support:1; /* OTG & EH TPL support */ 148 unsigned tpl_support:1; /* OTG & EH TPL support */
149 unsigned cant_recv_wakeups:1;
150 /* wakeup requests from downstream aren't received */
149 151
150 unsigned int irq; /* irq allocated */ 152 unsigned int irq; /* irq allocated */
151 void __iomem *regs; /* device memory/io */ 153 void __iomem *regs; /* device memory/io */
@@ -453,6 +455,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
453#endif /* CONFIG_PCI */ 455#endif /* CONFIG_PCI */
454 456
455/* pci-ish (pdev null is ok) buffer alloc/mapping support */ 457/* pci-ish (pdev null is ok) buffer alloc/mapping support */
458void usb_init_pool_max(void);
456int hcd_buffer_create(struct usb_hcd *hcd); 459int hcd_buffer_create(struct usb_hcd *hcd);
457void hcd_buffer_destroy(struct usb_hcd *hcd); 460void hcd_buffer_destroy(struct usb_hcd *hcd);
458 461
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index f499c23e6342..bc91b5d380fd 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -1,5 +1,5 @@
1/* USB OTG (On The Go) defines */
2/* 1/*
2 * USB PHY defines
3 * 3 *
4 * These APIs may be used between USB controllers. USB device drivers 4 * These APIs may be used between USB controllers. USB device drivers
5 * (for either host or peripheral roles) don't use these calls; they 5 * (for either host or peripheral roles) don't use these calls; they
@@ -106,7 +106,7 @@ struct usb_phy {
106 int (*set_power)(struct usb_phy *x, 106 int (*set_power)(struct usb_phy *x,
107 unsigned mA); 107 unsigned mA);
108 108
109 /* for non-OTG B devices: set transceiver into suspend mode */ 109 /* Set transceiver into suspend mode */
110 int (*set_suspend)(struct usb_phy *x, 110 int (*set_suspend)(struct usb_phy *x,
111 int suspend); 111 int suspend);
112 112
diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h
index 68adae83affc..c13632d5292e 100644
--- a/include/linux/usb/usb_phy_generic.h
+++ b/include/linux/usb/usb_phy_generic.h
@@ -2,6 +2,7 @@
2#define __LINUX_USB_NOP_XCEIV_H 2#define __LINUX_USB_NOP_XCEIV_H
3 3
4#include <linux/usb/otg.h> 4#include <linux/usb/otg.h>
5#include <linux/gpio/consumer.h>
5 6
6struct usb_phy_generic_platform_data { 7struct usb_phy_generic_platform_data {
7 enum usb_phy_type type; 8 enum usb_phy_type type;
@@ -11,6 +12,7 @@ struct usb_phy_generic_platform_data {
11 unsigned int needs_vcc:1; 12 unsigned int needs_vcc:1;
12 unsigned int needs_reset:1; /* deprecated */ 13 unsigned int needs_reset:1; /* deprecated */
13 int gpio_reset; 14 int gpio_reset;
15 struct gpio_desc *gpiod_vbus;
14}; 16};
15 17
16#if IS_ENABLED(CONFIG_NOP_USB_XCEIV) 18#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index b87696fdf06a..7d7acb35603d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -16,6 +16,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
16#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ 16#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
17#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ 17#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
18#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ 18#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
19#define VM_NO_GUARD 0x00000040 /* don't add guard page */
19/* bits [20..32] reserved for arch specific ioremap internals */ 20/* bits [20..32] reserved for arch specific ioremap internals */
20 21
21/* 22/*
@@ -75,7 +76,9 @@ extern void *vmalloc_32_user(unsigned long size);
75extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); 76extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
76extern void *__vmalloc_node_range(unsigned long size, unsigned long align, 77extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
77 unsigned long start, unsigned long end, gfp_t gfp_mask, 78 unsigned long start, unsigned long end, gfp_t gfp_mask,
78 pgprot_t prot, int node, const void *caller); 79 pgprot_t prot, unsigned long vm_flags, int node,
80 const void *caller);
81
79extern void vfree(const void *addr); 82extern void vfree(const void *addr);
80 83
81extern void *vmap(struct page **pages, unsigned int count, 84extern void *vmap(struct page **pages, unsigned int count,
@@ -96,8 +99,12 @@ void vmalloc_sync_all(void);
96 99
97static inline size_t get_vm_area_size(const struct vm_struct *area) 100static inline size_t get_vm_area_size(const struct vm_struct *area)
98{ 101{
99 /* return actual size without guard page */ 102 if (!(area->flags & VM_NO_GUARD))
100 return area->size - PAGE_SIZE; 103 /* return actual size without guard page */
104 return area->size - PAGE_SIZE;
105 else
106 return area->size;
107
101} 108}
102 109
103extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); 110extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
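The vmalloc.h hunk above introduces VM_NO_GUARD, threads a vm_flags argument through __vmalloc_node_range(), and makes get_vm_area_size() report the full size for guard-less areas. A hedged sketch of requesting such an area; the align value and the choice of range/protection are illustrative.

/* Sketch: allocate a vmalloc area without the trailing guard page. */
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>

static void *example_alloc_no_guard(unsigned long size)
{
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL, VM_NO_GUARD,
				    NUMA_NO_NODE, __builtin_return_address(0));
}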
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index 5691f752ce8f..63df3a2a8ce5 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -74,7 +74,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
74ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, 74ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
75 int mode); 75 int mode);
76ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, 76ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
77 void *iov, size_t iov_size, int mode); 77 struct msghdr *msg, size_t iov_size, int mode);
78ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, 78ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
79 struct msghdr *msg, size_t iov_size, int mode); 79 struct msghdr *msg, size_t iov_size, int mode);
80ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, 80ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index 057db7d2f448..f38c10ba3ff5 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -21,10 +21,6 @@
21#ifndef VT_BUF_HAVE_RW 21#ifndef VT_BUF_HAVE_RW
22#define scr_writew(val, addr) (*(addr) = (val)) 22#define scr_writew(val, addr) (*(addr) = (val))
23#define scr_readw(addr) (*(addr)) 23#define scr_readw(addr) (*(addr))
24#define scr_memcpyw(d, s, c) memcpy(d, s, c)
25#define scr_memmovew(d, s, c) memmove(d, s, c)
26#define VT_BUF_HAVE_MEMCPYW
27#define VT_BUF_HAVE_MEMMOVEW
28#endif 24#endif
29 25
30#ifndef VT_BUF_HAVE_MEMSETW 26#ifndef VT_BUF_HAVE_MEMSETW
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2232ed16635a..2db83349865b 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -267,6 +267,21 @@ do { \
267 __wait_event(wq, condition); \ 267 __wait_event(wq, condition); \
268} while (0) 268} while (0)
269 269
270#define __io_wait_event(wq, condition) \
271 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
272 io_schedule())
273
274/*
275 * io_wait_event() -- like wait_event() but with io_schedule()
276 */
277#define io_wait_event(wq, condition) \
278do { \
279 might_sleep(); \
280 if (condition) \
281 break; \
282 __io_wait_event(wq, condition); \
283} while (0)
284
270#define __wait_event_freezable(wq, condition) \ 285#define __wait_event_freezable(wq, condition) \
271 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ 286 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
272 schedule(); try_to_freeze()) 287 schedule(); try_to_freeze())
@@ -363,7 +378,6 @@ do { \
363 */ 378 */
364#define wait_event_cmd(wq, condition, cmd1, cmd2) \ 379#define wait_event_cmd(wq, condition, cmd1, cmd2) \
365do { \ 380do { \
366 might_sleep(); \
367 if (condition) \ 381 if (condition) \
368 break; \ 382 break; \
369 __wait_event_cmd(wq, condition, cmd1, cmd2); \ 383 __wait_event_cmd(wq, condition, cmd1, cmd2); \
@@ -991,6 +1005,32 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
991} 1005}
992 1006
993/** 1007/**
1008 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1009 * @word: the word being waited on, a kernel virtual address
1010 * @bit: the bit of the word being waited on
1011 * @mode: the task state to sleep in
1012 * @timeout: timeout, in jiffies
1013 *
1014 * Use the standard hashed waitqueue table to wait for a bit
1015 * to be cleared. This is similar to wait_on_bit(), except also takes a
1016 * timeout parameter.
1017 *
1018 * Returned value will be zero if the bit was cleared before the
1019 * @timeout elapsed, or non-zero if the @timeout elapsed or process
1020 * received a signal and the mode permitted wakeup on that signal.
1021 */
1022static inline int
1023wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
1024{
1025 might_sleep();
1026 if (!test_bit(bit, word))
1027 return 0;
1028 return out_of_line_wait_on_bit_timeout(word, bit,
1029 bit_wait_timeout,
1030 mode, timeout);
1031}
1032
1033/**
994 * wait_on_bit_action - wait for a bit to be cleared 1034 * wait_on_bit_action - wait for a bit to be cleared
995 * @word: the word being waited on, a kernel virtual address 1035 * @word: the word being waited on, a kernel virtual address
996 * @bit: the bit of the word being waited on 1036 * @bit: the bit of the word being waited on
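The wait.h hunk above adds io_wait_event() (wait_event() accounted as I/O sleep) and wait_on_bit_timeout(). A minimal sketch of the latter, assuming a made-up flags word where bit 0 means "operation in flight":

/* Sketch: wait up to one second for bit 0 of a flags word to clear. */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static unsigned long example_flags;	/* bit 0 == "operation in flight" (made up) */

static int example_wait_idle(void)
{
	/* returns 0 if the bit cleared before the timeout, non-zero otherwise */
	return wait_on_bit_timeout(&example_flags, 0, TASK_UNINTERRUPTIBLE,
				   msecs_to_jiffies(1000));
}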
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b996e6cde6bb..74db135f9957 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -220,14 +220,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
220#endif 220#endif
221 221
222#define INIT_WORK(_work, _func) \ 222#define INIT_WORK(_work, _func) \
223 do { \ 223 __INIT_WORK((_work), (_func), 0)
224 __INIT_WORK((_work), (_func), 0); \
225 } while (0)
226 224
227#define INIT_WORK_ONSTACK(_work, _func) \ 225#define INIT_WORK_ONSTACK(_work, _func) \
228 do { \ 226 __INIT_WORK((_work), (_func), 1)
229 __INIT_WORK((_work), (_func), 1); \
230 } while (0)
231 227
232#define __INIT_DELAYED_WORK(_work, _func, _tflags) \ 228#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
233 do { \ 229 do { \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a219be961c0a..00048339c23e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
177 struct writeback_control *wbc, writepage_t writepage, 177 struct writeback_control *wbc, writepage_t writepage,
178 void *data); 178 void *data);
179int do_writepages(struct address_space *mapping, struct writeback_control *wbc); 179int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
180void set_page_dirty_balance(struct page *page);
181void writeback_set_ratelimit(void); 180void writeback_set_ratelimit(void);
182void tag_pages_for_writeback(struct address_space *mapping, 181void tag_pages_for_writeback(struct address_space *mapping,
183 pgoff_t start, pgoff_t end); 182 pgoff_t start, pgoff_t end);
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index f14bd75f08b3..56529b34dc63 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -36,7 +36,8 @@ enum zpool_mapmode {
36 ZPOOL_MM_DEFAULT = ZPOOL_MM_RW 36 ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
37}; 37};
38 38
39struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops); 39struct zpool *zpool_create_pool(char *type, char *name,
40 gfp_t gfp, struct zpool_ops *ops);
40 41
41char *zpool_get_type(struct zpool *pool); 42char *zpool_get_type(struct zpool *pool);
42 43
@@ -80,7 +81,7 @@ struct zpool_driver {
80 atomic_t refcount; 81 atomic_t refcount;
81 struct list_head list; 82 struct list_head list;
82 83
83 void *(*create)(gfp_t gfp, struct zpool_ops *ops); 84 void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops);
84 void (*destroy)(void *pool); 85 void (*destroy)(void *pool);
85 86
86 int (*malloc)(void *pool, size_t size, gfp_t gfp, 87 int (*malloc)(void *pool, size_t size, gfp_t gfp,
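zpool_create_pool() now takes a pool name, presumably so backends can identify pools (e.g. in debugfs statistics). A sketch of the updated call; the "zbud" backend choice, the pool name, and the NULL ops are illustrative.

/* Sketch of the new zpool_create_pool() signature (names illustrative). */
#include <linux/zpool.h>
#include <linux/gfp.h>

static struct zpool *example_make_pool(void)
{
	return zpool_create_pool("zbud", "example_pool", GFP_KERNEL, NULL);
}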
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 05c214760977..3283c6a55425 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -36,7 +36,7 @@ enum zs_mapmode {
36 36
37struct zs_pool; 37struct zs_pool;
38 38
39struct zs_pool *zs_create_pool(gfp_t flags); 39struct zs_pool *zs_create_pool(char *name, gfp_t flags);
40void zs_destroy_pool(struct zs_pool *pool); 40void zs_destroy_pool(struct zs_pool *pool);
41 41
42unsigned long zs_malloc(struct zs_pool *pool, size_t size); 42unsigned long zs_malloc(struct zs_pool *pool, size_t size);
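Matching sketch for the direct zsmalloc API, which gains the same name argument; the pool name and GFP flags here are assumptions for illustration only.

/* Sketch of the new zs_create_pool() signature (name and flags made up). */
#include <linux/zsmalloc.h>
#include <linux/gfp.h>

static struct zs_pool *example_zs_pool(void)
{
	return zs_create_pool("example_zs", GFP_NOIO | __GFP_HIGHMEM);
}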